Compare commits
491 Commits
@@ -1,7 +1,8 @@
build
llama/build
.venv
.vscode
ollama
app
web
dist
scripts
llm/llama.cpp/ggml
llm/llama.cpp/gguf
.env
.gitignore (vendored, 2 changes)

@@ -5,4 +5,4 @@
.swp
dist
ollama
/ggml-metal.metal
ggml-metal.metal
.gitmodules (vendored, new file, 10 changes)

@@ -0,0 +1,10 @@
[submodule "llm/llama.cpp/ggml"]
	path = llm/llama.cpp/ggml
	url = https://github.com/ggerganov/llama.cpp.git
	ignore = dirty
	shallow = true
[submodule "llm/llama.cpp/gguf"]
	path = llm/llama.cpp/gguf
	url = https://github.com/ggerganov/llama.cpp.git
	ignore = dirty
	shallow = true
Dockerfile (28 changes)

@@ -1,15 +1,23 @@
FROM golang:1.20
WORKDIR /go/src/github.com/jmorganca/ollama
COPY . .
RUN CGO_ENABLED=1 go build -ldflags '-linkmode external -extldflags "-static"' .
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04

FROM alpine
ARG TARGETARCH
ARG GOFLAGS="'-ldflags=-w -s'"

WORKDIR /go/src/github.com/jmorganca/ollama
RUN apt-get update && apt-get install -y git build-essential cmake
ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz

COPY . .
ENV GOARCH=$TARGETARCH
ENV GOFLAGS=$GOFLAGS
RUN /usr/local/go/bin/go generate ./... \
    && /usr/local/go/bin/go build .

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y ca-certificates
COPY --from=0 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
EXPOSE 11434
ARG USER=ollama
ARG GROUP=ollama
RUN addgroup -g 1000 $GROUP && adduser -u 1000 -DG $GROUP $USER
USER $USER:$GROUP
ENTRYPOINT ["/bin/ollama"]
ENV OLLAMA_HOST 0.0.0.0
ENTRYPOINT ["/bin/ollama"]
CMD ["serve"]
Dockerfile.build (new file, 31 changes)

@@ -0,0 +1,31 @@
# centos7 amd64 dependencies
FROM --platform=linux/amd64 nvidia/cuda:11.3.1-devel-centos7 AS base-amd64
RUN yum install -y https://repo.ius.io/ius-release-el7.rpm centos-release-scl && \
    yum update -y && \
    yum install -y devtoolset-10-gcc devtoolset-10-gcc-c++ git236 wget
RUN wget "https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-x86_64.sh" -O cmake-installer.sh && chmod +x cmake-installer.sh && ./cmake-installer.sh --skip-license --prefix=/usr/local
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH

# centos8 arm64 dependencies
FROM --platform=linux/arm64 nvidia/cuda-arm64:11.3.1-devel-centos8 AS base-arm64
RUN sed -i -e 's/mirrorlist/#mirrorlist/g' -e 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
RUN yum install -y git cmake

FROM base-${TARGETARCH}
ARG TARGETARCH
ARG GOFLAGS="'-ldflags -w -s'"

# install go
ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz

# build the final binary
WORKDIR /go/src/github.com/jmorganca/ollama
COPY . .

ENV GOOS=linux
ENV GOARCH=$TARGETARCH
ENV GOFLAGS=$GOFLAGS

RUN /usr/local/go/bin/go generate ./... && \
    /usr/local/go/bin/go build .
README.md (192 changes)

@@ -9,19 +9,31 @@

[](https://discord.gg/ollama)

Run, create, and share large language models (LLMs).
Get up and running with large language models locally.

> Note: Ollama is in early preview. Please report any issues you find.
### macOS

## Download
[Download](https://ollama.ai/download/Ollama-darwin.zip)

- [Download](https://ollama.ai/download) for macOS
- Download for Windows and Linux (coming soon)
- Build [from source](#building)

### Windows

Coming soon!

### Linux & WSL2

```
curl https://ollama.ai/install.sh | sh
```

[Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)

### Docker

See the official [Docker image](https://hub.docker.com/r/ollama/ollama).

## Quickstart

To run and chat with [Llama 2](https://ai.meta.com/llama), the new model by Meta:
To run and chat with [Llama 2](https://ollama.ai/library/llama2):

```
ollama run llama2

@@ -29,50 +41,59 @@ ollama run llama2

## Model library

`ollama` includes a library of open-source models:
Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library 'ollama model library')

| Model | Parameters | Size | Download |
| ------------------------ | ---------- | ----- | ------------------------------- |
| Llama2 | 7B | 3.8GB | `ollama pull llama2` |
| Llama2 13B | 13B | 7.3GB | `ollama pull llama2:13b` |
| Llama2 70B | 70B | 39GB | `ollama pull llama2:70b` |
| Llama2 Uncensored | 7B | 3.8GB | `ollama pull llama2-uncensored` |
| Orca Mini | 3B | 1.9GB | `ollama pull orca` |
| Vicuna | 7B | 3.8GB | `ollama pull vicuna` |
| Nous-Hermes | 13B | 7.3GB | `ollama pull nous-hermes` |
| Wizard Vicuna Uncensored | 13B | 7.3GB | `ollama pull wizard-vicuna` |
Here are some example open-source models that can be downloaded:

| Model | Parameters | Size | Download |
| ------------------ | ---------- | ----- | ------------------------------ |
| Mistral | 7B | 4.1GB | `ollama run mistral` |
| Llama 2 | 7B | 3.8GB | `ollama run llama2` |
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
| Llama 2 13B | 13B | 7.3GB | `ollama run llama2:13b` |
| Llama 2 70B | 70B | 39GB | `ollama run llama2:70b` |
| Orca Mini | 3B | 1.9GB | `ollama run orca-mini` |
| Vicuna | 7B | 3.8GB | `ollama run vicuna` |

> Note: You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.

## Examples
## Customize your own model

### Run a model
### Import from GGUF

```
ollama run llama2
>>> hi
Hello! How can I help you today?
```
Ollama supports importing GGUF models in the Modelfile:

For multiline input, you can wrap text with `"""`:
1. Create a file named `Modelfile`, with a `FROM` instruction with the local filepath to the model you want to import.

```
>>> """Hello,
... world!
... """
I'm a basic program that prints the famous "Hello, world!" message to the console.
```
```
FROM ./vicuna-33b.Q4_0.gguf
```

### Create a custom model
2. Create the model in Ollama

Pull a base model:
```
ollama create example -f Modelfile
```

3. Run the model

```
ollama run example
```

### Import from PyTorch or Safetensors

See the [guide](docs/import.md) on importing models for more information.

### Customize a prompt

Models from the Ollama library can be customized with a prompt. The example

```
ollama pull llama2
```

> To update a model to the latest version, run `ollama pull llama2` again. The model will be updated (if necessary).

Create a `Modelfile`:

```
@@ -96,44 +117,85 @@ ollama run mario
Hello! It's your friend Mario.
```

For more examples, see the [examples](./examples) directory. For more information on creating a Modelfile, see the [Modelfile](./docs/modelfile.md) documentation.
For more examples, see the [examples](examples) directory. For more information on working with a Modelfile, see the [Modelfile](docs/modelfile.md) documentation.

### Pull a model from the registry
## CLI Reference

### Create a model

`ollama create` is used to create a model from a Modelfile.

### Pull a model

```
ollama pull orca
ollama pull llama2
```

### Listing local models
> This command can also be used to update a local model. Only the diff will be pulled.

### Remove a model

```
ollama rm llama2
```

### Copy a model

```
ollama cp llama2 my-llama2
```

### Multiline input

For multiline input, you can wrap text with `"""`:

```
>>> """Hello,
... world!
... """
I'm a basic program that prints the famous "Hello, world!" message to the console.
```

### Pass in prompt as arguments

```
$ ollama run llama2 "summarize this file:" "$(cat README.md)"
Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
```

### List models on your computer

```
ollama list
```

## Model packages
### Start Ollama

### Overview

Ollama bundles model weights, configuration, and data into a single package, defined by a [Modelfile](./docs/modelfile.md).

<picture>
  <source media="(prefers-color-scheme: dark)" height="480" srcset="https://github.com/jmorganca/ollama/assets/251292/2fd96b5f-191b-45c1-9668-941cfad4eb70">
  <img alt="logo" height="480" src="https://github.com/jmorganca/ollama/assets/251292/2fd96b5f-191b-45c1-9668-941cfad4eb70">
</picture>
`ollama serve` is used when you want to start ollama without running the desktop application.

## Building

Install `cmake` and `go`:

```
brew install cmake
brew install go
```

Then generate dependencies and build:

```
go generate ./...
go build .
```

To run it start the server:
Next, start the server:

```
./ollama serve &
./ollama serve
```

Finally, run a model!
Finally, in a separate shell, run a model:

```
./ollama run llama2

@@ -141,7 +203,7 @@ Finally, run a model!

## REST API

> See the [API documentation](./docs/api.md) for all endpoints.
See the [API documentation](docs/api.md) for all endpoints.

Ollama has an API for running and managing models. For example to generate text from a model:

@@ -152,10 +214,22 @@ curl -X POST http://localhost:11434/api/generate -d '{
}'
```

## Tools using Ollama
## Community Integrations

- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with a question-answering [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa).
- [Continue](https://github.com/continuedev/continue) - embeds Ollama inside Visual Studio Code. The extension lets you highlight code to add to the prompt, ask questions in the sidebar, and generate code inline.
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) - interact with Ollama as a chatbot on Discord.
- [Raycast Ollama](https://github.com/MassimilianoPasquini97/raycast_ollama) - Raycast extension to use Ollama for local llama inference on Raycast.
- [Simple HTML UI for Ollama](https://github.com/rtcfirefly/ollama-ui)
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
- [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama)
- [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel)
- [Continue](https://github.com/continuedev/continue)
- [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)
- [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)
- [LiteLLM](https://github.com/BerriAI/litellm)
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
- [Dumbar](https://github.com/JerrySievert/Dumbar)
- [Emacs client](https://github.com/zweifisch/ollama)
- [oterm](https://github.com/ggozad/oterm)
- [Ellama Emacs client](https://github.com/s-kostyaev/ellama)
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
api/client.go (101 changes)

@@ -7,18 +7,28 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"runtime"
	"strings"

	"github.com/jmorganca/ollama/format"
	"github.com/jmorganca/ollama/version"
)

const DefaultHost = "127.0.0.1:11434"

var envHost = os.Getenv("OLLAMA_HOST")

type Client struct {
	base url.URL
	HTTP http.Client
	Headers http.Header
	base *url.URL
	http http.Client
}

func checkError(resp *http.Response, body []byte) error {
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
	if resp.StatusCode < http.StatusBadRequest {
		return nil
	}

@@ -33,16 +43,46 @@ func checkError(resp *http.Response, body []byte) error {
	return apiError
}

func NewClient(hosts ...string) *Client {
	host := "127.0.0.1:11434"
	if len(hosts) > 0 {
		host = hosts[0]
func ClientFromEnvironment() (*Client, error) {
	scheme, hostport, ok := strings.Cut(os.Getenv("OLLAMA_HOST"), "://")
	if !ok {
		scheme, hostport = "http", os.Getenv("OLLAMA_HOST")
	}

	return &Client{
		base: url.URL{Scheme: "http", Host: host},
		HTTP: http.Client{},
	host, port, err := net.SplitHostPort(hostport)
	if err != nil {
		host, port = "127.0.0.1", "11434"
		if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
			host = ip.String()
		} else if hostport != "" {
			host = hostport
		}
	}

	client := Client{
		base: &url.URL{
			Scheme: scheme,
			Host:   net.JoinHostPort(host, port),
		},
	}

	mockRequest, err := http.NewRequest("HEAD", client.base.String(), nil)
	if err != nil {
		return nil, err
	}

	proxyURL, err := http.ProxyFromEnvironment(mockRequest)
	if err != nil {
		return nil, err
	}

	client.http = http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyURL(proxyURL),
		},
	}

	return &client, nil
}

func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
@@ -57,21 +97,17 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
		reqBody = bytes.NewReader(data)
	}

	url := c.base.JoinPath(path).String()

	req, err := http.NewRequestWithContext(ctx, method, url, reqBody)
	requestURL := c.base.JoinPath(path)
	request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), reqBody)
	if err != nil {
		return err
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("Accept", "application/json")
	request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))

	for k, v := range c.Headers {
		req.Header[k] = v
	}

	respObj, err := c.HTTP.Do(req)
	respObj, err := c.http.Do(request)
	if err != nil {
		return err
	}
@@ -94,6 +130,8 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
	return nil
}

const maxBufferSize = 512 * format.KiloByte

func (c *Client) stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error {
	var buf *bytes.Buffer
	if data != nil {
@@ -105,21 +143,26 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
		buf = bytes.NewBuffer(bts)
	}

	request, err := http.NewRequestWithContext(ctx, method, c.base.JoinPath(path).String(), buf)
	requestURL := c.base.JoinPath(path)
	request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), buf)
	if err != nil {
		return err
	}

	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("Accept", "application/json")
	request.Header.Set("Accept", "application/x-ndjson")
	request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))

	response, err := http.DefaultClient.Do(request)
	response, err := c.http.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	scanner := bufio.NewScanner(response.Body)
	// increase the buffer size to avoid running out of space
	scanBuf := make([]byte, 0, maxBufferSize)
	scanner.Buffer(scanBuf, maxBufferSize)
	for scanner.Scan() {
		var errorResponse struct {
			Error string `json:"error,omitempty"`
@@ -134,7 +177,7 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
			return fmt.Errorf(errorResponse.Error)
		}

		if response.StatusCode >= 400 {
		if response.StatusCode >= http.StatusBadRequest {
			return StatusError{
				StatusCode: response.StatusCode,
				Status:     response.Status,
@@ -224,6 +267,14 @@ func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
	return nil
}

func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, error) {
	var resp ShowResponse
	if err := c.do(ctx, http.MethodPost, "/api/show", req, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}

func (c *Client) Heartbeat(ctx context.Context) error {
	if err := c.do(ctx, http.MethodHead, "/", nil, nil); err != nil {
		return err
api/client.py (new file, 225 changes)

@@ -0,0 +1,225 @@
import os
import json
import requests

BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')

# Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses.
# The final response object will include statistics and additional data from the request. Use the callback function to override
# the default handler.
def generate(model_name, prompt, system=None, template=None, context=None, options=None, callback=None):
    try:
        url = f"{BASE_URL}/api/generate"
        payload = {
            "model": model_name,
            "prompt": prompt,
            "system": system,
            "template": template,
            "context": context,
            "options": options
        }

        # Remove keys with None values
        payload = {k: v for k, v in payload.items() if v is not None}

        with requests.post(url, json=payload, stream=True) as response:
            response.raise_for_status()

            # Creating a variable to hold the context history of the final chunk
            final_context = None

            # Variable to hold concatenated response strings if no callback is provided
            full_response = ""

            # Iterating over the response line by line and displaying the details
            for line in response.iter_lines():
                if line:
                    # Parsing each line (JSON chunk) and extracting the details
                    chunk = json.loads(line)

                    # If a callback function is provided, call it with the chunk
                    if callback:
                        callback(chunk)
                    else:
                        # If this is not the last chunk, add the "response" field value to full_response and print it
                        if not chunk.get("done"):
                            response_piece = chunk.get("response", "")
                            full_response += response_piece
                            print(response_piece, end="", flush=True)

                    # Check if it's the last chunk (done is true)
                    if chunk.get("done"):
                        final_context = chunk.get("context")

            # Return the full response and the final context
            return full_response, final_context
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None, None

# Create a model from a Modelfile. Use the callback function to override the default handler.
def create(model_name, model_path, callback=None):
    try:
        url = f"{BASE_URL}/api/create"
        payload = {"name": model_name, "path": model_path}

        # Making a POST request with the stream parameter set to True to handle streaming responses
        with requests.post(url, json=payload, stream=True) as response:
            response.raise_for_status()

            # Iterating over the response line by line and displaying the status
            for line in response.iter_lines():
                if line:
                    # Parsing each line (JSON chunk) and extracting the status
                    chunk = json.loads(line)

                    if callback:
                        callback(chunk)
                    else:
                        print(f"Status: {chunk.get('status')}")
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")

# Pull a model from a the model registry. Cancelled pulls are resumed from where they left off, and multiple
# calls to will share the same download progress. Use the callback function to override the default handler.
def pull(model_name, insecure=False, callback=None):
    try:
        url = f"{BASE_URL}/api/pull"
        payload = {
            "name": model_name,
            "insecure": insecure
        }

        # Making a POST request with the stream parameter set to True to handle streaming responses
        with requests.post(url, json=payload, stream=True) as response:
            response.raise_for_status()

            # Iterating over the response line by line and displaying the details
            for line in response.iter_lines():
                if line:
                    # Parsing each line (JSON chunk) and extracting the details
                    chunk = json.loads(line)

                    # If a callback function is provided, call it with the chunk
                    if callback:
                        callback(chunk)
                    else:
                        # Print the status message directly to the console
                        print(chunk.get('status', ''), end='', flush=True)

                        # If there's layer data, you might also want to print that (adjust as necessary)
                        if 'digest' in chunk:
                            print(f" - Digest: {chunk['digest']}", end='', flush=True)
                            print(f" - Total: {chunk['total']}", end='', flush=True)
                            print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
                        else:
                            print()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")

# Push a model to the model registry. Use the callback function to override the default handler.
def push(model_name, insecure=False, callback=None):
    try:
        url = f"{BASE_URL}/api/push"
        payload = {
            "name": model_name,
            "insecure": insecure
        }

        # Making a POST request with the stream parameter set to True to handle streaming responses
        with requests.post(url, json=payload, stream=True) as response:
            response.raise_for_status()

            # Iterating over the response line by line and displaying the details
            for line in response.iter_lines():
                if line:
                    # Parsing each line (JSON chunk) and extracting the details
                    chunk = json.loads(line)

                    # If a callback function is provided, call it with the chunk
                    if callback:
                        callback(chunk)
                    else:
                        # Print the status message directly to the console
                        print(chunk.get('status', ''), end='', flush=True)

                        # If there's layer data, you might also want to print that (adjust as necessary)
                        if 'digest' in chunk:
                            print(f" - Digest: {chunk['digest']}", end='', flush=True)
                            print(f" - Total: {chunk['total']}", end='', flush=True)
                            print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
                        else:
                            print()
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")

# List models that are available locally.
def list():
    try:
        response = requests.get(f"{BASE_URL}/api/tags")
        response.raise_for_status()
        data = response.json()
        models = data.get('models', [])
        return models

    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None

# Copy a model. Creates a model with another name from an existing model.
def copy(source, destination):
    try:
        # Create the JSON payload
        payload = {
            "source": source,
            "destination": destination
        }

        response = requests.post(f"{BASE_URL}/api/copy", json=payload)
        response.raise_for_status()

        # If the request was successful, return a message indicating that the copy was successful
        return "Copy successful"

    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None

# Delete a model and its data.
def delete(model_name):
    try:
        url = f"{BASE_URL}/api/delete"
        payload = {"name": model_name}
        response = requests.delete(url, json=payload)
        response.raise_for_status()
        return "Delete successful"
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None

# Show info about a model.
def show(model_name):
    try:
        url = f"{BASE_URL}/api/show"
        payload = {"name": model_name}
        response = requests.post(url, json=payload)
        response.raise_for_status()

        # Parse the JSON response and return it
        data = response.json()
        return data
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return None

def heartbeat():
    try:
        url = f"{BASE_URL}/"
        response = requests.head(url)
        response.raise_for_status()
        return "Ollama is running"
    except requests.exceptions.RequestException as e:
        print(f"An error occurred: {e}")
        return "Ollama is not running"
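The new Python client above is a thin `requests` wrapper over the REST endpoints. As a rough usage sketch (assuming the file is importable locally as `client`, a server is reachable at the default `OLLAMA_HOST`, and `llama2` has already been pulled; none of these names are part of the change itself):

```python
# Hypothetical usage of the client module added in this diff.
import client

# Stream a completion, printing chunks through a custom callback.
def on_chunk(chunk):
    if not chunk.get("done"):
        print(chunk.get("response", ""), end="", flush=True)

client.generate("llama2", "Why is the sky blue?", callback=on_chunk)

# Or let the default handler print, and keep the returned context for a follow-up.
answer, context = client.generate("llama2", "Why is the sky blue?")
followup, _ = client.generate("llama2", "Summarize that in one sentence.", context=context)

# List the models available locally.
for model in client.list() or []:
    print(model["name"], model["size"])
```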
api/types.go (167 changes)

@@ -3,11 +3,9 @@ package api
import (
	"encoding/json"
	"fmt"
	"log"
	"math"
	"os"
	"reflect"
	"runtime"
	"strings"
	"time"
)
@@ -38,6 +36,7 @@ type GenerateRequest struct {
	System   string `json:"system"`
	Template string `json:"template"`
	Context  []int  `json:"context,omitempty"`
	Stream   *bool  `json:"stream,omitempty"`

	Options map[string]interface{} `json:"options"`
}
@@ -54,14 +53,27 @@ type EmbeddingResponse struct {
}

type CreateRequest struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Name   string `json:"name"`
	Path   string `json:"path"`
	Stream *bool  `json:"stream,omitempty"`
}

type DeleteRequest struct {
	Name string `json:"name"`
}

type ShowRequest struct {
	Name string `json:"name"`
}

type ShowResponse struct {
	License    string `json:"license,omitempty"`
	Modelfile  string `json:"modelfile,omitempty"`
	Parameters string `json:"parameters,omitempty"`
	Template   string `json:"template,omitempty"`
	System     string `json:"system,omitempty"`
}

type CopyRequest struct {
	Source      string `json:"source"`
	Destination string `json:"destination"`
@@ -72,13 +84,14 @@ type PullRequest struct {
	Insecure bool   `json:"insecure,omitempty"`
	Username string `json:"username"`
	Password string `json:"password"`
	Stream   *bool  `json:"stream,omitempty"`
}

type ProgressResponse struct {
	Status    string `json:"status"`
	Digest    string `json:"digest,omitempty"`
	Total     int    `json:"total,omitempty"`
	Completed int    `json:"completed,omitempty"`
	Total     int64  `json:"total,omitempty"`
	Completed int64  `json:"completed,omitempty"`
}

type PushRequest struct {
@@ -86,16 +99,18 @@ type PushRequest struct {
	Insecure bool   `json:"insecure,omitempty"`
	Username string `json:"username"`
	Password string `json:"password"`
	Stream   *bool  `json:"stream,omitempty"`
}

type ListResponse struct {
	Models []ListResponseModel `json:"models"`
	Models []ModelResponse `json:"models"`
}

type ListResponseModel struct {
type ModelResponse struct {
	Name       string    `json:"name"`
	ModifiedAt time.Time `json:"modified_at"`
	Size       int       `json:"size"`
	Size       int64     `json:"size"`
	Digest     string    `json:"digest"`
}

type TokenResponse struct {
@@ -105,15 +120,13 @@ type TokenResponse struct {
type GenerateResponse struct {
	Model     string    `json:"model"`
	CreatedAt time.Time `json:"created_at"`
	Response  string    `json:"response,omitempty"`
	Response  string    `json:"response"`

	Done    bool  `json:"done"`
	Context []int `json:"context,omitempty"`

	TotalDuration      time.Duration `json:"total_duration,omitempty"`
	LoadDuration       time.Duration `json:"load_duration,omitempty"`
	SampleCount        int           `json:"sample_count,omitempty"`
	SampleDuration     time.Duration `json:"sample_duration,omitempty"`
	PromptEvalCount    int           `json:"prompt_eval_count,omitempty"`
	PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"`
	EvalCount          int           `json:"eval_count,omitempty"`
@@ -129,15 +142,6 @@ func (r *GenerateResponse) Summary() {
		fmt.Fprintf(os.Stderr, "load duration: %v\n", r.LoadDuration)
	}

	if r.SampleCount > 0 {
		fmt.Fprintf(os.Stderr, "sample count: %d token(s)\n", r.SampleCount)
	}

	if r.SampleDuration > 0 {
		fmt.Fprintf(os.Stderr, "sample duration: %s\n", r.SampleDuration)
		fmt.Fprintf(os.Stderr, "sample rate: %.2f tokens/s\n", float64(r.SampleCount)/r.SampleDuration.Seconds())
	}

	if r.PromptEvalCount > 0 {
		fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
	}
@@ -157,15 +161,10 @@ func (r *GenerateResponse) Summary() {
	}
}

type Options struct {
	Seed int `json:"seed,omitempty"`

	// Backend options
	UseNUMA bool `json:"numa,omitempty"`

	// Model options
// Runner options which must be set when the model is loaded into memory
type Runner struct {
	UseNUMA  bool `json:"numa,omitempty"`
	NumCtx   int  `json:"num_ctx,omitempty"`
	NumKeep  int  `json:"num_keep,omitempty"`
	NumBatch int  `json:"num_batch,omitempty"`
	NumGQA   int  `json:"num_gqa,omitempty"`
	NumGPU   int  `json:"num_gpu,omitempty"`
@@ -179,26 +178,34 @@ type Options struct {
	EmbeddingOnly      bool    `json:"embedding_only,omitempty"`
	RopeFrequencyBase  float32 `json:"rope_frequency_base,omitempty"`
	RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
	NumThread          int     `json:"num_thread,omitempty"`
}

	// Predict options
	RepeatLastN      int     `json:"repeat_last_n,omitempty"`
	RepeatPenalty    float32 `json:"repeat_penalty,omitempty"`
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
	PresencePenalty  float32 `json:"presence_penalty,omitempty"`
	Temperature      float32 `json:"temperature,omitempty"`
type Options struct {
	Runner

	// Predict options used at runtime
	NumKeep          int      `json:"num_keep,omitempty"`
	Seed             int      `json:"seed,omitempty"`
	NumPredict       int      `json:"num_predict,omitempty"`
	TopK             int      `json:"top_k,omitempty"`
	TopP             float32  `json:"top_p,omitempty"`
	TFSZ             float32  `json:"tfs_z,omitempty"`
	TypicalP         float32  `json:"typical_p,omitempty"`
	RepeatLastN      int      `json:"repeat_last_n,omitempty"`
	Temperature      float32  `json:"temperature,omitempty"`
	RepeatPenalty    float32  `json:"repeat_penalty,omitempty"`
	PresencePenalty  float32  `json:"presence_penalty,omitempty"`
	FrequencyPenalty float32  `json:"frequency_penalty,omitempty"`
	Mirostat         int      `json:"mirostat,omitempty"`
	MirostatTau      float32  `json:"mirostat_tau,omitempty"`
	MirostatEta      float32  `json:"mirostat_eta,omitempty"`
	PenalizeNewline  bool     `json:"penalize_newline,omitempty"`
	Stop             []string `json:"stop,omitempty"`

	NumThread int `json:"num_thread,omitempty"`
}

var ErrInvalidOpts = fmt.Errorf("invalid options")

func (opts *Options) FromMap(m map[string]interface{}) error {
	valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
	typeOpts := reflect.TypeOf(opts).Elem()   // types of the fields in the options struct
@@ -212,55 +219,57 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
		}
	}

	invalidOpts := []string{}
	for key, val := range m {
		if opt, ok := jsonOpts[key]; ok {
			field := valueOpts.FieldByName(opt.Name)
			if field.IsValid() && field.CanSet() {
				if val == nil {
					continue
				}

				switch field.Kind() {
				case reflect.Int:
					// when JSON unmarshals numbers, it uses float64 by default, not int
					val, ok := val.(float64)
					if !ok {
						log.Printf("could not convert model parmeter %v to int, skipped", key)
						continue
					switch t := val.(type) {
					case int64:
						field.SetInt(t)
					case float64:
						// when JSON unmarshals numbers, it uses float64, not int
						field.SetInt(int64(t))
					default:
						return fmt.Errorf("option %q must be of type integer", key)
					}
					field.SetInt(int64(val))
				case reflect.Bool:
					val, ok := val.(bool)
					if !ok {
						log.Printf("could not convert model parmeter %v to bool, skipped", key)
						continue
						return fmt.Errorf("option %q must be of type boolean", key)
					}
					field.SetBool(val)
				case reflect.Float32:
					// JSON unmarshals to float64
					val, ok := val.(float64)
					if !ok {
						log.Printf("could not convert model parmeter %v to float32, skipped", key)
						continue
						return fmt.Errorf("option %q must be of type float32", key)
					}
					field.SetFloat(val)
				case reflect.String:
					val, ok := val.(string)
					if !ok {
						log.Printf("could not convert model parmeter %v to string, skipped", key)
						continue
						return fmt.Errorf("option %q must be of type string", key)
					}
					field.SetString(val)
				case reflect.Slice:
					// JSON unmarshals to []interface{}, not []string
					val, ok := val.([]interface{})
					if !ok {
						log.Printf("could not convert model parmeter %v to slice, skipped", key)
						continue
						return fmt.Errorf("option %q must be of type array", key)
					}
					// convert []interface{} to []string
					slice := make([]string, len(val))
					for i, item := range val {
						str, ok := item.(string)
						if !ok {
							log.Printf("could not convert model parmeter %v to slice of strings, skipped", key)
							continue
							return fmt.Errorf("option %q must be of an array of strings", key)
						}
						slice[i] = str
					}
@@ -269,45 +278,53 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
					return fmt.Errorf("unknown type loading config params: %v", field.Kind())
				}
			}
		} else {
			invalidOpts = append(invalidOpts, key)
		}
	}

	if len(invalidOpts) > 0 {
		return fmt.Errorf("%w: %v", ErrInvalidOpts, strings.Join(invalidOpts, ", "))
	}
	return nil
}

func DefaultOptions() Options {
	return Options{
		Seed: -1,

		UseNUMA: false,

		NumCtx:             2048,
		NumKeep:            -1,
		NumBatch:           512,
		NumGPU:             1,
		NumGQA:             1,
		LowVRAM:            false,
		F16KV:              true,
		UseMMap:            true,
		UseMLock:           false,
		RopeFrequencyBase:  10000.0,
		RopeFrequencyScale: 1.0,
		EmbeddingOnly:      true,

		RepeatLastN:      64,
		RepeatPenalty:    1.1,
		FrequencyPenalty: 0.0,
		PresencePenalty:  0.0,
		// options set on request to runner
		NumPredict:       -1,
		NumKeep:          -1,
		Temperature:      0.8,
		TopK:             40,
		TopP:             0.9,
		TFSZ:             1.0,
		TypicalP:         1.0,
		RepeatLastN:      64,
		RepeatPenalty:    1.1,
		PresencePenalty:  0.0,
		FrequencyPenalty: 0.0,
		Mirostat:         0,
		MirostatTau:      5.0,
		MirostatEta:      0.1,
		PenalizeNewline:  true,
		Seed:             -1,

		NumThread: runtime.NumCPU(),
		Runner: Runner{
			// options set when the model is loaded
			NumCtx:             2048,
			RopeFrequencyBase:  10000.0,
			RopeFrequencyScale: 1.0,
			NumBatch:           512,
			NumGPU:             -1, // -1 here indicates that NumGPU should be set dynamically
			NumGQA:             1,
			NumThread:          0, // let the runtime decide
			LowVRAM:            false,
			F16KV:              true,
			UseMLock:           false,
			UseMMap:            true,
			UseNUMA:            false,
			EmbeddingOnly:      true,
		},
	}
}
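The JSON tags on `Runner` and `Options` above are the keys accepted in the `options` map of a generate request. A minimal sketch of setting a few of them over the REST API, assuming a local server and a pulled `llama2` model (the values are illustrative, not recommended defaults):

```python
import json
import requests

# Keys mirror the JSON tags on api.Options above.
payload = {
    "model": "llama2",
    "prompt": "Why is the sky blue?",
    "options": {
        "temperature": 0.8,
        "top_p": 0.9,
        "num_ctx": 2048,
        "num_predict": 128,
        "seed": 42,
        "stop": ["\n\n"],
    },
}

with requests.post("http://localhost:11434/api/generate", json=payload, stream=True) as r:
    r.raise_for_status()
    for line in r.iter_lines():
        if line:
            chunk = json.loads(line)
            print(chunk.get("response", ""), end="", flush=True)
```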
@@ -27,7 +27,6 @@ const config: ForgeConfig = {
      path.join(__dirname, './assets/iconDarkTemplate@2x.png'),
      path.join(__dirname, './assets/iconDarkUpdateTemplate.png'),
      path.join(__dirname, './assets/iconDarkUpdateTemplate@2x.png'),
      ...(process.platform === 'darwin' ? ['../llama/ggml-metal.metal'] : []),
    ],
    ...(process.env.SIGN
      ? {
@@ -48,16 +47,6 @@ const config: ForgeConfig = {
  },
  rebuildConfig: {},
  makers: [new MakerSquirrel({}), new MakerZIP({}, ['darwin'])],
  publishers: [
    new PublisherGithub({
      repository: {
        name: 'ollama',
        owner: 'jmorganca',
      },
      draft: false,
      prerelease: true,
    }),
  ],
  hooks: {
    readPackageJson: async (_, packageJson) => {
      return { ...packageJson, version: process.env.VERSION || packageJson.version }
app/package-lock.json (generated, 992 changes)
File diff suppressed because it is too large.

@@ -46,7 +46,7 @@
    "chmodr": "^1.2.0",
    "copy-webpack-plugin": "^11.0.0",
    "css-loader": "^6.8.1",
    "electron": "25.2.0",
    "electron": "25.9.2",
    "eslint": "^8.43.0",
    "eslint-plugin-import": "^2.27.5",
    "fork-ts-checker-webpack-plugin": "^7.3.0",
@@ -5,7 +5,7 @@ import winston from 'winston'
import 'winston-daily-rotate-file'
import * as path from 'path'

import { analytics, id } from './telemetry'
import { v4 as uuidv4 } from 'uuid'
import { installed } from './install'

require('@electron/remote/main').initialize()
@@ -158,17 +158,60 @@ function restart() {
app.on('before-quit', () => {
  if (proc) {
    proc.off('exit', restart)
    proc.kill()
    proc.kill('SIGINT') // send SIGINT signal to the server, which also stops any loaded llms
  }
})

const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${
  process.arch
}&version=${app.getVersion()}&id=${id()}`

let latest = ''
async function isNewReleaseAvailable() {
  try {
    const response = await fetch(updateURL)

    if (!response.ok) {
      return false
    }

    if (response.status === 204) {
      return false
    }

    const data = await response.json()

    const url = data?.url
    if (!url) {
      return false
    }

    if (latest === url) {
      return false
    }

    latest = url

    return true
  } catch (error) {
    logger.error(`update check failed - ${error}`)
    return false
  }
}

async function checkUpdate() {
  const available = await isNewReleaseAvailable()
  if (available) {
    logger.info('checking for update')
    autoUpdater.checkForUpdates()
  }
}

function init() {
  if (app.isPackaged) {
    heartbeat()
    autoUpdater.checkForUpdates()
    checkUpdate()
    setInterval(() => {
      heartbeat()
      autoUpdater.checkForUpdates()
      checkUpdate()
    }, 60 * 60 * 1000)
  }

@@ -234,28 +277,22 @@ app.on('window-all-closed', () => {
  }
})

// In this file you can include the rest of your app's specific main process
// code. You can also put them in separate files and import them here.
let aid = ''
try {
  aid = id()
} catch (e) {}
function id(): string {
  const id = store.get('id') as string

autoUpdater.setFeedURL({
  url: `https://ollama.ai/api/update?os=${process.platform}&arch=${process.arch}&version=${app.getVersion()}&id=${aid}`,
})
  if (id) {
    return id
  }

async function heartbeat() {
  analytics.track({
    anonymousId: aid,
    event: 'heartbeat',
    properties: {
      version: app.getVersion(),
    },
  })
  const uuid = uuidv4()
  store.set('id', uuid)
  return uuid
}

autoUpdater.setFeedURL({ url: updateURL })

autoUpdater.on('error', e => {
  logger.error(`update check failed - ${e.message}`)
  console.error(`update check failed - ${e.message}`)
})
@@ -1,19 +0,0 @@
import { Analytics } from '@segment/analytics-node'
import { v4 as uuidv4 } from 'uuid'
import Store from 'electron-store'

const store = new Store()

export const analytics = new Analytics({ writeKey: process.env.TELEMETRY_WRITE_KEY || '<empty>' })

export function id(): string {
  const id = store.get('id') as string

  if (id) {
    return id
  }

  const uuid = uuidv4()
  store.set('id', uuid)
  return uuid
}
cmd/cmd.go (694 changes)
File diff suppressed because it is too large.
@@ -3,3 +3,4 @@
- [Modelfile](./modelfile.md)
- [How to develop Ollama](./development.md)
- [API](./api.md)
- [Tutorials](./tutorials.md)
183
docs/api.md
183
docs/api.md
@@ -3,25 +3,32 @@
|
||||
## Endpoints
|
||||
|
||||
- [Generate a completion](#generate-a-completion)
|
||||
- [Create a model](#create-a-model)
|
||||
- [List local models](#list-local-models)
|
||||
- [Copy a model](#copy-a-model)
|
||||
- [Delete a model](#delete-a-model)
|
||||
- [Pull a model](#pull-a-model)
|
||||
- [Create a Model](#create-a-model)
|
||||
- [List Local Models](#list-local-models)
|
||||
- [Show Model Information](#show-model-information)
|
||||
- [Copy a Model](#copy-a-model)
|
||||
- [Delete a Model](#delete-a-model)
|
||||
- [Pull a Model](#pull-a-model)
|
||||
- [Push a Model](#push-a-model)
|
||||
- [Generate Embeddings](#generate-embeddings)
|
||||
|
||||
## Conventions
|
||||
|
||||
### Model names
|
||||
|
||||
Model names follow a `model:tag` format. Some examples are `orca:3b-q4_1` and `llama2:70b`. The tag is optional and if not provided will default to `latest`. The tag is used to identify a specific version.
|
||||
Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama2:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.
|
||||
|
||||
### Durations
|
||||
|
||||
All durations are returned in nanoseconds.
|
||||
|
||||
### Streaming responses
|
||||
|
||||
Certain endpoints stream responses as JSON objects delineated with the newline (`\n`) character.
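Below is a minimal sketch of how a client might consume such a stream. It is illustrative only and assumes the Python `requests` package and a server at the default `localhost:11434` address:

```python
import json
import requests

# Each non-empty line of the body is a complete JSON object.
with requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "llama2:7b", "prompt": "Why is the sky blue?"},
    stream=True,
) as response:
    response.raise_for_status()
    for line in response.iter_lines():
        if not line:
            continue
        chunk = json.loads(line)
        # Intermediate objects carry a partial "response"; the final one has "done": true.
        print(chunk.get("response", ""), end="", flush=True)
        if chunk.get("done"):
            break
```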
|
||||
|
||||
## Generate a completion
|
||||
|
||||
```
|
||||
```shell
|
||||
POST /api/generate
|
||||
```
|
||||
|
||||
@@ -32,15 +39,17 @@ Generate a response for a given prompt with a provided model. This is a streamin
|
||||
- `model`: (required) the [model name](#model-names)
|
||||
- `prompt`: the prompt to generate a response for
|
||||
|
||||
Advanced parameters:
|
||||
Advanced parameters (optional):
|
||||
|
||||
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
|
||||
- `system`: system prompt to (overrides what is defined in the `Modelfile`)
|
||||
- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
|
||||
- `context`: the context parameter returned from a previous request to `/generate`; this can be used to keep a short conversational memory
|
||||
- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
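As a hedged usage sketch (assuming the Python `requests` package and a local server), the advanced parameters are passed alongside the prompt; with `stream` set to `false` the endpoint returns a single JSON object:

```python
import requests

# Non-streaming request that sets a few of the optional parameters above.
response = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "llama2:7b",
        "prompt": "Why is the sky blue?",
        "system": "You are a concise assistant.",  # overrides the Modelfile system prompt
        "options": {"temperature": 0.7},           # Modelfile-style parameters
        "stream": False,                           # single response object instead of a stream
    },
)
response.raise_for_status()
print(response.json()["response"])
```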
|
||||
|
||||
### Request
|
||||
|
||||
```
|
||||
```shell
|
||||
curl -X POST http://localhost:11434/api/generate -d '{
|
||||
"model": "llama2:7b",
|
||||
"prompt": "Why is the sky blue?"
|
||||
@@ -70,6 +79,8 @@ The final response in the stream also includes additional data about the generat
|
||||
- `prompt_eval_duration`: time spent in nanoseconds evaluating the prompt
|
||||
- `eval_count`: number of tokens the response
|
||||
- `eval_duration`: time in nanoseconds spent generating the response
|
||||
- `context`: an encoding of the conversation used in this response; this can be sent in the next request to keep a conversational memory
|
||||
- `response`: empty if the response was streamed; if not streamed, this will contain the full response
|
||||
|
||||
To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` by `eval_duration` and multiply by `10^9` (durations are reported in nanoseconds).
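The short sketch below (assuming the Python `requests` package) works with these fields: it derives tokens per second from `eval_count` and `eval_duration`, and feeds `context` back into a follow-up request to keep a conversational memory:

```python
import requests

def generate(prompt, context=None):
    # Non-streaming for brevity; the same fields appear in the final streamed object.
    body = {"model": "llama2:7b", "prompt": prompt, "stream": False}
    if context:
        body["context"] = context
    data = requests.post("http://localhost:11434/api/generate", json=body).json()
    # eval_duration is in nanoseconds, so scale by 1e9 to get tokens per second.
    print(f"{data['eval_count'] / data['eval_duration'] * 1e9:.1f} token/s")
    return data["response"], data["context"]

answer, ctx = generate("Why is the sky blue?")
followup, ctx = generate("Summarize that in one sentence.", context=ctx)
```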
|
||||
|
||||
@@ -77,6 +88,8 @@ To calculate how fast the response is generated in tokens per second (token/s),
|
||||
{
|
||||
"model": "llama2:7b",
|
||||
"created_at": "2023-08-04T19:22:45.499127Z",
|
||||
"response": "",
|
||||
"context": [1, 2, 3],
|
||||
"done": true,
|
||||
"total_duration": 5589157167,
|
||||
"load_duration": 3013701500,
|
||||
@@ -91,7 +104,7 @@ To calculate how fast the response is generated in tokens per second (token/s),
|
||||
|
||||
## Create a Model
|
||||
|
||||
```
|
||||
```shell
|
||||
POST /api/create
|
||||
```
|
||||
|
||||
@@ -101,10 +114,11 @@ Create a model from a [`Modelfile`](./modelfile.md)
|
||||
|
||||
- `name`: name of the model to create
|
||||
- `path`: path to the Modelfile
|
||||
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects
|
||||
|
||||
### Request
|
||||
|
||||
```
|
||||
```shell
|
||||
curl -X POST http://localhost:11434/api/create -d '{
|
||||
"name": "mario",
|
||||
"path": "~/Modelfile"
|
||||
@@ -113,7 +127,7 @@ curl -X POST http://localhost:11434/api/create -d '{
|
||||
|
||||
### Response
|
||||
|
||||
A stream of JSON objects. When finished, `status` is `success`
|
||||
A stream of JSON objects. When finished, `status` is `success`.
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -123,7 +137,7 @@ A stream of JSON objects. When finished, `status` is `success`
|
||||
|
||||
## List Local Models
|
||||
|
||||
```
|
||||
```shell
|
||||
GET /api/tags
|
||||
```
|
||||
|
||||
@@ -131,7 +145,7 @@ List models that are available locally.
|
||||
|
||||
### Request
|
||||
|
||||
```
|
||||
```shell
|
||||
curl http://localhost:11434/api/tags
|
||||
```
|
||||
|
||||
@@ -154,9 +168,40 @@ curl http://localhost:11434/api/tags
|
||||
}
|
||||
```
|
||||
|
||||
## Show Model Information
|
||||
|
||||
```shell
|
||||
POST /api/show
|
||||
```
|
||||
|
||||
Show details about a model including modelfile, template, parameters, license, and system prompt.
|
||||
|
||||
### Parameters
|
||||
|
||||
- `name`: name of the model to show
|
||||
|
||||
### Request
|
||||
|
||||
```shell
|
||||
curl http://localhost:11434/api/show -d '{
|
||||
"name": "llama2:7b"
|
||||
}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"license": "<contents of license block>",
|
||||
"modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llama2:latest\n\nFROM /Users/username/.ollama/models/blobs/sha256:8daa9615cce30c259a9555b1cc250d461d1bc69980a274b44d7eda0be78076d8\nTEMPLATE \"\"\"[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] \"\"\"\nSYSTEM \"\"\"\"\"\"\nPARAMETER stop [INST]\nPARAMETER stop [/INST]\nPARAMETER stop <<SYS>>\nPARAMETER stop <</SYS>>\n",
|
||||
"parameters": "stop [INST]\nstop [/INST]\nstop <<SYS>>\nstop <</SYS>>",
|
||||
"template": "[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] "
|
||||
}
|
||||
```
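A brief usage sketch (assuming the Python `requests` package): the returned fields can be read programmatically, for example to inspect a model's prompt template and parameters:

```python
import requests

# Fetch model details and print the prompt template and parameters.
info = requests.post(
    "http://localhost:11434/api/show",
    json={"name": "llama2:7b"},
).json()
print(info["template"])
print(info["parameters"])
```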
|
||||
|
||||
## Copy a Model
|
||||
|
||||
```
|
||||
```shell
|
||||
POST /api/copy
|
||||
```
|
||||
|
||||
@@ -164,7 +209,7 @@ Copy a model. Creates a model with another name from an existing model.
|
||||
|
||||
### Request
|
||||
|
||||
```
|
||||
```shell
|
||||
curl http://localhost:11434/api/copy -d '{
|
||||
"source": "llama2:7b",
|
||||
"destination": "llama2-backup"
|
||||
@@ -173,7 +218,7 @@ curl http://localhost:11434/api/copy -d '{
|
||||
|
||||
## Delete a Model
|
||||
|
||||
```
|
||||
```shell
|
||||
DELETE /api/delete
|
||||
```
|
||||
|
||||
@@ -185,7 +230,7 @@ Delete a model and its data.
|
||||
|
||||
### Request
|
||||
|
||||
```
|
||||
```shell
|
||||
curl -X DELETE http://localhost:11434/api/delete -d '{
|
||||
"name": "llama2:13b"
|
||||
}'
|
||||
@@ -193,19 +238,21 @@ curl -X DELETE http://localhost:11434/api/delete -d '{
|
||||
|
||||
## Pull a Model
|
||||
|
||||
```
|
||||
```shell
|
||||
POST /api/pull
|
||||
```
|
||||
|
||||
Download a model from a the model registry. Cancelled pulls are resumed from where they left off, and multiple calls to will share the same download progress.
|
||||
Download a model from the ollama library. Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.
|
||||
|
||||
### Parameters
|
||||
|
||||
- `name`: name of the model to pull
|
||||
- `insecure`: (optional) allow insecure connections to the library. Only use this if you are pulling from your own library during development.
|
||||
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects
|
||||
|
||||
### Request
|
||||
|
||||
```
|
||||
```shell
|
||||
curl -X POST http://localhost:11434/api/pull -d '{
|
||||
"name": "llama2:7b"
|
||||
}'
|
||||
@@ -220,3 +267,97 @@ curl -X POST http://localhost:11434/api/pull -d '{
|
||||
"total": 2142590208
|
||||
}
|
||||
```
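A minimal sketch of following the pull progress (assuming the Python `requests` package, and that the stream finishes with a `success` status like the other streaming endpoints):

```python
import json
import requests

# Stream pull progress; each line is a JSON status object.
with requests.post(
    "http://localhost:11434/api/pull",
    json={"name": "llama2:7b"},
    stream=True,
) as response:
    response.raise_for_status()
    for line in response.iter_lines():
        if not line:
            continue
        status = json.loads(line)
        # Download status objects include a "status" string and a "total" byte count.
        print(status.get("status"), status.get("total", ""))
        if status.get("status") == "success":
            break
```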
|
||||
|
||||
## Push a Model
|
||||
|
||||
```shell
|
||||
POST /api/push
|
||||
```
|
||||
|
||||
Upload a model to a model library. Requires registering for ollama.ai and adding a public key first.
|
||||
|
||||
### Parameters
|
||||
|
||||
- `name`: name of the model to push in the form of `<namespace>/<model>:<tag>`
|
||||
- `insecure`: (optional) allow insecure connections to the library. Only use this if you are pushing to your library during development.
|
||||
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects
|
||||
|
||||
### Request
|
||||
|
||||
```shell
|
||||
curl -X POST http://localhost:11434/api/push -d '{
|
||||
"name": "mattw/pygmalion:latest"
|
||||
}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
Streaming response that starts with:
|
||||
|
||||
```json
|
||||
{ "status": "retrieving manifest" }
|
||||
```
|
||||
|
||||
and then:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "starting upload",
|
||||
"digest": "sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
|
||||
"total": 1928429856
|
||||
}
|
||||
```
|
||||
|
||||
Then there is a series of uploading responses:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "starting upload",
|
||||
"digest": "sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
|
||||
"total": 1928429856
|
||||
}
|
||||
```
|
||||
|
||||
Finally, when the upload is complete:
|
||||
|
||||
```json
|
||||
{"status":"pushing manifest"}
|
||||
{"status":"success"}
|
||||
```
|
||||
|
||||
## Generate Embeddings
|
||||
|
||||
```shell
|
||||
POST /api/embeddings
|
||||
```
|
||||
|
||||
Generate embeddings from a model
|
||||
|
||||
### Parameters
|
||||
|
||||
- `model`: name of model to generate embeddings from
|
||||
- `prompt`: text to generate embeddings for
|
||||
|
||||
Advanced parameters:
|
||||
|
||||
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
|
||||
|
||||
### Request
|
||||
|
||||
```shell
|
||||
curl -X POST http://localhost:11434/api/embeddings -d '{
|
||||
"model": "llama2:7b",
|
||||
"prompt": "Here is an article about llamas..."
|
||||
}'
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
{
|
||||
"embedding": [
|
||||
0.5670403838157654, 0.009260174818336964, 0.23178744316101074, -0.2916173040866852, -0.8924556970596313,
|
||||
0.8785552978515625, -0.34576427936553955, 0.5742510557174683, -0.04222835972905159, -0.137906014919281
|
||||
]
|
||||
}
|
||||
```
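A short sketch of using the returned vector (assuming the Python `requests` package), for example to compare two prompts by cosine similarity:

```python
import math
import requests

def embed(prompt):
    # Returns the embedding vector for the given prompt.
    body = {"model": "llama2:7b", "prompt": prompt}
    return requests.post("http://localhost:11434/api/embeddings", json=body).json()["embedding"]

def cosine(a, b):
    # Cosine similarity between two equal-length vectors.
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b))
    return dot / norm

print(cosine(embed("Here is an article about llamas..."),
             embed("Llamas are South American camelids.")))
```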
|
||||
|
@@ -1,44 +1,39 @@
|
||||
# Development
|
||||
|
||||
- Install cmake or (optionally, required tools for GPUs)
|
||||
- run `go generate ./...`
|
||||
- run `go build .`
|
||||
|
||||
Install required tools:
|
||||
|
||||
```
|
||||
brew install go
|
||||
- cmake version 3.24 or higher
|
||||
- go version 1.20 or higher
|
||||
- gcc version 11.4.0 or higher
|
||||
|
||||
```bash
|
||||
brew install go cmake gcc
|
||||
```
|
||||
|
||||
Enable CGO:
|
||||
Get the required libraries:
|
||||
|
||||
```bash
|
||||
go generate ./...
|
||||
```
|
||||
export CGO_ENABLED=1
|
||||
```
|
||||
|
||||
You will also need a C/C++ compiler such as GCC for MacOS and Linux or Mingw-w64 GCC for Windows.
|
||||
|
||||
Then build ollama:
|
||||
|
||||
```
|
||||
```bash
|
||||
go build .
|
||||
```
|
||||
|
||||
Now you can run `ollama`:
|
||||
|
||||
```
|
||||
```bash
|
||||
./ollama
|
||||
```
|
||||
|
||||
## Releasing
|
||||
## Building on Linux with GPU support
|
||||
|
||||
To release a new version of Ollama you'll need to set some environment variables:
|
||||
|
||||
- `GITHUB_TOKEN`: your GitHub token
|
||||
- `APPLE_IDENTITY`: the Apple signing identity (macOS only)
|
||||
- `APPLE_ID`: your Apple ID
|
||||
- `APPLE_PASSWORD`: your Apple ID app-specific password
|
||||
- `APPLE_TEAM_ID`: the Apple team ID for the signing identity
|
||||
- `TELEMETRY_WRITE_KEY`: segment write key for telemetry
|
||||
|
||||
Then run the publish script with the target version:
|
||||
|
||||
```
|
||||
VERSION=0.0.2 ./scripts/publish.sh
|
||||
```
|
||||
- Install cmake and nvidia-cuda-toolkit
|
||||
- run `go generate ./...`
|
||||
- run `go build .`
|
||||
|
23
docs/faq.md
@@ -1,17 +1,34 @@
|
||||
# FAQ
|
||||
|
||||
## How can I view the logs?
|
||||
|
||||
On macOS:
|
||||
|
||||
```
|
||||
cat ~/.ollama/logs/server.log
|
||||
```
|
||||
|
||||
On Linux:
|
||||
|
||||
```
|
||||
journalctl -u ollama
|
||||
```
|
||||
|
||||
If you're running `ollama serve` directly, the logs will be printed to the console.
|
||||
|
||||
## How can I expose the Ollama server?
|
||||
|
||||
```
|
||||
```bash
|
||||
OLLAMA_HOST=0.0.0.0:11435 ollama serve
|
||||
```
|
||||
|
||||
By default, Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0`. To support more origins, you can use the `OLLAMA_ORIGINS` environment variable:
|
||||
|
||||
```
|
||||
```bash
|
||||
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
|
||||
```
|
||||
|
||||
## Where are models stored?
|
||||
|
||||
Raw model data is stored under `~/.ollama/models`.
|
||||
- macOS: Raw model data is stored under `~/.ollama/models`.
|
||||
- Linux: Raw model data is stored under `/usr/share/ollama/.ollama/models`
|
||||
|
163
docs/import.md
Normal file
@@ -0,0 +1,163 @@
|
||||
# Import a model
|
||||
|
||||
This guide walks through importing a PyTorch, Safetensors or GGUF model.
|
||||
|
||||
## Supported models
|
||||
|
||||
Ollama supports a set of model architectures, with support for more coming soon:
|
||||
|
||||
- Llama & Mistral
|
||||
- Falcon & RW
|
||||
- GPT-NeoX
|
||||
- BigCode
|
||||
|
||||
To view a model's architecture, check the `config.json` file in its HuggingFace repo. You should see an entry under `architectures` (e.g. `LlamaForCausalLM`).
|
||||
|
||||
## Importing
|
||||
|
||||
### Step 1: Clone the HuggingFace repository (optional)
|
||||
|
||||
If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.
|
||||
|
||||
```
|
||||
git lfs install
|
||||
git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
|
||||
cd Mistral-7B-Instruct-v0.1
|
||||
```
|
||||
|
||||
### Step 2: Convert and quantize to a `.bin` file (optional, for PyTorch and Safetensors)
|
||||
|
||||
If the model is in PyTorch or Safetensors format, a [Docker image](https://hub.docker.com/r/ollama/quantize) with the tooling required to convert and quantize models is available.
|
||||
|
||||
First, install [Docker](https://www.docker.com/get-started/).
|
||||
|
||||
Next, to convert and quantize your model, run:
|
||||
|
||||
```
|
||||
docker run --rm -v .:/model ollama/quantize -q q4_0 /model
|
||||
```
|
||||
|
||||
This will output two files into the directory:
|
||||
|
||||
- `f16.bin`: the model converted to GGUF
|
||||
- `q4_0.bin`: the model quantized to 4 bits (we will use this file to create the Ollama model)
|
||||
|
||||
### Step 3: Write a `Modelfile`
|
||||
|
||||
Next, create a `Modelfile` for your model. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.
|
||||
|
||||
```
|
||||
FROM ./q4_0.bin
|
||||
```
|
||||
|
||||
(Optional) Many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
|
||||
|
||||
```
|
||||
FROM ./q4_0.bin
|
||||
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
|
||||
```
|
||||
|
||||
### Step 4: Create the Ollama model
|
||||
|
||||
Finally, create a model from your `Modelfile`:
|
||||
|
||||
```
|
||||
ollama create example -f Modelfile
|
||||
```
|
||||
|
||||
Next, test the model with `ollama run`:
|
||||
|
||||
```
|
||||
ollama run example "What is your favourite condiment?"
|
||||
```
|
||||
|
||||
### Step 5: Publish your model (optional – early alpha)
|
||||
|
||||
Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:
|
||||
|
||||
1. Create [an account](https://ollama.ai/signup)
|
||||
2. Run `cat ~/.ollama/id_ed25519.pub` to view your Ollama public key. Copy this to the clipboard.
|
||||
3. Add your public key to your [Ollama account](https://ollama.ai/settings/keys)
|
||||
|
||||
Next, copy your model to your username's namespace:
|
||||
|
||||
```
|
||||
ollama cp example <your username>/example
|
||||
```
|
||||
|
||||
Then push the model:
|
||||
|
||||
```
|
||||
ollama push <your username>/example
|
||||
```
|
||||
|
||||
After publishing, your model will be available at `https://ollama.ai/<your username>/example`.
|
||||
|
||||
## Quantization reference
|
||||
|
||||
The quantization options are as follows (from highest to lowest level of quantization). Note: some architectures, such as Falcon, do not support K quants.
|
||||
|
||||
- `q2_K`
|
||||
- `q3_K`
|
||||
- `q3_K_S`
|
||||
- `q3_K_M`
|
||||
- `q3_K_L`
|
||||
- `q4_0` (recommended)
|
||||
- `q4_1`
|
||||
- `q4_K`
|
||||
- `q4_K_S`
|
||||
- `q4_K_M`
|
||||
- `q5_0`
|
||||
- `q5_1`
|
||||
- `q5_K`
|
||||
- `q5_K_S`
|
||||
- `q5_K_M`
|
||||
- `q6_K`
|
||||
- `q8_0`
|
||||
|
||||
## Manually converting & quantizing models
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Start by cloning the `llama.cpp` repo to your machine in another directory:
|
||||
|
||||
```
|
||||
git clone https://github.com/ggerganov/llama.cpp.git
|
||||
cd llama.cpp
|
||||
```
|
||||
|
||||
Next, install the Python dependencies:
|
||||
|
||||
```
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
Finally, build the `quantize` tool:
|
||||
|
||||
```
|
||||
make quantize
|
||||
```
|
||||
|
||||
### Convert the model
|
||||
|
||||
Run the correct conversion script for your model architecture:
|
||||
|
||||
```shell
|
||||
# LlamaForCausalLM or MistralForCausalLM
|
||||
python convert.py <path to model directory>
|
||||
|
||||
# FalconForCausalLM
|
||||
python convert-falcon-hf-to-gguf.py <path to model directory>
|
||||
|
||||
# GPTNeoXForCausalLM
|
||||
python convert-gptneox-hf-to-gguf.py <path to model directory>
|
||||
|
||||
# GPTBigCodeForCausalLM
|
||||
python convert-starcoder-hf-to-gguf.py <path to model directory>
|
||||
```
|
||||
|
||||
### Quantize the model
|
||||
|
||||
```
|
||||
quantize <path to model dir>/ggml-model-f32.bin <path to model dir>/q4_0.bin q4_0
|
||||
```
|
82
docs/linux.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Installing Ollama on Linux
|
||||
|
||||
> Note: A one line installer for Ollama is available by running:
|
||||
>
|
||||
> ```bash
|
||||
> curl https://ollama.ai/install.sh | sh
|
||||
> ```
|
||||
|
||||
## Download the `ollama` binary
|
||||
|
||||
Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:
|
||||
|
||||
```bash
|
||||
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
|
||||
sudo chmod +x /usr/bin/ollama
|
||||
```
|
||||
|
||||
## Start Ollama
|
||||
|
||||
Start Ollama by running `ollama serve`:
|
||||
|
||||
```bash
|
||||
ollama serve
|
||||
```
|
||||
|
||||
Once Ollama is running, run a model in another terminal session:
|
||||
|
||||
```bash
|
||||
ollama run llama2
|
||||
```
|
||||
|
||||
## Install CUDA drivers (optional – for Nvidia GPUs)
|
||||
|
||||
[Download and install](https://developer.nvidia.com/cuda-downloads) CUDA.
|
||||
|
||||
Verify that the drivers are installed by running the following command, which should print details about your GPU:
|
||||
|
||||
```bash
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
## Adding Ollama as a startup service (optional)
|
||||
|
||||
Create a user for Ollama:
|
||||
|
||||
```bash
|
||||
sudo useradd -r -s /bin/false -m -d /usr/share/ollama ollama
|
||||
```
|
||||
|
||||
Create a service file in `/etc/systemd/system/ollama.service`:
|
||||
|
||||
```ini
|
||||
[Unit]
|
||||
Description=Ollama Service
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/bin/ollama serve
|
||||
User=ollama
|
||||
Group=ollama
|
||||
Restart=always
|
||||
RestartSec=3
|
||||
Environment="HOME=/usr/share/ollama"
|
||||
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
```
|
||||
|
||||
Then start the service:
|
||||
|
||||
```bash
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable ollama
|
||||
```
|
||||
|
||||
### Viewing logs
|
||||
|
||||
To view logs of Ollama running as a startup service, run:
|
||||
|
||||
```bash
|
||||
journalctl -u ollama
|
||||
```
|
@@ -1,6 +1,6 @@
|
||||
# Ollama Model File
|
||||
|
||||
> Note: this model file syntax is in development
|
||||
> Note: this `Modelfile` syntax is in development
|
||||
|
||||
A model file is the blueprint to create and share models with Ollama.
|
||||
|
||||
@@ -17,12 +17,13 @@ A model file is the blueprint to create and share models with Ollama.
|
||||
- [TEMPLATE](#template)
|
||||
- [Template Variables](#template-variables)
|
||||
- [SYSTEM](#system)
|
||||
- [ADAPTER](#adapter)
|
||||
- [LICENSE](#license)
|
||||
- [Notes](#notes)
|
||||
|
||||
## Format
|
||||
|
||||
The format of the Modelfile:
|
||||
The format of the `Modelfile`:
|
||||
|
||||
```modelfile
|
||||
# comment
|
||||
@@ -35,13 +36,14 @@ INSTRUCTION arguments
|
||||
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
|
||||
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
|
||||
| [`SYSTEM`](#system) | Specifies the system prompt that will be set in the template. |
|
||||
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
|
||||
| [`LICENSE`](#license) | Specifies the legal license. |
|
||||
|
||||
## Examples
|
||||
|
||||
An example of a model file creating a mario blueprint:
|
||||
An example of a `Modelfile` creating a mario blueprint:
|
||||
|
||||
```
|
||||
```modelfile
|
||||
FROM llama2
|
||||
# sets the temperature to 1 [higher is more creative, lower is more coherent]
|
||||
PARAMETER temperature 1
|
||||
@@ -54,9 +56,9 @@ SYSTEM You are Mario from super mario bros, acting as an assistant.
|
||||
|
||||
To use this:
|
||||
|
||||
1. Save it as a file (eg. `Modelfile`)
|
||||
2. `ollama create NAME -f <location of the file eg. ./Modelfile>'`
|
||||
3. `ollama run NAME`
|
||||
1. Save it as a file (e.g. `Modelfile`)
|
||||
2. `ollama create choose-a-model-name -f <location of the file e.g. ./Modelfile>'`
|
||||
3. `ollama run choose-a-model-name`
|
||||
4. Start using the model!
|
||||
|
||||
More examples are available in the [examples directory](../examples).
|
||||
@@ -65,34 +67,34 @@ More examples are available in the [examples directory](../examples).
|
||||
|
||||
### FROM (Required)
|
||||
|
||||
The FROM instruction defines the base model to use when creating a model.
|
||||
The `FROM` instruction defines the base model to use when creating a model.
|
||||
|
||||
```
|
||||
```modelfile
|
||||
FROM <model name>:<tag>
|
||||
```
|
||||
|
||||
#### Build from llama2
|
||||
|
||||
```
|
||||
```modelfile
|
||||
FROM llama2
|
||||
```
|
||||
|
||||
A list of available base models:
|
||||
<https://github.com/jmorganca/ollama#model-library>
|
||||
|
||||
#### Build from a bin file
|
||||
#### Build from a `bin` file
|
||||
|
||||
```
|
||||
```modelfile
|
||||
FROM ./ollama-model.bin
|
||||
```
|
||||
|
||||
This bin file location should be specified as an absolute path or relative to the Modelfile location.
|
||||
This bin file location should be specified as an absolute path or relative to the `Modelfile` location.
|
||||
|
||||
### PARAMETER
|
||||
|
||||
The `PARAMETER` instruction defines a parameter that can be set when the model is run.
|
||||
|
||||
```
|
||||
```modelfile
|
||||
PARAMETER <parameter> <parametervalue>
|
||||
```
|
||||
|
||||
@@ -104,19 +106,22 @@ PARAMETER <parameter> <parametervalue>
|
||||
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
|
||||
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
|
||||
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
|
||||
| num_gpu | The number of GPUs to use. On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 1 |
|
||||
| num_gqa | The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b | int | num_gqa 1 |
|
||||
| num_gpu | The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 50 |
|
||||
| num_thread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | num_thread 8 |
|
||||
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
|
||||
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
|
||||
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
|
||||
| stop | Sets the stop tokens to use. | string | stop "AI assistant:" |
|
||||
| seed | Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) | int | seed 42 |
|
||||
| stop | Sets the stop sequences to use. | string | stop "AI assistant:" |
|
||||
| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 |
|
||||
| num_predict | Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) | int | num_predict 42 |
|
||||
| top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 |
|
||||
| top_p | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) | float | top_p 0.9 |
|
||||
|
||||
### TEMPLATE
|
||||
|
||||
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific.
|
||||
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific. You can usually find the template for a given model in the readme for that model.
|
||||
|
||||
#### Template Variables
|
||||
|
||||
@@ -126,7 +131,7 @@ PARAMETER <parameter> <parametervalue>
|
||||
| `{{ .Prompt }}` | The incoming prompt; this is not specified in the model file and will be set based on input. |
|
||||
| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |
|
||||
|
||||
```
|
||||
```modelfile
|
||||
TEMPLATE """
|
||||
{{- if .First }}
|
||||
### System:
|
||||
@@ -146,15 +151,23 @@ SYSTEM """<system message>"""
|
||||
|
||||
The `SYSTEM` instruction specifies the system prompt to be used in the template, if applicable.
|
||||
|
||||
```
|
||||
```modelfile
|
||||
SYSTEM """<system message>"""
|
||||
```
|
||||
|
||||
### ADAPTER
|
||||
|
||||
The `ADAPTER` instruction specifies the LoRA adapter to apply to the base model. The value of this instruction should be an absolute path or a path relative to the Modelfile and the file must be in a GGML file format. The adapter should be tuned from the base model otherwise the behaviour is undefined.
|
||||
|
||||
```modelfile
|
||||
ADAPTER ./ollama-lora.bin
|
||||
```
|
||||
|
||||
### LICENSE
|
||||
|
||||
The `LICENSE` instruction allows you to specify the legal license under which the model used with this Modelfile is shared or distributed.
|
||||
|
||||
```
|
||||
```modelfile
|
||||
LICENSE """
|
||||
<license text>
|
||||
"""
|
||||
@@ -162,5 +175,5 @@ LICENSE """
|
||||
|
||||
## Notes
|
||||
|
||||
- the **modelfile is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
|
||||
- Instructions can be in any order. In the examples, we start with FROM instruction to keep it easily readable.
|
||||
- the **`Modelfile` is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
|
||||
- Instructions can be in any order. In the examples, we start with FROM instruction to keep it easily readable.
|
||||
|
8
docs/tutorials.md
Normal file
@@ -0,0 +1,8 @@
|
||||
# Tutorials
|
||||
|
||||
Here is a list of ways you can use Ollama with other tools to build interesting applications.
|
||||
|
||||
- [Using LangChain with Ollama in JavaScript](./tutorials/langchainjs.md)
|
||||
- [Using LangChain with Ollama in Python](./tutorials/langchainpy.md)
|
||||
|
||||
Also be sure to check out the [examples](../examples) directory for more ways to use Ollama.
|
73
docs/tutorials/langchainjs.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Using LangChain with Ollama using JavaScript
|
||||
|
||||
In this tutorial, we are going to use JavaScript with LangChain and Ollama to learn about something just a touch more recent. In August 2023, there was a series of wildfires on Maui. There is no way an LLM trained before that time can know about this, since their training data would not include anything as recent as that. So we can find the [Wikipedia article about the fires](https://en.wikipedia.org/wiki/2023_Hawaii_wildfires) and ask questions about the contents.
|
||||
|
||||
To get started, let's just use **LangChain** to ask a simple question to a model. To do this with JavaScript, we need to install **LangChain**:
|
||||
|
||||
```bash
|
||||
npm install langchain
|
||||
```
|
||||
|
||||
Now we can start building out our JavaScript:
|
||||
|
||||
```javascript
|
||||
import { Ollama } from "langchain/llms/ollama";
|
||||
|
||||
const ollama = new Ollama({
|
||||
baseUrl: "http://localhost:11434",
|
||||
model: "llama2",
|
||||
});
|
||||
|
||||
const answer = await ollama.call(`why is the sky blue?`);
|
||||
|
||||
console.log(answer);
|
||||
```
|
||||
|
||||
That will get us the same thing as if we ran `ollama run llama2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's build that part of the app.
|
||||
|
||||
```javascript
|
||||
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";
|
||||
|
||||
const loader = new CheerioWebBaseLoader("https://en.wikipedia.org/wiki/2023_Hawaii_wildfires");
|
||||
const data = loader.load();
|
||||
```
|
||||
|
||||
That will load the document. Although this page is smaller than the Odyssey, it is certainly bigger than the context size for most LLMs. So we are going to need to split it into smaller pieces, and then select just the pieces relevant to our question. This is a great use for a vector datastore. In this example, we will use the **MemoryVectorStore** that is part of **LangChain**. But there is one more thing we need to get the content into the datastore. We have to run an embeddings process that converts the tokens in the text into a series of vectors. And for that, we are going to use **Tensorflow**. There is a lot of stuff going on in this one. First, install the **Tensorflow** components that we need.
|
||||
|
||||
```bash
|
||||
npm install @tensorflow/tfjs-core@3.6.0 @tensorflow/tfjs-converter@3.6.0 @tensorflow-models/universal-sentence-encoder@1.3.3 @tensorflow/tfjs-node@4.10.0
|
||||
```
|
||||
|
||||
If you just install those components without the version numbers, it will install the latest versions, but there are conflicts within **Tensorflow**, so you need to install the compatible versions.
|
||||
|
||||
```javascript
|
||||
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
|
||||
import { MemoryVectorStore } from "langchain/vectorstores/memory";
|
||||
import "@tensorflow/tfjs-node";
|
||||
import { TensorFlowEmbeddings } from "langchain/embeddings/tensorflow";
|
||||
|
||||
// Split the text into 500 character chunks. And overlap each chunk by 20 characters
|
||||
const textSplitter = new RecursiveCharacterTextSplitter({
|
||||
chunkSize: 500,
|
||||
chunkOverlap: 20
|
||||
});
|
||||
const splitDocs = await textSplitter.splitDocuments(data);
|
||||
|
||||
// Then use the TensorFlow Embedding to store these chunks in the datastore
|
||||
const vectorStore = await MemoryVectorStore.fromDocuments(splitDocs, new TensorFlowEmbeddings());
|
||||
```
|
||||
|
||||
To connect the datastore to a question asked to an LLM, we need to use the concept at the heart of **LangChain**: the chain. Chains are a way to connect a number of activities together to accomplish a particular task. There are a number of chain types available, but for this tutorial we are using the **RetrievalQAChain**.
|
||||
|
||||
```javascript
|
||||
import { RetrievalQAChain } from "langchain/chains";
|
||||
|
||||
const retriever = vectorStore.asRetriever();
|
||||
const chain = RetrievalQAChain.fromLLM(ollama, retriever);
|
||||
const result = await chain.call({query: "When was Hawaii's request for a major disaster declaration approved?"});
|
||||
console.log(result.text)
|
||||
```
|
||||
|
||||
So we created a retriever, which is a way to return the chunks that match a query from a datastore. Then we connect the retriever and the model via a chain. Finally, we send a query to the chain, which results in an answer using our document as a source. The answer it returned was correct: August 10, 2023.
|
||||
|
||||
And that is a simple introduction to what you can do with **LangChain** and **Ollama.**
|
81
docs/tutorials/langchainpy.md
Normal file
@@ -0,0 +1,81 @@
|
||||
# Using LangChain with Ollama in Python
|
||||
|
||||
Let's imagine we are studying the classics, such as **the Odyssey** by **Homer**. We might have a question about Neleus and his family. If you ask llama2 for that info, you may get something like:
|
||||
|
||||
> I apologize, but I'm a large language model, I cannot provide information on individuals or families that do not exist in reality. Neleus is not a real person or character, and therefore does not have a family or any other personal details. My apologies for any confusion. Is there anything else I can help you with?
|
||||
|
||||
This sounds like a typical censored response, but even llama2-uncensored gives a mediocre answer:
|
||||
|
||||
> Neleus was a legendary king of Pylos and the father of Nestor, one of the Argonauts. His mother was Clymene, a sea nymph, while his father was Neptune, the god of the sea.
|
||||
|
||||
So let's figure out how we can use **LangChain** with Ollama to ask our question to the actual document, the Odyssey by Homer, using Python.
|
||||
|
||||
Let's start by asking a simple question that we can get an answer to from the **Llama2** model using **Ollama**. First, we need to install the **LangChain** package:
|
||||
|
||||
`pip install langchain`
|
||||
|
||||
Then we can create a model and ask the question:
|
||||
|
||||
```python
|
||||
from langchain.llms import Ollama
|
||||
ollama = Ollama(base_url='http://localhost:11434',
|
||||
model="llama2")
|
||||
print(ollama("why is the sky blue"))
|
||||
```
|
||||
|
||||
Notice that we are defining the model and the base URL for Ollama.
|
||||
|
||||
Now let's load a document to ask questions against. I'll load up the Odyssey by Homer, which you can find at Project Gutenberg. We will need **WebBaseLoader** which is part of **LangChain** and loads text from any webpage. On my machine, I also needed to install **bs4** to get that to work, so run `pip install bs4`.
|
||||
|
||||
```python
|
||||
from langchain.document_loaders import WebBaseLoader
|
||||
loader = WebBaseLoader("https://www.gutenberg.org/files/1727/1727-h/1727-h.htm")
|
||||
data = loader.load()
|
||||
```
|
||||
|
||||
This file is pretty big. Just the preface is 3000 tokens, which means the full document won't fit into the context for the model, so we need to split it up into smaller pieces.
|
||||
|
||||
```python
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
|
||||
text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
|
||||
all_splits = text_splitter.split_documents(data)
|
||||
```
|
||||
|
||||
It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We don't have embeddings built into Ollama yet, though we will be adding that soon, so for now we can use the GPT4All library for that. We will use ChromaDB in this example for a vector database. `pip install GPT4All chromadb`
|
||||
|
||||
```python
|
||||
from langchain.embeddings import GPT4AllEmbeddings
|
||||
from langchain.vectorstores import Chroma
|
||||
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
|
||||
```
|
||||
|
||||
Now let's ask a question from the document. **Who was Neleus, and who is in his family?** Neleus is a character in the Odyssey, and the answer can be found in our text.
|
||||
|
||||
```python
|
||||
question="Who is Neleus and who is in Neleus' family?"
|
||||
docs = vectorstore.similarity_search(question)
|
||||
len(docs)
|
||||
```
|
||||
|
||||
This will output the number of matches for chunks of data similar to the search.
|
||||
|
||||
The next thing is to send the question and the relevant parts of the docs to the model to see if we can get a good answer. But we are stitching two parts of the process together, and that is called a chain. This means we need to define a chain:
|
||||
|
||||
```python
|
||||
from langchain.chains import RetrievalQA
|
||||
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
|
||||
qachain({"query": question})
|
||||
```
|
||||
|
||||
The answer received from this chain was:
|
||||
|
||||
> Neleus is a character in Homer's "Odyssey" and is mentioned in the context of Penelope's suitors. Neleus is the father of Chloris, who is married to Neleus and bears him several children, including Nestor, Chromius, Periclymenus, and Pero. Amphinomus, the son of Nisus, is also mentioned as a suitor of Penelope and is known for his good natural disposition and agreeable conversation.
|
||||
|
||||
It's not a perfect answer, as it implies Neleus married his daughter when actually Chloris "was the youngest daughter to Amphion son of Iasus and king of Minyan Orchomenus, and was Queen in Pylos".
|
||||
|
||||
I updated the chunk_overlap for the text splitter to 20 and tried again and got a much better answer:
|
||||
|
||||
> Neleus is a character in Homer's epic poem "The Odyssey." He is the husband of Chloris, who is the youngest daughter of Amphion son of Iasus and king of Minyan Orchomenus. Neleus has several children with Chloris, including Nestor, Chromius, Periclymenus, and Pero.
|
||||
|
||||
And that is a much better answer.
|
171
examples/.gitignore
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
node_modules
|
||||
# OSX
|
||||
.DS_STORE
|
||||
|
||||
# Models
|
||||
models/
|
||||
|
||||
# Local Chroma db
|
||||
.chroma/
|
||||
db/
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
@@ -1,15 +1,3 @@
|
||||
# Examples
|
||||
|
||||
This directory contains examples that can be created and run with `ollama`.
|
||||
|
||||
To create a model:
|
||||
|
||||
```
|
||||
ollama create example -f <example file>
|
||||
```
|
||||
|
||||
To run a model:
|
||||
|
||||
```
|
||||
ollama run example
|
||||
```
|
||||
This directory contains different examples of using Ollama.
|
||||
|
0
examples/golang-simplegenerate/README.md
Normal file
27
examples/golang-simplegenerate/main.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
body := []byte(`{"model":"mistral"}`)
|
||||
resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewBuffer(body))
|
||||
|
||||
if err != nil {
|
||||
fmt.Print(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
responseData, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Println(string(responseData))
|
||||
|
||||
}
|
21
examples/langchain-python-rag-document/README.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# LangChain Document QA
|
||||
|
||||
This example provides an interface for asking questions to a PDF document.
|
||||
|
||||
## Setup
|
||||
|
||||
```
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Run
|
||||
|
||||
```
|
||||
python main.py
|
||||
```
|
||||
|
||||
A prompt will appear, where questions may be asked:
|
||||
|
||||
```
|
||||
Query: How many locations does WeWork have?
|
||||
```
|
61
examples/langchain-python-rag-document/main.py
Normal file
@@ -0,0 +1,61 @@
|
||||
from langchain.document_loaders import OnlinePDFLoader
|
||||
from langchain.vectorstores import Chroma
|
||||
from langchain.embeddings import GPT4AllEmbeddings
|
||||
from langchain import PromptTemplate
|
||||
from langchain.llms import Ollama
|
||||
from langchain.callbacks.manager import CallbackManager
|
||||
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
|
||||
from langchain.chains import RetrievalQA
|
||||
import sys
|
||||
import os
|
||||
|
||||
class SuppressStdout:
|
||||
def __enter__(self):
|
||||
self._original_stdout = sys.stdout
|
||||
self._original_stderr = sys.stderr
|
||||
sys.stdout = open(os.devnull, 'w')
|
||||
sys.stderr = open(os.devnull, 'w')
|
||||
|
||||
def __exit__(self, exc_type, exc_val, exc_tb):
|
||||
sys.stdout.close()
|
||||
sys.stdout = self._original_stdout
|
||||
sys.stderr = self._original_stderr
|
||||
|
||||
# load the pdf and split it into chunks
|
||||
loader = OnlinePDFLoader("https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf")
|
||||
data = loader.load()
|
||||
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
|
||||
all_splits = text_splitter.split_documents(data)
|
||||
|
||||
with SuppressStdout():
|
||||
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
|
||||
|
||||
while True:
|
||||
query = input("\nQuery: ")
|
||||
if query == "exit":
|
||||
break
|
||||
if query.strip() == "":
|
||||
continue
|
||||
|
||||
# Prompt
|
||||
template = """Use the following pieces of context to answer the question at the end.
|
||||
If you don't know the answer, just say that you don't know, don't try to make up an answer.
|
||||
Use three sentences maximum and keep the answer as concise as possible.
|
||||
{context}
|
||||
Question: {question}
|
||||
Helpful Answer:"""
|
||||
QA_CHAIN_PROMPT = PromptTemplate(
|
||||
input_variables=["context", "question"],
|
||||
template=template,
|
||||
)
|
||||
|
||||
llm = Ollama(model="llama2:13b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
|
||||
qa_chain = RetrievalQA.from_chain_type(
|
||||
llm,
|
||||
retriever=vectorstore.as_retriever(),
|
||||
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
|
||||
)
|
||||
|
||||
result = qa_chain({"query": query})
|
109
examples/langchain-python-rag-document/requirements.txt
Normal file
@@ -0,0 +1,109 @@
|
||||
absl-py==1.4.0
|
||||
aiohttp==3.8.5
|
||||
aiosignal==1.3.1
|
||||
anyio==3.7.1
|
||||
astunparse==1.6.3
|
||||
async-timeout==4.0.3
|
||||
attrs==23.1.0
|
||||
backoff==2.2.1
|
||||
beautifulsoup4==4.12.2
|
||||
bs4==0.0.1
|
||||
cachetools==5.3.1
|
||||
certifi==2023.7.22
|
||||
cffi==1.15.1
|
||||
chardet==5.2.0
|
||||
charset-normalizer==3.2.0
|
||||
Chroma==0.2.0
|
||||
chroma-hnswlib==0.7.2
|
||||
chromadb==0.4.5
|
||||
click==8.1.6
|
||||
coloredlogs==15.0.1
|
||||
cryptography==41.0.3
|
||||
dataclasses-json==0.5.14
|
||||
fastapi==0.99.1
|
||||
filetype==1.2.0
|
||||
flatbuffers==23.5.26
|
||||
frozenlist==1.4.0
|
||||
gast==0.4.0
|
||||
google-auth==2.22.0
|
||||
google-auth-oauthlib==1.0.0
|
||||
google-pasta==0.2.0
|
||||
gpt4all==1.0.8
|
||||
grpcio==1.57.0
|
||||
h11==0.14.0
|
||||
h5py==3.9.0
|
||||
httptools==0.6.0
|
||||
humanfriendly==10.0
|
||||
idna==3.4
|
||||
importlib-resources==6.0.1
|
||||
joblib==1.3.2
|
||||
keras==2.13.1
|
||||
langchain==0.0.261
|
||||
langsmith==0.0.21
|
||||
libclang==16.0.6
|
||||
lxml==4.9.3
|
||||
Markdown==3.4.4
|
||||
MarkupSafe==2.1.3
|
||||
marshmallow==3.20.1
|
||||
monotonic==1.6
|
||||
mpmath==1.3.0
|
||||
multidict==6.0.4
|
||||
mypy-extensions==1.0.0
|
||||
nltk==3.8.1
|
||||
numexpr==2.8.5
|
||||
numpy==1.24.3
|
||||
oauthlib==3.2.2
|
||||
onnxruntime==1.15.1
|
||||
openapi-schema-pydantic==1.2.4
|
||||
opt-einsum==3.3.0
|
||||
overrides==7.4.0
|
||||
packaging==23.1
|
||||
pdf2image==1.16.3
|
||||
pdfminer==20191125
|
||||
pdfminer.six==20221105
|
||||
Pillow==10.0.0
|
||||
posthog==3.0.1
|
||||
protobuf==4.24.0
|
||||
pulsar-client==3.2.0
|
||||
pyasn1==0.5.0
|
||||
pyasn1-modules==0.3.0
|
||||
pycparser==2.21
|
||||
pycryptodome==3.18.0
|
||||
pydantic==1.10.12
|
||||
PyPika==0.48.9
|
||||
python-dateutil==2.8.2
|
||||
python-dotenv==1.0.0
|
||||
python-magic==0.4.27
|
||||
PyYAML==6.0.1
|
||||
regex==2023.8.8
|
||||
requests==2.31.0
|
||||
requests-oauthlib==1.3.1
|
||||
rsa==4.9
|
||||
six==1.16.0
|
||||
sniffio==1.3.0
|
||||
soupsieve==2.4.1
|
||||
SQLAlchemy==2.0.19
|
||||
starlette==0.27.0
|
||||
sympy==1.12
|
||||
tabulate==0.9.0
|
||||
tenacity==8.2.2
|
||||
tensorboard==2.13.0
|
||||
tensorboard-data-server==0.7.1
|
||||
tensorflow==2.13.0
|
||||
tensorflow-estimator==2.13.0
|
||||
tensorflow-hub==0.14.0
|
||||
tensorflow-macos==2.13.0
|
||||
termcolor==2.3.0
|
||||
tokenizers==0.13.3
|
||||
tqdm==4.66.1
|
||||
typing-inspect==0.9.0
|
||||
typing_extensions==4.5.0
|
||||
unstructured==0.9.2
|
||||
urllib3==1.26.16
|
||||
uvicorn==0.23.2
|
||||
uvloop==0.17.0
|
||||
watchfiles==0.19.0
|
||||
websockets==11.0.3
|
||||
Werkzeug==2.3.6
|
||||
wrapt==1.15.0
|
||||
yarl==1.9.2
|
170
examples/langchain-python-rag-privategpt/.gitignore
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
# OSX
|
||||
.DS_STORE
|
||||
|
||||
# Models
|
||||
models/
|
||||
|
||||
# Local Chroma db
|
||||
.chroma/
|
||||
db/
|
||||
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
201 examples/langchain-python-rag-privategpt/LICENSE Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
91 examples/langchain-python-rag-privategpt/README.md Normal file
@@ -0,0 +1,91 @@
# PrivateGPT with Llama 2 uncensored

https://github.com/jmorganca/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b

> Note: this example is a slightly modified version of PrivateGPT using models such as Llama 2 Uncensored. All credit for PrivateGPT goes to Iván Martínez, its creator; you can find his GitHub repo [here](https://github.com/imartinez/privateGPT).

### Setup

Set up a virtual environment (optional):

```
python3 -m venv .venv
source .venv/bin/activate
```

Install the Python dependencies:

```shell
pip install -r requirements.txt
```

Pull the model you'd like to use:

```
ollama pull llama2-uncensored
```

### Getting WeWork's latest quarterly earnings report (10-Q)

```
mkdir source_documents
curl https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf -o source_documents/wework.pdf
```

### Ingesting files

```shell
python ingest.py
```

The output should look like this:

```shell
Creating new vectorstore
Loading documents from source_documents
Loading new documents: 100%|██████████████████████| 1/1 [00:01<00:00,  1.73s/it]
Loaded 1 new documents from source_documents
Split into 90 chunks of text (max. 500 tokens each)
Creating embeddings. May take some minutes...
Using embedded DuckDB with persistence: data will be stored in: db
Ingestion complete! You can now run privateGPT.py to query your documents
```

### Ask questions

```shell
python privateGPT.py

Enter a query: How many locations does WeWork have?

> Answer (took 17.7 s.):
As of June 2023, WeWork has 777 locations worldwide, including 610 Consolidated Locations (as defined in the section entitled Key Performance Indicators).
```

### Try a different model

```
ollama pull llama2:13b
MODEL=llama2:13b python privateGPT.py
```

## Adding more files

Put any and all of your files into the `source_documents` directory.

The supported extensions are listed below (see the sketch after this list for a quick way to check which files will be picked up):

- `.csv`: CSV
- `.docx`: Word Document
- `.doc`: Word Document
- `.enex`: EverNote
- `.eml`: Email
- `.epub`: EPub
- `.html`: HTML File
- `.md`: Markdown
- `.msg`: Outlook Message
- `.odt`: Open Document Text
- `.pdf`: Portable Document Format (PDF)
- `.pptx`: PowerPoint Document
- `.ppt`: PowerPoint Document
- `.txt`: Text file (UTF-8)
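The extension list above mirrors the `LOADER_MAPPING` table defined in `ingest.py` further down in this diff. As a quick pre-flight check, a minimal sketch like the following (not part of the example itself, and assuming it is run from the example directory with the dependencies installed) reports which files in `source_documents/` would be loaded and which would be skipped:

```python
# Minimal sketch: check which files ingest.py would pick up, using its own
# extension-to-loader mapping. Run from the example directory.
from pathlib import Path

from ingest import LOADER_MAPPING  # mapping defined in ingest.py below

for path in sorted(Path("source_documents").rglob("*")):
    if path.is_file():
        status = "supported" if path.suffix in LOADER_MAPPING else "skipped"
        print(f"{status}: {path}")
```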
12 examples/langchain-python-rag-privategpt/constants.py Normal file
@@ -0,0 +1,12 @@
import os
from chromadb.config import Settings

# Define the folder for storing database
PERSIST_DIRECTORY = os.environ.get('PERSIST_DIRECTORY', 'db')

# Define the Chroma settings
CHROMA_SETTINGS = Settings(
        chroma_db_impl='duckdb+parquet',
        persist_directory=PERSIST_DIRECTORY,
        anonymized_telemetry=False
)
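Both `ingest.py` and `privateGPT.py` (shown below) pass `CHROMA_SETTINGS` and the persist directory when opening the vector store. A minimal sketch of using the same settings directly, assuming `ingest.py` has already built the `db` folder and using the example's default embedding model (the query string is just illustrative):

```python
# Minimal sketch: open the persisted Chroma store with the example's settings
# and run a raw similarity search against it. Assumes ingest.py has been run.
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma

from constants import CHROMA_SETTINGS, PERSIST_DIRECTORY

embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma(persist_directory=PERSIST_DIRECTORY,
            embedding_function=embeddings,
            client_settings=CHROMA_SETTINGS)

for doc in db.similarity_search("lease obligations", k=2):
    print(doc.metadata.get("source"), doc.page_content[:80])
```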
161 examples/langchain-python-rag-privategpt/ingest.py Executable file
@@ -0,0 +1,161 @@
#!/usr/bin/env python3
import os
import glob
from typing import List
from multiprocessing import Pool
from tqdm import tqdm

from langchain.document_loaders import (
    CSVLoader,
    EverNoteLoader,
    PyMuPDFLoader,
    TextLoader,
    UnstructuredEmailLoader,
    UnstructuredEPubLoader,
    UnstructuredHTMLLoader,
    UnstructuredMarkdownLoader,
    UnstructuredODTLoader,
    UnstructuredPowerPointLoader,
    UnstructuredWordDocumentLoader,
)

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document
from constants import CHROMA_SETTINGS


# Load environment variables
persist_directory = os.environ.get('PERSIST_DIRECTORY', 'db')
source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents')
embeddings_model_name = os.environ.get('EMBEDDINGS_MODEL_NAME', 'all-MiniLM-L6-v2')
chunk_size = 500
chunk_overlap = 50

# Custom document loaders
class MyElmLoader(UnstructuredEmailLoader):
    """Wrapper to fallback to text/plain when default does not work"""

    def load(self) -> List[Document]:
        """Wrapper adding fallback for elm without html"""
        try:
            try:
                doc = UnstructuredEmailLoader.load(self)
            except ValueError as e:
                if 'text/html content not found in email' in str(e):
                    # Try plain text
                    self.unstructured_kwargs["content_source"]="text/plain"
                    doc = UnstructuredEmailLoader.load(self)
                else:
                    raise
        except Exception as e:
            # Add file_path to exception message
            raise type(e)(f"{self.file_path}: {e}") from e

        return doc


# Map file extensions to document loaders and their arguments
LOADER_MAPPING = {
    ".csv": (CSVLoader, {}),
    # ".docx": (Docx2txtLoader, {}),
    ".doc": (UnstructuredWordDocumentLoader, {}),
    ".docx": (UnstructuredWordDocumentLoader, {}),
    ".enex": (EverNoteLoader, {}),
    ".eml": (MyElmLoader, {}),
    ".epub": (UnstructuredEPubLoader, {}),
    ".html": (UnstructuredHTMLLoader, {}),
    ".md": (UnstructuredMarkdownLoader, {}),
    ".odt": (UnstructuredODTLoader, {}),
    ".pdf": (PyMuPDFLoader, {}),
    ".ppt": (UnstructuredPowerPointLoader, {}),
    ".pptx": (UnstructuredPowerPointLoader, {}),
    ".txt": (TextLoader, {"encoding": "utf8"}),
    # Add more mappings for other file extensions and loaders as needed
}


def load_single_document(file_path: str) -> List[Document]:
    ext = "." + file_path.rsplit(".", 1)[-1]
    if ext in LOADER_MAPPING:
        loader_class, loader_args = LOADER_MAPPING[ext]
        loader = loader_class(file_path, **loader_args)
        return loader.load()

    raise ValueError(f"Unsupported file extension '{ext}'")

def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
    """
    Loads all documents from the source documents directory, ignoring specified files
    """
    all_files = []
    for ext in LOADER_MAPPING:
        all_files.extend(
            glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
        )
    filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]

    with Pool(processes=os.cpu_count()) as pool:
        results = []
        with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
            for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
                results.extend(docs)
                pbar.update()

    return results

def process_documents(ignored_files: List[str] = []) -> List[Document]:
    """
    Load documents and split in chunks
    """
    print(f"Loading documents from {source_directory}")
    documents = load_documents(source_directory, ignored_files)
    if not documents:
        print("No new documents to load")
        exit(0)
    print(f"Loaded {len(documents)} new documents from {source_directory}")
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    texts = text_splitter.split_documents(documents)
    print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
    return texts

def does_vectorstore_exist(persist_directory: str) -> bool:
    """
    Checks if vectorstore exists
    """
    if os.path.exists(os.path.join(persist_directory, 'index')):
        if os.path.exists(os.path.join(persist_directory, 'chroma-collections.parquet')) and os.path.exists(os.path.join(persist_directory, 'chroma-embeddings.parquet')):
            list_index_files = glob.glob(os.path.join(persist_directory, 'index/*.bin'))
            list_index_files += glob.glob(os.path.join(persist_directory, 'index/*.pkl'))
            # At least 3 documents are needed in a working vectorstore
            if len(list_index_files) > 3:
                return True
    return False

def main():
    # Create embeddings
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)

    if does_vectorstore_exist(persist_directory):
        # Update and store locally vectorstore
        print(f"Appending to existing vectorstore at {persist_directory}")
        db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
        collection = db.get()
        texts = process_documents([metadata['source'] for metadata in collection['metadatas']])
        print(f"Creating embeddings. May take some minutes...")
        db.add_documents(texts)
    else:
        # Create and store locally vectorstore
        print("Creating new vectorstore")
        texts = process_documents()
        print(f"Creating embeddings. May take some minutes...")
        db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)
    db.persist()
    db = None

    print(f"Ingestion complete! You can now run privateGPT.py to query your documents")


if __name__ == "__main__":
    main()
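To debug a single file without rebuilding the whole vectorstore, `load_single_document` can be called directly. A minimal sketch, assuming the `wework.pdf` from the README has already been downloaded into `source_documents/`:

```python
# Minimal sketch: load one file through the same loader mapping ingest.py uses,
# without touching the vectorstore. Assumes source_documents/wework.pdf exists
# (see the curl command in the README above).
from ingest import load_single_document

docs = load_single_document("source_documents/wework.pdf")
print(f"Loaded {len(docs)} document section(s)")
print(docs[0].page_content[:200])
```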
3833 examples/langchain-python-rag-privategpt/poetry.lock generated Normal file
File diff suppressed because it is too large
71 examples/langchain-python-rag-privategpt/privateGPT.py Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env python3
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import Ollama
import os
import argparse
import time

model = os.environ.get("MODEL", "llama2-uncensored")
# For embeddings model, the example uses a sentence-transformers model
# https://www.sbert.net/docs/pretrained_models.html
# "The all-mpnet-base-v2 model provides the best quality, while all-MiniLM-L6-v2 is 5 times faster and still offers good quality."
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME", "all-MiniLM-L6-v2")
persist_directory = os.environ.get("PERSIST_DIRECTORY", "db")
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))

from constants import CHROMA_SETTINGS

def main():
    # Parse the command line arguments
    args = parse_arguments()
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
    db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
    retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
    # activate/deactivate the streaming StdOut callback for LLMs
    callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]

    llm = Ollama(model=model, callbacks=callbacks)

    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents= not args.hide_source)
    # Interactive questions and answers
    while True:
        query = input("\nEnter a query: ")
        if query == "exit":
            break
        if query.strip() == "":
            continue

        # Get the answer from the chain
        start = time.time()
        res = qa(query)
        answer, docs = res['result'], [] if args.hide_source else res['source_documents']
        end = time.time()

        # Print the result
        print("\n\n> Question:")
        print(query)
        print(answer)

        # Print the relevant sources used for the answer
        for document in docs:
            print("\n> " + document.metadata["source"] + ":")
            print(document.page_content)

def parse_arguments():
    parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
                                                 'using the power of LLMs.')
    parser.add_argument("--hide-source", "-S", action='store_true',
                        help='Use this flag to disable printing of source documents used for answers.')

    parser.add_argument("--mute-stream", "-M",
                        action='store_true',
                        help='Use this flag to disable the streaming StdOut callback for LLMs.')

    return parser.parse_args()


if __name__ == "__main__":
    main()
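The interactive loop above can also be driven programmatically. A minimal sketch that asks a single question through the same retrieval chain, assuming `ingest.py` has been run and the `llama2-uncensored` model has been pulled (the question is the one from the README):

```python
# Minimal sketch: run one query through the same retrieval chain privateGPT.py
# builds, without the interactive loop.
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import Ollama
from langchain.vectorstores import Chroma

from constants import CHROMA_SETTINGS

embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma(persist_directory="db", embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
qa = RetrievalQA.from_chain_type(
    llm=Ollama(model="llama2-uncensored"),
    chain_type="stuff",
    retriever=db.as_retriever(search_kwargs={"k": 4}),
    return_source_documents=True,
)

res = qa("How many locations does WeWork have?")
print(res["result"])
for doc in res["source_documents"]:
    print("source:", doc.metadata["source"])
```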
26 examples/langchain-python-rag-privategpt/pyproject.toml Normal file
@@ -0,0 +1,26 @@
[tool.poetry]
name = "privategpt"
version = "0.1.0"
description = ""
authors = ["Ivan Martinez <ivanmartit@gmail.com>"]
license = "Apache Version 2.0"
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.10"
langchain = "0.0.261"
gpt4all = "^1.0.3"
chromadb = "^0.3.26"
PyMuPDF = "^1.22.5"
python-dotenv = "^1.0.0"
unstructured = "^0.8.0"
extract-msg = "^0.41.5"
tabulate = "^0.9.0"
pandoc = "^2.3"
pypandoc = "^1.11"
tqdm = "^4.65.0"
sentence-transformers = "^2.2.2"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
2002 examples/langchain-python-rag-privategpt/requirements.txt Normal file
File diff suppressed because it is too large
15 examples/langchain-python-rag-websummary/README.md Normal file
@@ -0,0 +1,15 @@
# LangChain Web Summarization

This example summarizes a website.

## Setup

```
pip install -r requirements.txt
```

## Run

```
python main.py
```
12 examples/langchain-python-rag-websummary/main.py Normal file
@@ -0,0 +1,12 @@
from langchain.llms import Ollama
from langchain.document_loaders import WebBaseLoader
from langchain.chains.summarize import load_summarize_chain

loader = WebBaseLoader("https://ollama.ai/blog/run-llama2-uncensored-locally")
docs = loader.load()

llm = Ollama(model="llama2")
chain = load_summarize_chain(llm, chain_type="stuff")

result = chain.run(docs)
print(result)
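The `stuff` chain type sends the whole page to the model in a single prompt, which can overflow the context window on long pages. A minimal variation, assuming the same dependencies (`map_reduce` is another chain type accepted by `load_summarize_chain`, and the chunk sizes here are illustrative rather than tuned):

```python
# Minimal sketch: split the page into chunks and summarize with a map_reduce
# chain instead of "stuff", which can help when the page is too long for one prompt.
from langchain.llms import Ollama
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain

loader = WebBaseLoader("https://ollama.ai/blog/run-llama2-uncensored-locally")
docs = loader.load()

splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)
chunks = splitter.split_documents(docs)

llm = Ollama(model="llama2")
chain = load_summarize_chain(llm, chain_type="map_reduce")
print(chain.run(chunks))
```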
@@ -0,0 +1,2 @@
langchain==0.0.259
bs4==0.0.1
21 examples/langchain-python-simple/README.md Normal file
@@ -0,0 +1,21 @@
# LangChain

This example is a basic "hello world" of using LangChain with Ollama.

## Setup

```
pip install -r requirements.txt
```

## Run

```
python main.py
```

Running this example will print the response for "hello":

```
Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat?
```
4 examples/langchain-python-simple/main.py Normal file
@@ -0,0 +1,4 @@
from langchain.llms import Ollama
llm = Ollama(model="llama2")
res = llm.predict("hello")
print(res)
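The same two-line pattern works with any model that has already been pulled with `ollama pull`. A minimal sketch, where the `llama2:13b` tag (also used in the PrivateGPT README above) and the prompt are purely illustrative:

```python
# Minimal sketch: same pattern as main.py with a different model tag and prompt.
# Assumes the model has been pulled first, e.g. `ollama pull llama2:13b`.
from langchain.llms import Ollama

llm = Ollama(model="llama2:13b")
print(llm.predict("Write a haiku about running LLMs locally."))
```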
1 examples/langchain-python-simple/requirements.txt Normal file
@@ -0,0 +1 @@
langchain==0.0.259
21 examples/langchain-typescript-simple/README.md Normal file
@@ -0,0 +1,21 @@
# LangChain

This example is a basic "hello world" of using LangChain with Ollama from Node.js and TypeScript.

## Setup

```shell
npm install
```

## Run

```shell
ts-node main.ts
```

Running this example will print the response for "hello":

```plaintext
Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat?
```
15 examples/langchain-typescript-simple/main.ts Normal file
@@ -0,0 +1,15 @@
import { Ollama } from 'langchain/llms/ollama';

async function main() {
  const ollama = new Ollama({
    model: 'mistral'
    // other parameters can be found at https://js.langchain.com/docs/api/llms_ollama/classes/Ollama
  })
  const stream = await ollama.stream("Hello");

  for await (const chunk of stream) {
    process.stdout.write(chunk);
  }
}

main();
997
examples/langchain-typescript-simple/package-lock.json
generated
Normal file
997
examples/langchain-typescript-simple/package-lock.json
generated
Normal file
@@ -0,0 +1,997 @@
|
||||
{
|
||||
"name": "with-langchain-typescript-simplegenerate",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"dependencies": {
|
||||
"langchain": "^0.0.165"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "^5.2.2"
|
||||
}
|
||||
},
|
||||
"node_modules/@anthropic-ai/sdk": {
|
||||
"version": "0.6.2",
|
||||
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.6.2.tgz",
|
||||
"integrity": "sha512-fB9PUj9RFT+XjkL+E9Ol864ZIJi+1P8WnbHspN3N3/GK2uSzjd0cbVIKTGgf4v3N8MwaQu+UWnU7C4BG/fap/g==",
|
||||
"dependencies": {
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/node-fetch": "^2.6.4",
|
||||
"abort-controller": "^3.0.0",
|
||||
"agentkeepalive": "^4.2.1",
|
||||
"digest-fetch": "^1.3.0",
|
||||
"form-data-encoder": "1.7.2",
|
||||
"formdata-node": "^4.3.2",
|
||||
"node-fetch": "^2.6.7"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/node": {
|
||||
"version": "18.18.4",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.18.4.tgz",
|
||||
"integrity": "sha512-t3rNFBgJRugIhackit2mVcLfF6IRc0JE4oeizPQL8Zrm8n2WY/0wOdpOPhdtG0V9Q2TlW/axbF1MJ6z+Yj/kKQ=="
|
||||
},
|
||||
"node_modules/@types/node-fetch": {
|
||||
"version": "2.6.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.6.tgz",
|
||||
"integrity": "sha512-95X8guJYhfqiuVVhRFxVQcf4hW/2bCuoPwDasMf/531STFoNoWTT7YDnWdXHEZKqAGUigmpG31r2FE70LwnzJw==",
|
||||
"dependencies": {
|
||||
"@types/node": "*",
|
||||
"form-data": "^4.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/retry": {
|
||||
"version": "0.12.0",
|
||||
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
|
||||
"integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA=="
|
||||
},
|
||||
"node_modules/@types/uuid": {
|
||||
"version": "9.0.5",
|
||||
"resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.5.tgz",
|
||||
"integrity": "sha512-xfHdwa1FMJ082prjSJpoEI57GZITiQz10r3vEJCHa2khEFQjKy91aWKz6+zybzssCvXUwE1LQWgWVwZ4nYUvHQ=="
|
||||
},
|
||||
"node_modules/abort-controller": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
|
||||
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
|
||||
"dependencies": {
|
||||
"event-target-shim": "^5.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=6.5"
|
||||
}
|
||||
},
|
||||
"node_modules/agentkeepalive": {
|
||||
"version": "4.5.0",
|
||||
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz",
|
||||
"integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==",
|
||||
"dependencies": {
|
||||
"humanize-ms": "^1.2.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 8.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ansi-styles": {
|
||||
"version": "5.2.0",
|
||||
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
|
||||
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/argparse": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
|
||||
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
|
||||
},
|
||||
"node_modules/asynckit": {
|
||||
"version": "0.4.0",
|
||||
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
|
||||
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
|
||||
},
|
||||
"node_modules/base-64": {
|
||||
"version": "0.1.0",
|
||||
"resolved": "https://registry.npmjs.org/base-64/-/base-64-0.1.0.tgz",
|
||||
"integrity": "sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA=="
|
||||
},
|
||||
"node_modules/base64-js": {
|
||||
"version": "1.5.1",
|
||||
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
|
||||
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/feross"
|
||||
},
|
||||
{
|
||||
"type": "patreon",
|
||||
"url": "https://www.patreon.com/feross"
|
||||
},
|
||||
{
|
||||
"type": "consulting",
|
||||
"url": "https://feross.org/support"
|
||||
}
|
||||
]
|
||||
},
|
||||
"node_modules/binary-extensions": {
|
||||
"version": "2.2.0",
|
||||
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
|
||||
"integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/binary-search": {
|
||||
"version": "1.3.6",
|
||||
"resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz",
|
||||
"integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA=="
|
||||
},
|
||||
"node_modules/camelcase": {
|
||||
"version": "6.3.0",
|
||||
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
|
||||
"integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/charenc": {
|
||||
"version": "0.0.2",
|
||||
"resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz",
|
||||
"integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==",
|
||||
"engines": {
|
||||
"node": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/combined-stream": {
|
||||
"version": "1.0.8",
|
||||
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
|
||||
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
|
||||
"dependencies": {
|
||||
"delayed-stream": "~1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/commander": {
|
||||
"version": "10.0.1",
|
||||
"resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz",
|
||||
"integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==",
|
||||
"engines": {
|
||||
"node": ">=14"
|
||||
}
|
||||
},
|
||||
"node_modules/crypt": {
|
||||
"version": "0.0.2",
|
||||
"resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz",
|
||||
"integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==",
|
||||
"engines": {
|
||||
"node": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/decamelize": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
|
||||
"integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/delayed-stream": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
|
||||
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
|
||||
"engines": {
|
||||
"node": ">=0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/digest-fetch": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz",
|
||||
"integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==",
|
||||
"dependencies": {
|
||||
"base-64": "^0.1.0",
|
||||
"md5": "^2.3.0"
|
||||
}
|
||||
},
|
||||
"node_modules/event-target-shim": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
|
||||
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
}
|
||||
},
|
||||
"node_modules/eventemitter3": {
|
||||
"version": "4.0.7",
|
||||
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
|
||||
"integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
|
||||
},
|
||||
"node_modules/expr-eval": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/expr-eval/-/expr-eval-2.0.2.tgz",
|
||||
"integrity": "sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg=="
|
||||
},
|
||||
"node_modules/flat": {
|
||||
"version": "5.0.2",
|
||||
"resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz",
|
||||
"integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==",
|
||||
"bin": {
|
||||
"flat": "cli.js"
|
||||
}
|
||||
},
|
||||
"node_modules/form-data": {
|
||||
"version": "4.0.0",
|
||||
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
|
||||
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
|
||||
"dependencies": {
|
||||
"asynckit": "^0.4.0",
|
||||
"combined-stream": "^1.0.8",
|
||||
"mime-types": "^2.1.12"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/form-data-encoder": {
|
||||
"version": "1.7.2",
|
||||
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
|
||||
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="
|
||||
},
|
||||
"node_modules/formdata-node": {
|
||||
"version": "4.4.1",
|
||||
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
|
||||
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
|
||||
"dependencies": {
|
||||
"node-domexception": "1.0.0",
|
||||
"web-streams-polyfill": "4.0.0-beta.3"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 12.20"
|
||||
}
|
||||
},
|
||||
"node_modules/humanize-ms": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
|
||||
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
|
||||
"dependencies": {
|
||||
"ms": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/is-any-array": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz",
|
||||
"integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ=="
|
||||
},
|
||||
"node_modules/is-buffer": {
|
||||
"version": "1.1.6",
|
||||
"resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
|
||||
"integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w=="
|
||||
},
|
||||
"node_modules/js-tiktoken": {
|
||||
"version": "1.0.7",
|
||||
"resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.7.tgz",
|
||||
"integrity": "sha512-biba8u/clw7iesNEWLOLwrNGoBP2lA+hTaBLs/D45pJdUPFXyxD6nhcDVtADChghv4GgyAiMKYMiRx7x6h7Biw==",
|
||||
"dependencies": {
|
||||
"base64-js": "^1.5.1"
|
||||
}
|
||||
},
|
||||
"node_modules/js-yaml": {
|
||||
"version": "4.1.0",
|
||||
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
|
||||
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
|
||||
"dependencies": {
|
||||
"argparse": "^2.0.1"
|
||||
},
|
||||
"bin": {
|
||||
"js-yaml": "bin/js-yaml.js"
|
||||
}
|
||||
},
|
||||
"node_modules/jsonpointer": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz",
|
||||
"integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/langchain": {
|
||||
"version": "0.0.165",
|
||||
"resolved": "https://registry.npmjs.org/langchain/-/langchain-0.0.165.tgz",
|
||||
"integrity": "sha512-CpbNpjwaE+9lzjdw+pZz0VgnRrFivEgr7CVp9dDaAb5JpaJAA4V2v6uQ9ZPN+TSqupTQ79HFn2sfyZVEl2EG7Q==",
|
||||
"dependencies": {
|
||||
"@anthropic-ai/sdk": "^0.6.2",
|
||||
"ansi-styles": "^5.0.0",
|
||||
"binary-extensions": "^2.2.0",
|
||||
"camelcase": "6",
|
||||
"decamelize": "^1.2.0",
|
||||
"expr-eval": "^2.0.2",
|
||||
"flat": "^5.0.2",
|
||||
"js-tiktoken": "^1.0.7",
|
||||
"js-yaml": "^4.1.0",
|
||||
"jsonpointer": "^5.0.1",
|
||||
"langchainhub": "~0.0.6",
|
||||
"langsmith": "~0.0.31",
|
||||
"ml-distance": "^4.0.0",
|
||||
"object-hash": "^3.0.0",
|
||||
"openai": "~4.4.0",
|
||||
"openapi-types": "^12.1.3",
|
||||
"p-queue": "^6.6.2",
|
||||
"p-retry": "4",
|
||||
"uuid": "^9.0.0",
|
||||
"yaml": "^2.2.1",
|
||||
"zod": "^3.22.3",
|
||||
"zod-to-json-schema": "^3.20.4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@aws-crypto/sha256-js": "^5.0.0",
|
||||
"@aws-sdk/client-bedrock-runtime": "^3.422.0",
|
||||
"@aws-sdk/client-dynamodb": "^3.310.0",
|
||||
"@aws-sdk/client-kendra": "^3.352.0",
|
||||
"@aws-sdk/client-lambda": "^3.310.0",
|
||||
"@aws-sdk/client-s3": "^3.310.0",
|
||||
"@aws-sdk/client-sagemaker-runtime": "^3.310.0",
|
||||
"@aws-sdk/client-sfn": "^3.310.0",
|
||||
"@aws-sdk/credential-provider-node": "^3.388.0",
|
||||
"@azure/storage-blob": "^12.15.0",
|
||||
"@clickhouse/client": "^0.0.14",
|
||||
"@cloudflare/ai": "^1.0.12",
|
||||
"@elastic/elasticsearch": "^8.4.0",
|
||||
"@getmetal/metal-sdk": "*",
|
||||
"@getzep/zep-js": "^0.7.0",
|
||||
"@gomomento/sdk": "^1.23.0",
|
||||
"@google-ai/generativelanguage": "^0.2.1",
|
||||
"@google-cloud/storage": "^6.10.1",
|
||||
"@huggingface/inference": "^1.5.1",
|
||||
"@mozilla/readability": "*",
|
||||
"@notionhq/client": "^2.2.10",
|
||||
"@opensearch-project/opensearch": "*",
|
||||
"@pinecone-database/pinecone": "^1.1.0",
|
||||
"@planetscale/database": "^1.8.0",
|
||||
"@qdrant/js-client-rest": "^1.2.0",
|
||||
"@raycast/api": "^1.55.2",
|
||||
"@smithy/eventstream-codec": "^2.0.5",
|
||||
"@smithy/protocol-http": "^3.0.6",
|
||||
"@smithy/signature-v4": "^2.0.10",
|
||||
"@smithy/util-utf8": "^2.0.0",
|
||||
"@supabase/postgrest-js": "^1.1.1",
|
||||
"@supabase/supabase-js": "^2.10.0",
|
||||
"@tensorflow-models/universal-sentence-encoder": "*",
|
||||
"@tensorflow/tfjs-converter": "*",
|
||||
"@tensorflow/tfjs-core": "*",
|
||||
"@upstash/redis": "^1.20.6",
|
||||
"@vercel/postgres": "^0.5.0",
|
||||
"@writerai/writer-sdk": "^0.40.2",
|
||||
"@xata.io/client": "^0.25.1",
|
||||
"@xenova/transformers": "^2.5.4",
|
||||
"@zilliz/milvus2-sdk-node": ">=2.2.7",
|
||||
"apify-client": "^2.7.1",
|
||||
"axios": "*",
|
||||
"cassandra-driver": "^4.6.4",
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
"chromadb": "*",
|
||||
"cohere-ai": ">=6.0.0",
|
||||
"d3-dsv": "^2.0.0",
|
||||
"epub2": "^3.0.1",
|
||||
"faiss-node": "^0.3.0",
|
||||
"fast-xml-parser": "^4.2.7",
|
||||
"firebase-admin": "^11.9.0",
|
||||
"google-auth-library": "^8.9.0",
|
||||
"googleapis": "^126.0.1",
|
||||
"hnswlib-node": "^1.4.2",
|
||||
"html-to-text": "^9.0.5",
|
||||
"ignore": "^5.2.0",
|
||||
"ioredis": "^5.3.2",
|
||||
"jsdom": "*",
|
||||
"llmonitor": "*",
|
||||
"lodash": "^4.17.21",
|
||||
"mammoth": "*",
|
||||
"mongodb": "^5.2.0",
|
||||
"mysql2": "^3.3.3",
|
||||
"neo4j-driver": "*",
|
||||
"node-llama-cpp": "*",
|
||||
"notion-to-md": "^3.1.0",
|
||||
"pdf-parse": "1.1.1",
|
||||
"peggy": "^3.0.2",
|
||||
"pg": "^8.11.0",
|
||||
"pg-copy-streams": "^6.0.5",
|
||||
"pickleparser": "^0.1.0",
|
||||
"playwright": "^1.32.1",
|
||||
"portkey-ai": "^0.1.11",
|
||||
"puppeteer": "^19.7.2",
|
||||
"redis": "^4.6.4",
|
||||
"replicate": "^0.18.0",
|
||||
"sonix-speech-recognition": "^2.1.1",
|
||||
"srt-parser-2": "^1.2.2",
|
||||
"typeorm": "^0.3.12",
|
||||
"typesense": "^1.5.3",
|
||||
"usearch": "^1.1.1",
|
||||
"vectordb": "^0.1.4",
|
||||
"voy-search": "0.6.2",
|
||||
"weaviate-ts-client": "^1.4.0",
|
||||
"web-auth-library": "^1.0.3",
|
||||
"youtube-transcript": "^1.0.6",
|
||||
"youtubei.js": "^5.8.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@aws-crypto/sha256-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-bedrock-runtime": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-dynamodb": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-kendra": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-lambda": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-s3": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-sagemaker-runtime": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-sfn": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/credential-provider-node": {
|
||||
"optional": true
|
||||
},
|
||||
"@azure/storage-blob": {
|
||||
"optional": true
|
||||
},
|
||||
"@clickhouse/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@cloudflare/ai": {
|
||||
"optional": true
|
||||
},
|
||||
"@elastic/elasticsearch": {
|
||||
"optional": true
|
||||
},
|
||||
"@getmetal/metal-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@getzep/zep-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@gomomento/sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@google-ai/generativelanguage": {
|
||||
"optional": true
|
||||
},
|
||||
"@google-cloud/storage": {
|
||||
"optional": true
|
||||
},
|
||||
"@huggingface/inference": {
|
||||
"optional": true
|
||||
},
|
||||
"@mozilla/readability": {
|
||||
"optional": true
|
||||
},
|
||||
"@notionhq/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@opensearch-project/opensearch": {
|
||||
"optional": true
|
||||
},
|
||||
"@pinecone-database/pinecone": {
|
||||
"optional": true
|
||||
},
|
||||
"@planetscale/database": {
|
||||
"optional": true
|
||||
},
|
||||
"@qdrant/js-client-rest": {
|
||||
"optional": true
|
||||
},
|
||||
"@raycast/api": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/eventstream-codec": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/protocol-http": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/signature-v4": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/util-utf8": {
|
||||
"optional": true
|
||||
},
|
||||
"@supabase/postgrest-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@supabase/supabase-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow-models/universal-sentence-encoder": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow/tfjs-converter": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow/tfjs-core": {
|
||||
"optional": true
|
||||
},
|
||||
"@upstash/redis": {
|
||||
"optional": true
|
||||
},
|
||||
"@vercel/postgres": {
|
||||
"optional": true
|
||||
},
|
||||
"@writerai/writer-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@xata.io/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@xenova/transformers": {
|
||||
"optional": true
|
||||
},
|
||||
"@zilliz/milvus2-sdk-node": {
|
||||
"optional": true
|
||||
},
|
||||
"apify-client": {
|
||||
"optional": true
|
||||
},
|
||||
"axios": {
|
||||
"optional": true
|
||||
},
|
||||
"cassandra-driver": {
|
||||
"optional": true
|
||||
},
|
||||
"cheerio": {
|
||||
"optional": true
|
||||
},
|
||||
"chromadb": {
|
||||
"optional": true
|
||||
},
|
||||
"cohere-ai": {
|
||||
"optional": true
|
||||
},
|
||||
"d3-dsv": {
|
||||
"optional": true
|
||||
},
|
||||
"epub2": {
|
||||
"optional": true
|
||||
},
|
||||
"faiss-node": {
|
||||
"optional": true
|
||||
},
|
||||
"fast-xml-parser": {
|
||||
"optional": true
|
||||
},
|
||||
"firebase-admin": {
|
||||
"optional": true
|
||||
},
|
||||
"google-auth-library": {
|
||||
"optional": true
|
||||
},
|
||||
"googleapis": {
|
||||
"optional": true
|
||||
},
|
||||
"hnswlib-node": {
|
||||
"optional": true
|
||||
},
|
||||
"html-to-text": {
|
||||
"optional": true
|
||||
},
|
||||
"ignore": {
|
||||
"optional": true
|
||||
},
|
||||
"ioredis": {
|
||||
"optional": true
|
||||
},
|
||||
"jsdom": {
|
||||
"optional": true
|
||||
},
|
||||
"llmonitor": {
|
||||
"optional": true
|
||||
},
|
||||
"lodash": {
|
||||
"optional": true
|
||||
},
|
||||
"mammoth": {
|
||||
"optional": true
|
||||
},
|
||||
"mongodb": {
|
||||
"optional": true
|
||||
},
|
||||
"mysql2": {
|
||||
"optional": true
|
||||
},
|
||||
"neo4j-driver": {
|
||||
"optional": true
|
||||
},
|
||||
"node-llama-cpp": {
|
||||
"optional": true
|
||||
},
|
||||
"notion-to-md": {
|
||||
"optional": true
|
||||
},
|
||||
"pdf-parse": {
|
||||
"optional": true
|
||||
},
|
||||
"peggy": {
|
||||
"optional": true
|
||||
},
|
||||
"pg": {
|
||||
"optional": true
|
||||
},
|
||||
"pg-copy-streams": {
|
||||
"optional": true
|
||||
},
|
||||
"pickleparser": {
|
||||
"optional": true
|
||||
},
|
||||
"playwright": {
|
||||
"optional": true
|
||||
},
|
||||
"portkey-ai": {
|
||||
"optional": true
|
||||
},
|
||||
"puppeteer": {
|
||||
"optional": true
|
||||
},
|
||||
"redis": {
|
||||
"optional": true
|
||||
},
|
||||
"replicate": {
|
||||
"optional": true
|
||||
},
|
||||
"sonix-speech-recognition": {
|
||||
"optional": true
|
||||
},
|
||||
"srt-parser-2": {
|
||||
"optional": true
|
||||
},
|
||||
"typeorm": {
|
||||
"optional": true
|
||||
},
|
||||
"typesense": {
|
||||
"optional": true
|
||||
},
|
||||
"usearch": {
|
||||
"optional": true
|
||||
},
|
||||
"vectordb": {
|
||||
"optional": true
|
||||
},
|
||||
"voy-search": {
|
||||
"optional": true
|
||||
},
|
||||
"weaviate-ts-client": {
|
||||
"optional": true
|
||||
},
|
||||
"web-auth-library": {
|
||||
"optional": true
|
||||
},
|
||||
"youtube-transcript": {
|
||||
"optional": true
|
||||
},
|
||||
"youtubei.js": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/langchainhub": {
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/langchainhub/-/langchainhub-0.0.6.tgz",
|
||||
"integrity": "sha512-SW6105T+YP1cTe0yMf//7kyshCgvCTyFBMTgH2H3s9rTAR4e+78DA/BBrUL/Mt4Q5eMWui7iGuAYb3pgGsdQ9w=="
|
||||
},
|
||||
"node_modules/langsmith": {
|
||||
"version": "0.0.42",
|
||||
"resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.0.42.tgz",
|
||||
"integrity": "sha512-sFuN+e7E+pPBIRaRgFqZh/BRBWNHTZNAwi6uj4kydQawooCZYoJmM5snOkiQrhVSvAhgu6xFhLvmfvkPcKzD7w==",
|
||||
"dependencies": {
|
||||
"@types/uuid": "^9.0.1",
|
||||
"commander": "^10.0.1",
|
||||
"p-queue": "^6.6.2",
|
||||
"p-retry": "4",
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"bin": {
|
||||
"langsmith": "dist/cli/main.cjs"
|
||||
}
|
||||
},
|
||||
"node_modules/md5": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz",
|
||||
"integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==",
|
||||
"dependencies": {
|
||||
"charenc": "0.0.2",
|
||||
"crypt": "0.0.2",
|
||||
"is-buffer": "~1.1.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-db": {
|
||||
"version": "1.52.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
|
||||
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-types": {
|
||||
"version": "2.1.35",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
|
||||
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
|
||||
"dependencies": {
|
||||
"mime-db": "1.52.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-array-mean": {
|
||||
"version": "1.1.6",
|
||||
"resolved": "https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz",
|
||||
"integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==",
|
||||
"dependencies": {
|
||||
"ml-array-sum": "^1.1.6"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-array-sum": {
|
||||
"version": "1.1.6",
|
||||
"resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz",
|
||||
"integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==",
|
||||
"dependencies": {
|
||||
"is-any-array": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-distance": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.1.tgz",
|
||||
"integrity": "sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw==",
|
||||
"dependencies": {
|
||||
"ml-array-mean": "^1.1.6",
|
||||
"ml-distance-euclidean": "^2.0.0",
|
||||
"ml-tree-similarity": "^1.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-distance-euclidean": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz",
|
||||
"integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q=="
|
||||
},
|
||||
"node_modules/ml-tree-similarity": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz",
|
||||
"integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==",
|
||||
"dependencies": {
|
||||
"binary-search": "^1.3.5",
|
||||
"num-sort": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
},
|
||||
"node_modules/node-domexception": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
|
||||
"integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/jimmywarting"
|
||||
},
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://paypal.me/jimmywarting"
|
||||
}
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=10.5.0"
|
||||
}
|
||||
},
|
||||
"node_modules/node-fetch": {
|
||||
"version": "2.7.0",
|
||||
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
|
||||
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
|
||||
"dependencies": {
|
||||
"whatwg-url": "^5.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": "4.x || >=6.0.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"encoding": "^0.1.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"encoding": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/num-sort": {
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz",
|
||||
"integrity": "sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/object-hash": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
|
||||
"integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/openai": {
|
||||
"version": "4.4.0",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-4.4.0.tgz",
|
||||
"integrity": "sha512-JN0t628Kh95T0IrXl0HdBqnlJg+4Vq0Bnh55tio+dfCnyzHvMLiWyCM9m726MAJD2YkDU4/8RQB6rNbEq9ct2w==",
|
||||
"dependencies": {
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/node-fetch": "^2.6.4",
|
||||
"abort-controller": "^3.0.0",
|
||||
"agentkeepalive": "^4.2.1",
|
||||
"digest-fetch": "^1.3.0",
|
||||
"form-data-encoder": "1.7.2",
|
||||
"formdata-node": "^4.3.2",
|
||||
"node-fetch": "^2.6.7"
|
||||
},
|
||||
"bin": {
|
||||
"openai": "bin/cli"
|
||||
}
|
||||
},
|
||||
"node_modules/openapi-types": {
|
||||
"version": "12.1.3",
|
||||
"resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz",
|
||||
"integrity": "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw=="
|
||||
},
|
||||
"node_modules/p-finally": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
|
||||
"integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==",
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
}
|
||||
},
|
||||
"node_modules/p-queue": {
|
||||
"version": "6.6.2",
|
||||
"resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz",
|
||||
"integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==",
|
||||
"dependencies": {
|
||||
"eventemitter3": "^4.0.4",
|
||||
"p-timeout": "^3.2.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/p-retry": {
|
||||
"version": "4.6.2",
|
||||
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
|
||||
"integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
|
||||
"dependencies": {
|
||||
"@types/retry": "0.12.0",
|
||||
"retry": "^0.13.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/p-timeout": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz",
|
||||
"integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==",
|
||||
"dependencies": {
|
||||
"p-finally": "^1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/retry": {
|
||||
"version": "0.13.1",
|
||||
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
|
||||
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
|
||||
"engines": {
|
||||
"node": ">= 4"
|
||||
}
|
||||
},
|
||||
"node_modules/tr46": {
|
||||
"version": "0.0.3",
|
||||
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
|
||||
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
|
||||
},
|
||||
"node_modules/typescript": {
|
||||
"version": "5.2.2",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz",
|
||||
"integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==",
|
||||
"dev": true,
|
||||
"bin": {
|
||||
"tsc": "bin/tsc",
|
||||
"tsserver": "bin/tsserver"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.17"
|
||||
}
|
||||
},
|
||||
"node_modules/uuid": {
|
||||
"version": "9.0.1",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
|
||||
"integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
|
||||
"funding": [
|
||||
"https://github.com/sponsors/broofa",
|
||||
"https://github.com/sponsors/ctavan"
|
||||
],
|
||||
"bin": {
|
||||
"uuid": "dist/bin/uuid"
|
||||
}
|
||||
},
|
||||
"node_modules/web-streams-polyfill": {
|
||||
"version": "4.0.0-beta.3",
|
||||
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
|
||||
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
|
||||
"engines": {
|
||||
"node": ">= 14"
|
||||
}
|
||||
},
|
||||
"node_modules/webidl-conversions": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
|
||||
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
|
||||
},
|
||||
"node_modules/whatwg-url": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
|
||||
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
|
||||
"dependencies": {
|
||||
"tr46": "~0.0.3",
|
||||
"webidl-conversions": "^3.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/yaml": {
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
|
||||
"integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==",
|
||||
"engines": {
|
||||
"node": ">= 14"
|
||||
}
|
||||
},
|
||||
"node_modules/zod": {
|
||||
"version": "3.22.4",
|
||||
"resolved": "https://registry.npmjs.org/zod/-/zod-3.22.4.tgz",
|
||||
"integrity": "sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/colinhacks"
|
||||
}
|
||||
},
|
||||
"node_modules/zod-to-json-schema": {
|
||||
"version": "3.21.4",
|
||||
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.21.4.tgz",
|
||||
"integrity": "sha512-fjUZh4nQ1s6HMccgIeE0VP4QG/YRGPmyjO9sAh890aQKPEk3nqbfUXhMFaC+Dr5KvYBm8BCyvfpZf2jY9aGSsw==",
|
||||
"peerDependencies": {
|
||||
"zod": "^3.21.4"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
examples/langchain-typescript-simple/package.json (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
{
|
||||
"devDependencies": {
|
||||
"typescript": "^5.2.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"langchain": "^0.0.165"
|
||||
}
|
||||
}
|
@@ -1,8 +0,0 @@
|
||||
# Modelfile for creating a Midjourney prompts from a topic
|
||||
# This prompt was adapted from the original at https://www.greataiprompts.com/guide/midjourney/best-chatgpt-prompt-for-midjourney/
|
||||
# Run `ollama create mj -f ./Modelfile` and then `ollama run mj` and enter a topic
|
||||
|
||||
FROM nous-hermes
|
||||
SYSTEM """
|
||||
Embrace your role as an AI-powered creative assistant, employing Midjourney to manifest compelling AI-generated art. I will outline a specific image concept, and in response, you must produce an exhaustive, multifaceted prompt for Midjourney, ensuring every detail of the original concept is represented in your instructions. Midjourney doesn't do well with text, so after the prompt, give me instructions that I can use to create the titles in a image editor.
|
||||
"""
|
examples/modelfile-10tweets/Modelfile (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
# Modelfile for creating a list of ten tweets from a topic
|
||||
# Run `ollama create 10tweets -f ./Modelfile` and then `ollama run 10tweets` and enter a topic
|
||||
|
||||
FROM llama2
|
||||
SYSTEM """
|
||||
You are a content marketer who needs to come up with 10 short but succinct tweets. The answer should be a list of ten tweets. Each tweet can have a maximum of 280 characters and should include hashtags. Each user input will be a subject and you should expand it in ten creative ways. Never stop after just one tweet. Always include ten.
|
||||
"""
|
examples/modelfile-10tweets/README.md (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Ten Tweets Modelfile
|
||||
|
||||
This is a simple modelfile that generates ten tweets based off any topic.
|
||||
|
||||
```bash
|
||||
ollama create tentweets
|
||||
|
||||
ollama run tentweets
|
||||
>>> underwater basketweaving
|
||||
Great! Here are ten creative tweets about underwater basketweaving:
|
||||
|
||||
1. "Just discovered the ultimate stress-reliever: Underwater basketweaving! 🌊🧵 #UnderwaterBasketweaving #StressRelief"
|
||||
2. "Who needs meditation when you can do underwater basketweaving? 😴👀 #PeacefulDistraction #UnderwaterBasketweaving"
|
||||
3. "Just spent an hour in the pool and still managed to knot my basket. Goal: untangle it before next session. 💪🏽 #ChallengeAccepted #UnderwaterBasketweaving"
|
||||
4. "When life gives you lemons, make underwater basketweaving! 🍋🧵 #LemonadeLife #UnderwaterBasketweaving"
|
||||
5. "Just realized my underwater basketweaving skills could come in handy during a zombie apocalypse. 😂🧡 #SurvivalTips #UnderwaterBasketweaving"
|
||||
6. "I'm not lazy, I'm just conserving energy for my next underwater basketweaving session. 😴💤 #LazyDay #UnderwaterBasketweaving"
|
||||
7. "Just found my inner peace while doing underwater basketweaving. It's like meditation, but with knots! 🙏🧵 #Mindfulness #UnderwaterBasketweaving"
|
||||
8. "Why study for exams when you can do underwater basketweaving and forget all your worries? 😜🧵 #ProcrastinationStation #UnderwaterBasketweaving"
|
||||
9. "Just had to cut my underwater basketweaving session short due to a sudden urge to breathe. 🤯🌊 #AquaticAdventures #UnderwaterBasketweaving"
|
||||
10. "I'm not sure what's more impressive: my underwater basketweaving skills or the fact that I didn't drown trying to make this tweet. 😅🧵 #Accomplishment
|
||||
#UnderwaterBasketweaving"
|
||||
```
|
@@ -1,7 +1,7 @@
|
||||
# Modelfile for creating a devops engineer assistant
|
||||
# Run `ollama create devops-engineer -f ./Modelfile` and then `ollama run devops-engineer` and enter a topic
|
||||
|
||||
FROM llama2:13b
|
||||
FROM mistral
|
||||
PARAMETER temperature 1
|
||||
SYSTEM """
|
||||
You are a senior devops engineer, acting as an assistant. You offer help with cloud technologies like: Terraform, AWS, kubernetes, python. You answer with code examples when possible
|
examples/modelfile-devopsengineer/README.md (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
# DevOps Engineer Modelfile
|
||||
|
||||
This is a simple modelfile that will respond to any question a senior DevOps engineer should know how to solve.
|
||||
|
||||
````text
|
||||
ollama create devopsengineer
|
||||
|
||||
ollama run devopsengineer
|
||||
>>> how can i proxy all connections from inside the office through a single server that caches large files. Provide a config file for nginx to achieve this?
|
||||
|
||||
Here is an example configuration file for Nginx that can be used to proxy all connections from inside the office network to a centralized cache server:
|
||||
```perl
|
||||
http {
|
||||
upstream cache_server {
|
||||
server <cache_server_ip>;
|
||||
server <cache_server_ip>;
|
||||
server <cache_server_ip>;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name localhost;
|
||||
|
||||
location / {
|
||||
proxy_pass http://cache_server;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
In this configuration, we define an `upstream` block that lists the IP addresses of the cache servers. We then define a server block that listens on port 80 and routes all
|
||||
traffic to the `/` location to the cache servers using the `proxy_pass` directive. The `proxy_set_header` directives are used to preserve the source IP address of the client
|
||||
request when forwarding it to the cache server.
|
||||
|
||||
To use this configuration, you would need to replace the placeholder `<cache_server_ip>` with the actual IP addresses of your cache servers. You would also need to make sure
|
||||
that the cache servers are configured to accept incoming connections from the Nginx server and handle requests for files.
|
||||
|
||||
````
|
(binary image file: 446 KiB before, 446 KiB after)
examples/modelfile-midjourney/Modelfile (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
# Modelfile for creating Midjourney prompts from a topic
|
||||
# This prompt was adapted from the original at https://www.greataiprompts.com/guide/midjourney/best-chatgpt-prompt-for-midjourney/
|
||||
# Run `ollama create mj -f ./Modelfile` and then `ollama run mj` and enter a topic
|
||||
|
||||
FROM zephyr
|
||||
PARAMETER temperature 0.8
|
||||
PARAMETER top_k 500
|
||||
PARAMETER top_p 0.9
|
||||
SYSTEM """
|
||||
Embrace your role as a creative illustrator. Based on a concept provided, you must produce a single paragraph with a multifaceted description of an image, ensuring significant details of the concept and more is represented in your instructions. You do not need to write complete sentences but rather short concepts with the following information: the level of detail that should be represented, an artistic style and maybe a specific name of a painter or illustrator, the ideal color pallete, lighting, mood, perspective, the setting, time of day, weather, the season, the time period, location, materials, the textures, patterns, lines, brushstrokes, techniques, the medium, the genre, the rendering style. Don't include everything and keep the description length under 250 words.
|
||||
"""
|
examples/modelfile-midjourney/README.md (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
# Midjourney Prompt Generator Modelfile
|
||||
|
||||
This simple modelfile will help create a prompt to feed to Midjourney.
|
||||
|
||||
```text
|
||||
ollama create midjourney
|
||||
|
||||
ollama run midjourney
|
||||
>>> a sports car in the mountains.
|
||||
A sleek, high-performance automobile cuts through a serpentine mountain landscape. The concept is a classic illustration of speed and power, depicted in the style of pop art by Andy Warhol. The color palette is dominated by bold, primary hues of red, blue, and yellow, with striking accent colors of white, black, and metallic shades. The lighting is bright and focused, casting sharp shadows on the rugged terrain. A sense of excitement and anticipation permeates throughout the scene, as the car navigates a treacherous course through the winding road. The perspective is low, allowing for a full view of the vehicle's sleek lines and intricate details. The setting takes place in the afternoon during a sunny day in autumn, as evidenced by the vibrant foliage on the mountainside. The time period is modern, with nods to classic car design. The materials are primarily digital, allowing for smooth curves and sharp contrasts. The textures are sleek and polished, with meticulously detailed lines and brushstrokes that accentuate the car's aerodynamic design. The patterns consist of geometric shapes and bold stripes, adding to the car's dynamic appeal. The genre is modern realism, with a focus on precision and detail. The rendering style is highly technical, capturing the nuances and subtleties of the vehicle and its surroundings in breathtaking detail.
|
||||
```
|
examples/modelfile-recipemaker/README.md (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
# Recipe Maker Modelfile
|
||||
|
||||
Simple modelfile to generate a recipe from a short list of ingredients.
|
||||
|
||||
```
|
||||
ollama create recipemaker
|
||||
|
||||
ollama run recipemaker
|
||||
>>> chilli pepper, white chocolate, kale
|
||||
Ingredients:
|
||||
- 1 small chili pepper
|
||||
- 4 squares of white chocolate
|
||||
- handful of kale leaves
|
||||
|
||||
Instructions:
|
||||
1. In a blender or food processor, puree the chilies and white chocolate until smooth.
|
||||
2. Add the chopped kale leaves to the blender and pulse until well combined.
|
||||
3. Serve immediately as a dip for crackers or use it as an ingredient in your favorite recipe. The mixture of spicy chili pepper with sweet white chocolate and nutritious
|
||||
kale will make your taste buds dance with delight!
|
||||
```
|
examples/modelfile-sentiments/Modelfile (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
# Modelfile for creating a sentiment analyzer.
|
||||
# Run `ollama create sentiments -f pathtofile` and then `ollama run sentiments` and enter some text to analyze
|
||||
|
||||
FROM orca
|
||||
TEMPLATE """
|
||||
{{- if .First }}
|
||||
### System:
|
||||
{{ .System }}
|
||||
{{- end }}
|
||||
### User:
|
||||
I hate it when my phone dies
|
||||
### Response:
|
||||
NEGATIVE
|
||||
### User:
|
||||
He is awesome
|
||||
### Response:
|
||||
POSITIVE
|
||||
### User:
|
||||
This is the link to the article
|
||||
### Response:
|
||||
NEUTRAL
|
||||
### User:
|
||||
{{ .Prompt }}
|
||||
|
||||
### Response:
|
||||
"""
|
||||
|
||||
SYSTEM """You are a sentiment analyzer. You will receive text and output only one word, either POSITIVE or NEGATIVE or NEUTRAL, depending on the sentiment of the text."""
|
examples/modelfile-sentiments/Readme.md (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
# Sentiments Modelfile
|
||||
|
||||
This is a simple sentiment analyzer using the Orca model. When you pull Orca from the registry, it has a Template already defined that looks like this:
|
||||
|
||||
```Modelfile
|
||||
{{- if .First }}
|
||||
### System:
|
||||
{{ .System }}
|
||||
{{- end }}
|
||||
|
||||
### User:
|
||||
{{ .Prompt }}
|
||||
|
||||
### Response:
|
||||
```
|
||||
|
||||
If we just wanted to have the text:
|
||||
|
||||
```Plaintext
|
||||
You are a sentiment analyzer. You will receive text and output only one word, either POSITIVE or NEGATIVE or NEUTRAL, depending on the sentiment of the text.
|
||||
```
|
||||
|
||||
then we could have put this in a SYSTEM block. But we want to provide examples, which requires updating the full Template. Any Modelfile you create will inherit all the settings from the source model. But in this example, we are overriding the Template.
|
||||
|
||||
When providing examples for the input and output, you should include the way the model usually provides information. Since the Orca model expects the user prompt to appear after ### User: and the response to appear after ### Response:, we should format our examples the same way. If we were using the Llama 2 model, the format would be a bit different.
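Once the model has been created with `ollama create sentiments -f ./Modelfile`, it can also be called from code rather than through `ollama run`. The sketch below is a minimal illustration only: it assumes the model was created under the name `sentiments`, that the Ollama server is running locally on the default port, and the `classify` helper is just a name chosen here for clarity. It follows the same `/api/generate` pattern used by the Python examples elsewhere in this repository.

```python
import json
import requests

# NOTE: ollama must be running (`ollama serve`) and the `sentiments` model must
# already have been created from the Modelfile above.
def classify(text: str) -> str:
    r = requests.post(
        "http://localhost:11434/api/generate",
        json={"model": "sentiments", "prompt": text, "stream": False},
    )
    r.raise_for_status()
    # With streaming disabled, the full answer comes back as a single JSON object.
    return json.loads(r.text)["response"].strip()

if __name__ == "__main__":
    print(classify("I hate it when my phone dies"))  # expected: NEGATIVE
    print(classify("He is awesome"))                 # expected: POSITIVE
```

Because the SYSTEM prompt constrains the output to a single word, the returned string should be POSITIVE, NEGATIVE, or NEUTRAL.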
|
examples/python-dockerit/Modelfile (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
FROM mistral
|
||||
SYSTEM """
|
||||
You are an experienced Devops engineer focused on docker. When given specifications for a particular need or application you know the best way to host that within a docker container. For instance if someone tells you they want an nginx server to host files located at /web you will answer as follows
|
||||
|
||||
---start
|
||||
FROM nginx:alpine
|
||||
COPY /myweb /usr/share/nginx/html
|
||||
EXPOSE 80
|
||||
---end
|
||||
|
||||
Notice that the answer you should give is just the contents of the dockerfile with no explanation and there are three dashes and the word start at the beginning and 3 dashes and the word end. The full output can be piped into a file and run as is. Here is another example. The user will ask to launch a Postgres server with a password of abc123. And the response should be
|
||||
|
||||
---start
|
||||
FROM postgres:latest
|
||||
ENV POSTGRES_PASSWORD=abc123
|
||||
EXPOSE 5432
|
||||
---end
|
||||
|
||||
Again it's just the contents of the dockerfile and nothing else.
|
||||
"""
|
examples/python-dockerit/README.md (new file, 15 lines)
@@ -0,0 +1,15 @@
|
||||
# DockerIt
|
||||
|
||||
DockerIt is a tool to help you build and run your application in a Docker container. It consists of a model that defines the system prompt and model weights to use, along with a Python script that then builds the container and runs the image automatically.
|
||||
|
||||
## Caveats
|
||||
|
||||
This is a simple example. It assumes the generated Dockerfile content is going to work. In many cases, even with simple web servers, it fails when trying to copy files that don't exist. It is simply an example of what you could possibly do.
|
||||
|
||||
## Example Usage
|
||||
|
||||
```bash
|
||||
> python3 ./dockerit.py "simple postgres server with admin password set to 123"
|
||||
Enter the name of the image: matttest
|
||||
Container named happy_keller started with id: 7c201bb6c30f02b356ddbc8e2a5af9d7d7d7b8c228519c9a501d15c0bd9d6b3e
|
||||
```
|
examples/python-dockerit/dockerit.py (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
import requests, json, docker, io, sys
|
||||
inputDescription = " ".join(sys.argv[1:])
|
||||
imageName = input("Enter the name of the image: ")
|
||||
client = docker.from_env()
|
||||
s = requests.Session()
|
||||
output=""
|
||||
with s.post('http://localhost:11434/api/generate', json={'model': 'dockerit', 'prompt': inputDescription}, stream=True) as r:
|
||||
for line in r.iter_lines():
|
||||
if line:
|
||||
j = json.loads(line)
|
||||
if "response" in j:
|
||||
output = output +j["response"]
|
||||
output = output[output.find("---start")+9:output.find("---end")-1]
|
||||
f = io.BytesIO(bytes(output, 'utf-8'))
|
||||
client.images.build(fileobj=f, tag=imageName)
|
||||
container = client.containers.run(imageName, detach=True)
|
||||
print("Container named", container.name, " started with id: ",container.id)
|
examples/python-dockerit/requirements.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
docker
|
examples/python-rag-newssummary/README.md (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
# News Summarizer
|
||||
|
||||
This example goes through a series of steps:
|
||||
|
||||
1. You choose a topic area (e.g., "news", "NVidia", "music", etc.).
2. It gets the most recent articles on that topic from various sources.
3. It uses Ollama to summarize each article.
4. It creates chunks of sentences from each article.
5. It uses Sentence Transformers to generate embeddings for each of those chunks.
6. You enter a question regarding the summaries shown.
7. It uses Sentence Transformers to generate an embedding for that question.
8. It uses the embedded question to find the most similar chunks.
9. It feeds all of that to Ollama to generate a good answer to your question based on these news articles.
|
||||
|
||||
This example lets you pick from a few different topic areas, then summarize the most recent x articles for that topic. It then creates chunks of sentences from each article and then generates embeddings for each of those chunks.
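The retrieval part (steps 5, 7, and 8 above) boils down to embedding the question with the same Sentence Transformers model used for the chunks and running a cosine-distance nearest-neighbour search over the stored chunk embeddings, which is what `knn_search` in `utils.py` does. The snippet below is only a condensed sketch of that idea: the sample chunks and question are made up, and the chunk embeddings are held in a flat list instead of the per-article structure used by the full pipeline in `summ.py`.

```python
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.neighbors import NearestNeighbors

model = SentenceTransformer("all-MiniLM-L6-v2")

# chunks: sentence chunks produced from the articles (step 4); sample data here
chunks = [
    "NVIDIA announced a new GPU at its developer conference.",
    "The chip is expected to ship to partners later this year.",
]
chunk_embeddings = model.encode(chunks)  # step 5

# Fit a cosine-distance KNN index over the chunk embeddings
knn = NearestNeighbors(n_neighbors=min(2, len(chunks)), metric="cosine")
knn.fit(np.array(chunk_embeddings))

# Embed the question (step 7) and find the closest chunks (step 8)
question_embedding = model.encode(["What did NVIDIA announce?"])
distances, indices = knn.kneighbors(question_embedding)
best_chunks = [chunks[i] for i in indices[0]]

# best_chunks would then be packed into the system prompt sent to Ollama (step 9)
print(best_chunks)
```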
|
||||
|
||||
You can run the example like this:
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
python summ.py
|
||||
```
|
examples/python-rag-newssummary/requirements.txt (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
beautifulsoup4==4.12.2
|
||||
feedparser==6.0.10
|
||||
mattsollamatools==0.0.8
|
||||
newspaper3k==0.2.8
|
||||
nltk==3.8.1
|
||||
numpy==1.24.3
|
||||
Requests==2.31.0
|
||||
scikit_learn==1.3.0
|
||||
sentence_transformers==2.2.2
|
examples/python-rag-newssummary/summ.py (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
import curses
|
||||
import json
|
||||
from utils import get_url_for_topic, topic_urls, menu, getUrls, get_summary, getArticleText, knn_search
|
||||
import requests
|
||||
from sentence_transformers import SentenceTransformer
|
||||
from mattsollamatools import chunker
|
||||
|
||||
if __name__ == "__main__":
|
||||
chosen_topic = curses.wrapper(menu)
|
||||
print("Here is your news summary:\n")
|
||||
urls = getUrls(chosen_topic, n=5)
|
||||
model = SentenceTransformer('all-MiniLM-L6-v2')
|
||||
allEmbeddings = []
|
||||
|
||||
for url in urls:
|
||||
article={}
|
||||
article['embeddings'] = []
|
||||
article['url'] = url
|
||||
text = getArticleText(url)
|
||||
summary = get_summary(text)
|
||||
chunks = chunker(text) # Split the article text into sentence chunks using the chunker from mattsollamatools
|
||||
embeddings = model.encode(chunks)
|
||||
for (chunk, embedding) in zip(chunks, embeddings):
|
||||
item = {}
|
||||
item['source'] = chunk
|
||||
item['embedding'] = embedding.tolist() # Convert NumPy array to list
|
||||
item['sourcelength'] = len(chunk)
|
||||
article['embeddings'].append(item)
|
||||
|
||||
allEmbeddings.append(article)
|
||||
|
||||
print(f"{summary}\n")
|
||||
|
||||
|
||||
while True:
|
||||
context = []
|
||||
# Input a question from the user
|
||||
question = input("Enter your question about the news, or type quit: ")
|
||||
|
||||
if question.lower() == 'quit':
|
||||
break
|
||||
|
||||
# Embed the user's question
|
||||
question_embedding = model.encode([question])
|
||||
|
||||
# Perform KNN search to find the best matches (indices and source text)
|
||||
best_matches = knn_search(question_embedding, allEmbeddings, k=10)
|
||||
|
||||
|
||||
sourcetext=""
|
||||
for i, (index, source_text) in enumerate(best_matches, start=1):
|
||||
sourcetext += f"{i}. Index: {index}, Source Text: {source_text}"
|
||||
|
||||
systemPrompt = f"Only use the following information to answer the question. Do not use anything else: {sourcetext}"
|
||||
|
||||
url = "http://localhost:11434/api/generate"
|
||||
|
||||
payload = {
|
||||
"model": "mistral-openorca",
|
||||
"prompt": question,
|
||||
"system": systemPrompt,
|
||||
"stream": False,
|
||||
"context": context
|
||||
}
|
||||
|
||||
# Convert the payload to a JSON string
|
||||
payload_json = json.dumps(payload)
|
||||
|
||||
# Set the headers to specify JSON content
|
||||
headers = {
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
# Send the POST request
|
||||
response = requests.post(url, data=payload_json, headers=headers)
|
||||
|
||||
# Check the response
|
||||
if response.status_code == 200:
|
||||
output = json.loads(response.text)
|
||||
context = output['context']
|
||||
print(output['response']+ "\n")
|
||||
|
||||
|
||||
else:
|
||||
print(f"Request failed with status code {response.status_code}")
|
||||
|
examples/python-rag-newssummary/utils.py (new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
import curses
|
||||
import feedparser
|
||||
import requests
|
||||
import unicodedata
|
||||
import json
|
||||
from newspaper import Article
|
||||
from bs4 import BeautifulSoup
|
||||
from nltk.tokenize import sent_tokenize, word_tokenize
|
||||
import numpy as np
|
||||
from sklearn.neighbors import NearestNeighbors
|
||||
from mattsollamatools import chunker
|
||||
|
||||
# Create a dictionary to store topics and their URLs
|
||||
topic_urls = {
|
||||
"Mac": "https://9to5mac.com/guides/mac/feed",
|
||||
"News": "http://www.npr.org/rss/rss.php?id=1001",
|
||||
"Nvidia": "https://nvidianews.nvidia.com/releases.xml",
|
||||
"Raspberry Pi": "https://www.raspberrypi.com/news/feed/",
|
||||
"Music": "https://www.billboard.com/c/music/music-news/feed/"
|
||||
}
|
||||
|
||||
# Use curses to create a menu of topics
|
||||
def menu(stdscr):
|
||||
chosen_topic = get_url_for_topic(stdscr)
|
||||
url = topic_urls[chosen_topic] if chosen_topic in topic_urls else "Topic not found"
|
||||
|
||||
stdscr.addstr(len(topic_urls) + 3, 0, f"Selected URL for {chosen_topic}: {url}")
|
||||
stdscr.refresh()
|
||||
|
||||
return chosen_topic
|
||||
|
||||
# You have chosen a topic. Now return the url for that topic
|
||||
def get_url_for_topic(stdscr):
|
||||
curses.curs_set(0) # Hide the cursor
|
||||
stdscr.clear()
|
||||
|
||||
stdscr.addstr(0, 0, "Choose a topic using the arrow keys (Press Enter to select):")
|
||||
|
||||
# Create a list of topics
|
||||
topics = list(topic_urls.keys())
|
||||
current_topic = 0
|
||||
|
||||
while True:
|
||||
for i, topic in enumerate(topics):
|
||||
if i == current_topic:
|
||||
stdscr.addstr(i + 2, 2, f"> {topic}")
|
||||
else:
|
||||
stdscr.addstr(i + 2, 2, f" {topic}")
|
||||
|
||||
stdscr.refresh()
|
||||
|
||||
key = stdscr.getch()
|
||||
|
||||
if key == curses.KEY_DOWN and current_topic < len(topics) - 1:
|
||||
current_topic += 1
|
||||
elif key == curses.KEY_UP and current_topic > 0:
|
||||
current_topic -= 1
|
||||
elif key == 10: # Enter key
|
||||
return topic_urls[topics[current_topic]]
|
||||
|
||||
# Get the last N URLs from an RSS feed
|
||||
def getUrls(feed_url, n=20):
|
||||
feed = feedparser.parse(feed_url)
|
||||
entries = feed.entries[-n:]
|
||||
urls = [entry.link for entry in entries]
|
||||
return urls
|
||||
|
||||
# Often there are a bunch of ads and menus on pages for a news article. This uses newspaper3k to get just the text of the article.
|
||||
def getArticleText(url):
|
||||
article = Article(url)
|
||||
article.download()
|
||||
article.parse()
|
||||
return article.text
|
||||
|
||||
def get_summary(text):
|
||||
systemPrompt = "Write a concise summary of the text, return your responses with 5 lines that cover the key points of the text given."
|
||||
prompt = text
|
||||
|
||||
url = "http://localhost:11434/api/generate"
|
||||
|
||||
payload = {
|
||||
"model": "mistral-openorca",
|
||||
"prompt": prompt,
|
||||
"system": systemPrompt,
|
||||
"stream": False
|
||||
}
|
||||
payload_json = json.dumps(payload)
|
||||
headers = {"Content-Type": "application/json"}
|
||||
response = requests.post(url, data=payload_json, headers=headers)
|
||||
|
||||
return json.loads(response.text)["response"]
|
||||
|
||||
# Perform K-nearest neighbors (KNN) search
|
||||
def knn_search(question_embedding, embeddings, k=5):
|
||||
X = np.array([item['embedding'] for article in embeddings for item in article['embeddings']])
|
||||
source_texts = [item['source'] for article in embeddings for item in article['embeddings']]
|
||||
|
||||
# Fit a KNN model on the embeddings
|
||||
knn = NearestNeighbors(n_neighbors=k, metric='cosine')
|
||||
knn.fit(X)
|
||||
|
||||
# Find the indices and distances of the k-nearest neighbors
|
||||
distances, indices = knn.kneighbors(question_embedding, n_neighbors=k)
|
||||
|
||||
# Get the indices and source texts of the best matches
|
||||
best_matches = [(indices[0][i], source_texts[indices[0][i]]) for i in range(k)]
|
||||
|
||||
return best_matches
|
examples/python-simplegenerate/client.py (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
import json
|
||||
import requests
|
||||
|
||||
# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
|
||||
model = 'llama2' # TODO: update this for whatever model you wish to use
|
||||
|
||||
def generate(prompt, context):
|
||||
r = requests.post('http://localhost:11434/api/generate',
|
||||
json={
|
||||
'model': model,
|
||||
'prompt': prompt,
|
||||
'context': context,
|
||||
},
|
||||
stream=True)
|
||||
r.raise_for_status()
|
||||
|
||||
for line in r.iter_lines():
|
||||
body = json.loads(line)
|
||||
response_part = body.get('response', '')
|
||||
# the response streams one token at a time, print that as we receive it
|
||||
print(response_part, end='', flush=True)
|
||||
|
||||
if 'error' in body:
|
||||
raise Exception(body['error'])
|
||||
|
||||
if body.get('done', False):
|
||||
return body['context']
|
||||
|
||||
def main():
|
||||
context = [] # the context stores a conversation history, you can use this to make the model more context aware
|
||||
while True:
|
||||
user_input = input("Enter a prompt: ")
|
||||
print()
|
||||
context = generate(user_input, context)
|
||||
print()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
examples/typescript-mentors/.gitignore (new file, vendored, 2 lines)
@@ -0,0 +1,2 @@
|
||||
node_modules
|
||||
package-lock.json
|
examples/typescript-mentors/README.md (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
# Ask the Mentors
|
||||
|
||||
This example demonstrates how one would create a set of 'mentors' you can have a conversation with. The mentors are generated using the `character-generator.ts` file. This will use **Stable Beluga 70b** to create a bio and a list of verbal tics and common phrases used by each person. Then `mentors.ts` will take a question, choose three of the 'mentors', and start a conversation with them. Occasionally, they will talk to each other, and other times they will just deliver a set of monologues. It's fun to see what they do and say.
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
ts-node ./character-generator.ts "Lorne Greene"
|
||||
```
|
||||
|
||||
This will create `lornegreene/Modelfile`. Now you can create a model with this command:
|
||||
|
||||
```bash
|
||||
ollama create lornegreene -f lornegreene/Modelfile
|
||||
```
|
||||
|
||||
If you want to add your own mentors, you will have to update the code to look at your namespace instead of **mattw**. Also set the list of mentors to include yours.
|
||||
|
||||
```bash
|
||||
ts-node ./mentors.ts "What is a Jackalope?"
|
||||
```
|
examples/typescript-mentors/character-generator.ts (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
import { Ollama } from 'ollama-node'
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
|
||||
async function characterGenerator() {
|
||||
const character = process.argv[2];
|
||||
console.log(`You are creating a character for ${character}.`);
|
||||
const foldername = character.replace(/\s/g, '').toLowerCase();
|
||||
const directory = path.join(__dirname, foldername);
|
||||
if (!fs.existsSync(directory)) {
|
||||
fs.mkdirSync(directory, { recursive: true });
|
||||
}
|
||||
|
||||
const ollama = new Ollama();
|
||||
ollama.setModel("stablebeluga2:70b-q4_K_M");
|
||||
const bio = await ollama.generate(`create a bio of ${character} in a single long paragraph. Instead of saying '${character} is...' or '${character} was...' use language like 'You are...' or 'You were...'. Then create a paragraph describing the speaking mannerisms and style of ${character}. Don't include anything about how ${character} looked or what they sounded like, just focus on the words they said. Instead of saying '${character} would say...' use language like 'You should say...'. If you use quotes, always use single quotes instead of double quotes. If there are any specific words or phrases you used a lot, show how you used them. `);
|
||||
|
||||
const thecontents = `FROM llama2\nSYSTEM """\n${bio.response.replace(/(\r\n|\n|\r)/gm, " ").replace('would', 'should')} All answers to questions should be related back to what you are most known for.\n"""`;
|
||||
|
||||
fs.writeFile(path.join(directory, 'Modelfile'), thecontents, (err: any) => {
|
||||
if (err) throw err;
|
||||
console.log('The file has been saved!');
|
||||
});
|
||||
}
|
||||
|
||||
characterGenerator();
|
examples/typescript-mentors/mentors.ts (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
import { Ollama } from 'ollama-node';
|
||||
|
||||
const mentorCount = 3;
|
||||
const ollama = new Ollama();
|
||||
|
||||
function getMentors(): string[] {
|
||||
const mentors = ['Gary Vaynerchuk', 'Kanye West', 'Martha Stewart', 'Neil deGrasse Tyson', 'Owen Wilson', 'Ronald Reagan', 'Donald Trump', 'Barack Obama', 'Jeff Bezos'];
|
||||
const chosenMentors: string[] = [];
|
||||
for (let i = 0; i < mentorCount; i++) {
|
||||
const mentor = mentors[Math.floor(Math.random() * mentors.length)];
|
||||
chosenMentors.push(mentor);
|
||||
mentors.splice(mentors.indexOf(mentor), 1);
|
||||
}
|
||||
return chosenMentors;
|
||||
}
|
||||
|
||||
function getMentorFileName(mentor: string): string {
|
||||
const model = mentor.toLowerCase().replace(/\s/g, '');
|
||||
return `mattw/${model}`;
|
||||
}
|
||||
|
||||
async function getSystemPrompt(mentor: string, isLast: boolean, question: string): Promise<string> {
|
||||
ollama.setModel(getMentorFileName(mentor));
|
||||
const info = await ollama.showModelInfo()
|
||||
let SystemPrompt = info.system || '';
|
||||
SystemPrompt += ` You should continue the conversation as if you were ${mentor} and acknowledge the people before you in the conversation. You should adopt their mannerisms and tone, but also not use language they wouldn't use. If they are not known to know about the concept in the question, don't offer an answer. Your answer should be no longer than 1 paragraph. And definitely try not to sound like anyone else. Don't repeat any slang or phrases already used. And if it is a question the original ${mentor} wouldn't have known the answer to, just say that you don't know, in the style of ${mentor}. And think about the time the person lived. Don't use terminology that they wouldn't have used.`
|
||||
|
||||
if (isLast) {
|
||||
SystemPrompt += ` End your answer with something like I hope our answers help you out`;
|
||||
} else {
|
||||
SystemPrompt += ` Remember, this is a conversation, so you don't need a conclusion, but end your answer with a question related to the first question: "${question}".`;
|
||||
}
|
||||
return SystemPrompt;
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const mentors = getMentors();
|
||||
const question = process.argv[2];
|
||||
let theConversation = `Here is the conversation so far.\nYou: ${question}\n`
|
||||
|
||||
for await (const mentor of mentors) {
|
||||
const SystemPrompt = await getSystemPrompt(mentor, mentor === mentors[mentorCount - 1], question);
|
||||
ollama.setModel(getMentorFileName(mentor));
|
||||
ollama.setSystemPrompt(SystemPrompt);
|
||||
let output = '';
|
||||
process.stdout.write(`\n${mentor}: `);
|
||||
for await (const chunk of ollama.streamingGenerate(theConversation + `Continue the conversation as if you were ${mentor} on the question "${question}".`)) {
|
||||
if (chunk.response) {
|
||||
output += chunk.response;
|
||||
process.stdout.write(chunk.response);
|
||||
} else {
|
||||
process.stdout.write('\n');
|
||||
}
|
||||
}
|
||||
theConversation += `${mentor}: ${output}\n\n`
|
||||
}
|
||||
}
|
||||
|
||||
main();
|
examples/typescript-mentors/package.json (new file, 7 lines)
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"dependencies": {
|
||||
"fs": "^0.0.1-security",
|
||||
"ollama-node": "^0.0.3",
|
||||
"path": "^0.12.7"
|
||||
}
|
||||
}
|
format/bytes.go (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
package format
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
|
||||
Byte = 1
|
||||
KiloByte = Byte * 1000
|
||||
MegaByte = KiloByte * 1000
|
||||
GigaByte = MegaByte * 1000
|
||||
)
|
||||
|
||||
func HumanBytes(b int64) string {
|
||||
switch {
|
||||
case b > GigaByte:
|
||||
return fmt.Sprintf("%d GB", b/GigaByte)
|
||||
case b > MegaByte:
|
||||
return fmt.Sprintf("%d MB", b/MegaByte)
|
||||
case b > KiloByte:
|
||||
return fmt.Sprintf("%d KB", b/KiloByte)
|
||||
default:
|
||||
return fmt.Sprintf("%d B", b)
|
||||
}
|
||||
}
|
format/openssh.go (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code originally from https://go-review.googlesource.com/c/crypto/+/218620
|
||||
|
||||
// TODO: replace with upstream once the above change is merged and released.
|
||||
|
||||
package format
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
const privateKeyAuthMagic = "openssh-key-v1\x00"
|
||||
|
||||
type openSSHEncryptedPrivateKey struct {
|
||||
CipherName string
|
||||
KDFName string
|
||||
KDFOptions string
|
||||
KeysCount uint32
|
||||
PubKey []byte
|
||||
KeyBlocks []byte
|
||||
}
|
||||
|
||||
type openSSHPrivateKey struct {
|
||||
Check1 uint32
|
||||
Check2 uint32
|
||||
Keytype string
|
||||
Rest []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
type openSSHEd25519PrivateKey struct {
|
||||
Pub []byte
|
||||
Priv []byte
|
||||
Comment string
|
||||
Pad []byte `ssh:"rest"`
|
||||
}
|
||||
|
||||
func OpenSSHPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) {
|
||||
var check uint32
|
||||
if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pk1 openSSHPrivateKey
|
||||
pk1.Check1 = check
|
||||
pk1.Check2 = check
|
||||
|
||||
var w openSSHEncryptedPrivateKey
|
||||
w.KeysCount = 1
|
||||
|
||||
if k, ok := key.(*ed25519.PrivateKey); ok {
|
||||
key = *k
|
||||
}
|
||||
|
||||
switch k := key.(type) {
|
||||
case ed25519.PrivateKey:
|
||||
pub, priv := k[32:], k
|
||||
key := openSSHEd25519PrivateKey{
|
||||
Pub: pub,
|
||||
Priv: priv,
|
||||
Comment: comment,
|
||||
}
|
||||
|
||||
pk1.Keytype = ssh.KeyAlgoED25519
|
||||
pk1.Rest = ssh.Marshal(key)
|
||||
|
||||
w.PubKey = ssh.Marshal(struct {
|
||||
KeyType string
|
||||
Pub []byte
|
||||
}{
|
||||
ssh.KeyAlgoED25519, pub,
|
||||
})
|
||||
default:
|
||||
return nil, fmt.Errorf("ssh: unknown key type %T", k)
|
||||
}
|
||||
|
||||
w.KeyBlocks = openSSHPadding(ssh.Marshal(pk1), 8)
|
||||
|
||||
w.CipherName, w.KDFName, w.KDFOptions = "none", "none", ""
|
||||
|
||||
return &pem.Block{
|
||||
Type: "OPENSSH PRIVATE KEY",
|
||||
Bytes: append([]byte(privateKeyAuthMagic), ssh.Marshal(w)...),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func openSSHPadding(block []byte, blocksize int) []byte {
|
||||
for i, j := 0, len(block); (j+i)%blocksize != 0; i++ {
|
||||
block = append(block, byte(i+1))
|
||||
}
|
||||
|
||||
return block
|
||||
}
|
@@ -7,26 +7,14 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// HumanDuration returns a human-readable approximation of a duration
|
||||
// (eg. "About a minute", "4 hours ago", etc.).
|
||||
// Modified version of github.com/docker/go-units.HumanDuration
|
||||
func HumanDuration(d time.Duration) string {
|
||||
return HumanDurationWithCase(d, true)
|
||||
}
|
||||
|
||||
// HumanDurationWithCase returns a human-readable approximation of a
|
||||
// duration (eg. "About a minute", "4 hours ago", etc.). but allows
|
||||
// you to specify whether the first word should be capitalized
|
||||
// (eg. "About" vs. "about")
|
||||
func HumanDurationWithCase(d time.Duration, useCaps bool) string {
|
||||
// humanDuration returns a human-readable approximation of a
|
||||
// duration (eg. "About a minute", "4 hours ago", etc.).
|
||||
func humanDuration(d time.Duration) string {
|
||||
seconds := int(d.Seconds())
|
||||
|
||||
switch {
|
||||
case seconds < 1:
|
||||
if useCaps {
|
||||
return "Less than a second"
|
||||
}
|
||||
return "less than a second"
|
||||
return "Less than a second"
|
||||
case seconds == 1:
|
||||
return "1 second"
|
||||
case seconds < 60:
|
||||
@@ -36,10 +24,7 @@ func HumanDurationWithCase(d time.Duration, useCaps bool) string {
|
||||
minutes := int(d.Minutes())
|
||||
switch {
|
||||
case minutes == 1:
|
||||
if useCaps {
|
||||
return "About a minute"
|
||||
}
|
||||
return "about a minute"
|
||||
return "About a minute"
|
||||
case minutes < 60:
|
||||
return fmt.Sprintf("%d minutes", minutes)
|
||||
}
|
||||
@@ -47,10 +32,7 @@ func HumanDurationWithCase(d time.Duration, useCaps bool) string {
|
||||
hours := int(math.Round(d.Hours()))
|
||||
switch {
|
||||
case hours == 1:
|
||||
if useCaps {
|
||||
return "About an hour"
|
||||
}
|
||||
return "about an hour"
|
||||
return "About an hour"
|
||||
case hours < 48:
|
||||
return fmt.Sprintf("%d hours", hours)
|
||||
case hours < 24*7*2:
|
||||
@@ -65,77 +47,22 @@ func HumanDurationWithCase(d time.Duration, useCaps bool) string {
|
||||
}
|
||||
|
||||
func HumanTime(t time.Time, zeroValue string) string {
|
||||
return humanTimeWithCase(t, zeroValue, true)
|
||||
return humanTime(t, zeroValue)
|
||||
}
|
||||
|
||||
func HumanTimeLower(t time.Time, zeroValue string) string {
|
||||
return humanTimeWithCase(t, zeroValue, false)
|
||||
return strings.ToLower(humanTime(t, zeroValue))
|
||||
}
|
||||
|
||||
func humanTimeWithCase(t time.Time, zeroValue string, useCaps bool) string {
|
||||
func humanTime(t time.Time, zeroValue string) string {
|
||||
if t.IsZero() {
|
||||
return zeroValue
|
||||
}
|
||||
|
||||
delta := time.Since(t)
|
||||
if delta < 0 {
|
||||
return HumanDurationWithCase(-delta, useCaps) + " from now"
|
||||
return humanDuration(-delta) + " from now"
|
||||
}
|
||||
return HumanDurationWithCase(delta, useCaps) + " ago"
|
||||
}
|
||||
|
||||
// ExcatDuration returns a human readable hours/minutes/seconds or milliseconds format of a duration
|
||||
// the most precise level of duration is milliseconds
|
||||
func ExactDuration(d time.Duration) string {
|
||||
if d.Seconds() < 1 {
|
||||
if d.Milliseconds() == 1 {
|
||||
return fmt.Sprintf("%d millisecond", d.Milliseconds())
|
||||
}
|
||||
return fmt.Sprintf("%d milliseconds", d.Milliseconds())
|
||||
}
|
||||
|
||||
var readableDur strings.Builder
|
||||
|
||||
dur := d.String()
|
||||
|
||||
// split the default duration string format of 0h0m0s into something nicer to read
|
||||
h := strings.Split(dur, "h")
|
||||
if len(h) > 1 {
|
||||
hours := h[0]
|
||||
if hours == "1" {
|
||||
readableDur.WriteString(fmt.Sprintf("%s hour ", hours))
|
||||
} else {
|
||||
readableDur.WriteString(fmt.Sprintf("%s hours ", hours))
|
||||
}
|
||||
dur = h[1]
|
||||
}
|
||||
|
||||
m := strings.Split(dur, "m")
|
||||
if len(m) > 1 {
|
||||
mins := m[0]
|
||||
switch mins {
|
||||
case "0":
|
||||
// skip
|
||||
case "1":
|
||||
readableDur.WriteString(fmt.Sprintf("%s minute ", mins))
|
||||
default:
|
||||
readableDur.WriteString(fmt.Sprintf("%s minutes ", mins))
|
||||
}
|
||||
dur = m[1]
|
||||
}
|
||||
|
||||
s := strings.Split(dur, "s")
|
||||
if len(s) > 0 {
|
||||
sec := s[0]
|
||||
switch sec {
|
||||
case "0":
|
||||
// skip
|
||||
case "1":
|
||||
readableDur.WriteString(fmt.Sprintf("%s second ", sec))
|
||||
default:
|
||||
readableDur.WriteString(fmt.Sprintf("%s seconds ", sec))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.TrimSpace(readableDur.String())
|
||||
|
||||
return humanDuration(delta) + " ago"
|
||||
}
|
||||
|
@@ -11,92 +11,25 @@ func assertEqual(t *testing.T, a interface{}, b interface{}) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHumanDuration(t *testing.T) {
|
||||
day := 24 * time.Hour
|
||||
week := 7 * day
|
||||
month := 30 * day
|
||||
year := 365 * day
|
||||
|
||||
assertEqual(t, "Less than a second", HumanDuration(450*time.Millisecond))
|
||||
assertEqual(t, "Less than a second", HumanDurationWithCase(450*time.Millisecond, true))
|
||||
assertEqual(t, "less than a second", HumanDurationWithCase(450*time.Millisecond, false))
|
||||
assertEqual(t, "1 second", HumanDuration(1*time.Second))
|
||||
assertEqual(t, "45 seconds", HumanDuration(45*time.Second))
|
||||
assertEqual(t, "46 seconds", HumanDuration(46*time.Second))
|
||||
assertEqual(t, "59 seconds", HumanDuration(59*time.Second))
|
||||
assertEqual(t, "About a minute", HumanDuration(60*time.Second))
|
||||
assertEqual(t, "About a minute", HumanDurationWithCase(1*time.Minute, true))
|
||||
assertEqual(t, "about a minute", HumanDurationWithCase(1*time.Minute, false))
|
||||
assertEqual(t, "3 minutes", HumanDuration(3*time.Minute))
|
||||
assertEqual(t, "35 minutes", HumanDuration(35*time.Minute))
|
||||
assertEqual(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
|
||||
assertEqual(t, "45 minutes", HumanDuration(45*time.Minute))
|
||||
assertEqual(t, "45 minutes", HumanDuration(45*time.Minute+40*time.Second))
|
||||
assertEqual(t, "46 minutes", HumanDuration(46*time.Minute))
|
||||
assertEqual(t, "59 minutes", HumanDuration(59*time.Minute))
|
||||
assertEqual(t, "About an hour", HumanDuration(1*time.Hour))
|
||||
assertEqual(t, "About an hour", HumanDurationWithCase(1*time.Hour+29*time.Minute, true))
|
||||
assertEqual(t, "about an hour", HumanDurationWithCase(1*time.Hour+29*time.Minute, false))
|
||||
assertEqual(t, "2 hours", HumanDuration(1*time.Hour+31*time.Minute))
|
||||
assertEqual(t, "2 hours", HumanDuration(1*time.Hour+59*time.Minute))
|
||||
assertEqual(t, "3 hours", HumanDuration(3*time.Hour))
|
||||
assertEqual(t, "3 hours", HumanDuration(3*time.Hour+29*time.Minute))
|
||||
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+31*time.Minute))
|
||||
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+59*time.Minute))
|
||||
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
|
||||
assertEqual(t, "24 hours", HumanDuration(24*time.Hour))
|
||||
assertEqual(t, "36 hours", HumanDuration(1*day+12*time.Hour))
|
||||
assertEqual(t, "2 days", HumanDuration(2*day))
|
||||
assertEqual(t, "7 days", HumanDuration(7*day))
|
||||
assertEqual(t, "13 days", HumanDuration(13*day+5*time.Hour))
|
||||
assertEqual(t, "2 weeks", HumanDuration(2*week))
|
||||
assertEqual(t, "2 weeks", HumanDuration(2*week+4*day))
|
||||
assertEqual(t, "3 weeks", HumanDuration(3*week))
|
||||
assertEqual(t, "4 weeks", HumanDuration(4*week))
|
||||
assertEqual(t, "4 weeks", HumanDuration(4*week+3*day))
|
||||
assertEqual(t, "4 weeks", HumanDuration(1*month))
|
||||
assertEqual(t, "6 weeks", HumanDuration(1*month+2*week))
|
||||
assertEqual(t, "2 months", HumanDuration(2*month))
|
||||
assertEqual(t, "2 months", HumanDuration(2*month+2*week))
|
||||
assertEqual(t, "3 months", HumanDuration(3*month))
|
||||
assertEqual(t, "3 months", HumanDuration(3*month+1*week))
|
||||
assertEqual(t, "5 months", HumanDuration(5*month+2*week))
|
||||
assertEqual(t, "13 months", HumanDuration(13*month))
|
||||
assertEqual(t, "23 months", HumanDuration(23*month))
|
||||
assertEqual(t, "24 months", HumanDuration(24*month))
|
||||
assertEqual(t, "2 years", HumanDuration(24*month+2*week))
|
||||
assertEqual(t, "3 years", HumanDuration(3*year+2*month))
|
||||
}
|
||||
|
||||
func TestHumanTime(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
t.Run("zero value", func(t *testing.T) {
|
||||
assertEqual(t, HumanTime(time.Time{}, "never"), "never")
|
||||
})
|
||||
|
||||
t.Run("time in the future", func(t *testing.T) {
|
||||
v := now.Add(48 * time.Hour)
|
||||
assertEqual(t, HumanTime(v, ""), "2 days from now")
|
||||
})
|
||||
|
||||
t.Run("time in the past", func(t *testing.T) {
|
||||
v := now.Add(-48 * time.Hour)
|
||||
assertEqual(t, HumanTime(v, ""), "2 days ago")
|
||||
})
|
||||
}
|
||||
|
||||
func TestExactDuration(t *testing.T) {
|
||||
assertEqual(t, "1 millisecond", ExactDuration(1*time.Millisecond))
|
||||
assertEqual(t, "10 milliseconds", ExactDuration(10*time.Millisecond))
|
||||
assertEqual(t, "1 second", ExactDuration(1*time.Second))
|
||||
assertEqual(t, "10 seconds", ExactDuration(10*time.Second))
|
||||
assertEqual(t, "1 minute", ExactDuration(1*time.Minute))
|
||||
assertEqual(t, "10 minutes", ExactDuration(10*time.Minute))
|
||||
assertEqual(t, "1 hour", ExactDuration(1*time.Hour))
|
||||
assertEqual(t, "10 hours", ExactDuration(10*time.Hour))
|
||||
assertEqual(t, "1 hour 1 second", ExactDuration(1*time.Hour+1*time.Second))
|
||||
assertEqual(t, "1 hour 10 seconds", ExactDuration(1*time.Hour+10*time.Second))
|
||||
assertEqual(t, "1 hour 1 minute", ExactDuration(1*time.Hour+1*time.Minute))
|
||||
assertEqual(t, "1 hour 10 minutes", ExactDuration(1*time.Hour+10*time.Minute))
|
||||
assertEqual(t, "1 hour 1 minute 1 second", ExactDuration(1*time.Hour+1*time.Minute+1*time.Second))
|
||||
assertEqual(t, "10 hours 10 minutes 10 seconds", ExactDuration(10*time.Hour+10*time.Minute+10*time.Second))
|
||||
t.Run("soon", func(t *testing.T) {
|
||||
v := now.Add(800 * time.Millisecond)
|
||||
assertEqual(t, HumanTime(v, ""), "Less than a second from now")
|
||||
})
|
||||
}
|
||||
|
go.mod (10 lines changed)
@@ -8,7 +8,9 @@ require (
|
||||
github.com/mattn/go-runewidth v0.0.14
|
||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/pdevine/readline v1.5.2
|
||||
github.com/spf13/cobra v1.7.0
|
||||
golang.org/x/sync v0.3.0
|
||||
)
|
||||
|
||||
require github.com/rivo/uniseg v0.2.0 // indirect
|
||||
@@ -16,7 +18,6 @@ require github.com/rivo/uniseg v0.2.0 // indirect
|
||||
require (
|
||||
github.com/bytedance/sonic v1.9.1 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/chzyer/readline v1.5.1
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gin-contrib/cors v1.4.0
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
@@ -32,17 +33,18 @@ require (
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.10.0 // indirect
|
||||
golang.org/x/crypto v0.10.0
|
||||
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
|
||||
golang.org/x/net v0.10.0 // indirect
|
||||
golang.org/x/sys v0.10.0 // indirect
|
||||
golang.org/x/sys v0.11.0 // indirect
|
||||
golang.org/x/term v0.10.0
|
||||
golang.org/x/text v0.10.0 // indirect
|
||||
gonum.org/v1/gonum v0.13.0
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
go.sum (16 lines changed)
@@ -6,8 +6,6 @@ github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhD
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -78,6 +76,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pdevine/readline v1.5.2 h1:oz6Y5GdTmhPG+08hhxcAvtHitSANWuA2100Sppb38xI=
github.com/pdevine/readline v1.5.2/go.mod h1:na/LbuE5PYwxI7GyopWdIs3U8HVe89lYlNTFTXH3wOw=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
@@ -118,9 +120,13 @@ golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -128,8 +134,8 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
@@ -139,8 +145,6 @@ golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=

@@ -1,63 +0,0 @@
/**
 * llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
 *
 * MIT License
 *
 * Copyright (c) 2023 Georgi Gerganov
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_CUDA_MAX_DEVICES 16

void ggml_init_cublas(void);
void ggml_cuda_set_tensor_split(const float * tensor_split);

void ggml_cuda_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
size_t ggml_cuda_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
void ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

// TODO: export these with GGML_API
void * ggml_cuda_host_malloc(size_t size);
void ggml_cuda_host_free(void * ptr);

void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);

void ggml_cuda_free_data(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
void ggml_cuda_set_main_device(int main_device);
void ggml_cuda_set_mul_mat_q(bool mul_mat_q);
void ggml_cuda_set_scratch_size(size_t scratch_size);
void ggml_cuda_free_scratch(void);
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);

#ifdef __cplusplus
}
#endif
1159
llama/ggml-metal.m
File diff suppressed because it is too large
@@ -1,530 +0,0 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
* Copyright (c) 2023 Georgi Gerganov
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in all
|
||||
* copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
// Internal header to be included only by llama.cpp.
|
||||
// Contains wrappers around OS interfaces.
|
||||
|
||||
#ifndef LLAMA_UTIL_H
|
||||
#define LLAMA_UTIL_H
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdint>
|
||||
#include <cerrno>
|
||||
#include <cstring>
|
||||
#include <cstdarg>
|
||||
#include <cstdlib>
|
||||
#include <climits>
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <stdexcept>
|
||||
|
||||
#ifdef __has_include
|
||||
#if __has_include(<unistd.h>)
|
||||
#include <unistd.h>
|
||||
#if defined(_POSIX_MAPPED_FILES)
|
||||
#include <sys/mman.h>
|
||||
#endif
|
||||
#if defined(_POSIX_MEMLOCK_RANGE)
|
||||
#include <sys/resource.h>
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32)
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#ifndef NOMINMAX
|
||||
#define NOMINMAX
|
||||
#endif
|
||||
#include <windows.h>
|
||||
#include <io.h>
|
||||
#include <stdio.h> // for _fseeki64
|
||||
#endif
|
||||
|
||||
#define LLAMA_ASSERT(x) \
|
||||
do { \
|
||||
if (!(x)) { \
|
||||
fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#ifdef __GNUC__
|
||||
#ifdef __MINGW32__
|
||||
__attribute__((format(gnu_printf, 1, 2)))
|
||||
#else
|
||||
__attribute__((format(printf, 1, 2)))
|
||||
#endif
|
||||
#endif
|
||||
static std::string format(const char * fmt, ...) {
|
||||
va_list ap, ap2;
|
||||
va_start(ap, fmt);
|
||||
va_copy(ap2, ap);
|
||||
int size = vsnprintf(NULL, 0, fmt, ap);
|
||||
LLAMA_ASSERT(size >= 0 && size < INT_MAX);
|
||||
std::vector<char> buf(size + 1);
|
||||
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
|
||||
LLAMA_ASSERT(size2 == size);
|
||||
va_end(ap2);
|
||||
va_end(ap);
|
||||
return std::string(buf.data(), size);
|
||||
}
|
||||
|
||||
struct llama_file {
|
||||
// use FILE * so we don't have to re-open the file to mmap
|
||||
FILE * fp;
|
||||
size_t size;
|
||||
|
||||
llama_file(const char * fname, const char * mode) {
|
||||
fp = std::fopen(fname, mode);
|
||||
if (fp == NULL) {
|
||||
throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
|
||||
}
|
||||
seek(0, SEEK_END);
|
||||
size = tell();
|
||||
seek(0, SEEK_SET);
|
||||
}
|
||||
|
||||
size_t tell() const {
|
||||
#ifdef _WIN32
|
||||
__int64 ret = _ftelli64(fp);
|
||||
#else
|
||||
long ret = std::ftell(fp);
|
||||
#endif
|
||||
LLAMA_ASSERT(ret != -1); // this really shouldn't fail
|
||||
return (size_t) ret;
|
||||
}
|
||||
|
||||
void seek(size_t offset, int whence) {
|
||||
#ifdef _WIN32
|
||||
int ret = _fseeki64(fp, (__int64) offset, whence);
|
||||
#else
|
||||
int ret = std::fseek(fp, (long) offset, whence);
|
||||
#endif
|
||||
LLAMA_ASSERT(ret == 0); // same
|
||||
}
|
||||
|
||||
void read_raw(void * ptr, size_t len) const {
|
||||
if (len == 0) {
|
||||
return;
|
||||
}
|
||||
errno = 0;
|
||||
std::size_t ret = std::fread(ptr, len, 1, fp);
|
||||
if (ferror(fp)) {
|
||||
throw std::runtime_error(format("read error: %s", strerror(errno)));
|
||||
}
|
||||
if (ret != 1) {
|
||||
throw std::runtime_error(std::string("unexpectedly reached end of file"));
|
||||
}
|
||||
}
|
||||
|
||||
std::uint32_t read_u32() {
|
||||
std::uint32_t ret;
|
||||
read_raw(&ret, sizeof(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::string read_string(std::uint32_t len) {
|
||||
std::vector<char> chars(len);
|
||||
read_raw(chars.data(), len);
|
||||
return std::string(chars.data(), len);
|
||||
}
|
||||
|
||||
void write_raw(const void * ptr, size_t len) const {
|
||||
if (len == 0) {
|
||||
return;
|
||||
}
|
||||
errno = 0;
|
||||
size_t ret = std::fwrite(ptr, len, 1, fp);
|
||||
if (ret != 1) {
|
||||
throw std::runtime_error(format("write error: %s", strerror(errno)));
|
||||
}
|
||||
}
|
||||
|
||||
void write_u32(std::uint32_t val) {
|
||||
write_raw(&val, sizeof(val));
|
||||
}
|
||||
|
||||
~llama_file() {
|
||||
if (fp) {
|
||||
std::fclose(fp);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(_WIN32)
|
||||
static std::string llama_format_win_err(DWORD err) {
|
||||
LPSTR buf;
|
||||
size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||
NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
|
||||
if (!size) {
|
||||
return "FormatMessageA failed";
|
||||
}
|
||||
std::string ret(buf, size);
|
||||
LocalFree(buf);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct llama_mmap {
|
||||
void * addr;
|
||||
size_t size;
|
||||
|
||||
llama_mmap(const llama_mmap &) = delete;
|
||||
|
||||
#ifdef _POSIX_MAPPED_FILES
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
|
||||
size = file->size;
|
||||
int fd = fileno(file->fp);
|
||||
int flags = MAP_SHARED;
|
||||
// prefetch/readahead impairs performance on NUMA systems
|
||||
if (numa) { prefetch = 0; }
|
||||
#ifdef __linux__
|
||||
if (prefetch) { flags |= MAP_POPULATE; }
|
||||
#endif
|
||||
addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
|
||||
if (addr == MAP_FAILED) {
|
||||
throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
|
||||
}
|
||||
|
||||
if (prefetch > 0) {
|
||||
// Advise the kernel to preload the mapped memory
|
||||
if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) {
|
||||
fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
|
||||
strerror(errno));
|
||||
}
|
||||
}
|
||||
if (numa) {
|
||||
// advise the kernel not to use readahead
|
||||
// (because the next page might not belong on the same node)
|
||||
if (madvise(addr, file->size, MADV_RANDOM)) {
|
||||
fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n",
|
||||
strerror(errno));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~llama_mmap() {
|
||||
munmap(addr, size);
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
|
||||
(void) numa;
|
||||
|
||||
size = file->size;
|
||||
|
||||
HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
|
||||
|
||||
HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
|
||||
DWORD error = GetLastError();
|
||||
|
||||
if (hMapping == NULL) {
|
||||
throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
|
||||
}
|
||||
|
||||
addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
|
||||
error = GetLastError();
|
||||
CloseHandle(hMapping);
|
||||
|
||||
if (addr == NULL) {
|
||||
throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
|
||||
}
|
||||
|
||||
#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||
if (prefetch) {
|
||||
// Advise the kernel to preload the mapped memory
|
||||
WIN32_MEMORY_RANGE_ENTRY range;
|
||||
range.VirtualAddress = addr;
|
||||
range.NumberOfBytes = (SIZE_T)size;
|
||||
if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
|
||||
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
|
||||
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||
}
|
||||
|
||||
~llama_mmap() {
|
||||
if (!UnmapViewOfFile(addr)) {
|
||||
fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
static constexpr bool SUPPORTED = false;
|
||||
|
||||
llama_mmap(struct llama_file *, bool prefetch = true, bool numa = false) {
|
||||
(void) prefetch;
|
||||
(void) numa;
|
||||
|
||||
throw std::runtime_error(std::string("mmap not supported"));
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
// Represents some region of memory being locked using mlock or VirtualLock;
|
||||
// will automatically unlock on destruction.
|
||||
struct llama_mlock {
|
||||
void * addr = NULL;
|
||||
size_t size = 0;
|
||||
bool failed_already = false;
|
||||
|
||||
llama_mlock() {}
|
||||
llama_mlock(const llama_mlock &) = delete;
|
||||
|
||||
~llama_mlock() {
|
||||
if (size) {
|
||||
raw_unlock(addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
void init(void * ptr) {
|
||||
LLAMA_ASSERT(addr == NULL && size == 0);
|
||||
addr = ptr;
|
||||
}
|
||||
|
||||
void grow_to(size_t target_size) {
|
||||
LLAMA_ASSERT(addr);
|
||||
if (failed_already) {
|
||||
return;
|
||||
}
|
||||
size_t granularity = lock_granularity();
|
||||
target_size = (target_size + granularity - 1) & ~(granularity - 1);
|
||||
if (target_size > size) {
|
||||
if (raw_lock((uint8_t *) addr + size, target_size - size)) {
|
||||
size = target_size;
|
||||
} else {
|
||||
failed_already = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef _POSIX_MEMLOCK_RANGE
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
size_t lock_granularity() {
|
||||
return (size_t) sysconf(_SC_PAGESIZE);
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
#define MLOCK_SUGGESTION \
|
||||
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
|
||||
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
|
||||
#else
|
||||
#define MLOCK_SUGGESTION \
|
||||
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
|
||||
#endif
|
||||
|
||||
bool raw_lock(const void * addr, size_t size) {
|
||||
if (!mlock(addr, size)) {
|
||||
return true;
|
||||
} else {
|
||||
char* errmsg = std::strerror(errno);
|
||||
bool suggest = (errno == ENOMEM);
|
||||
|
||||
// Check if the resource limit is fine after all
|
||||
struct rlimit lock_limit;
|
||||
if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
|
||||
suggest = false;
|
||||
if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
|
||||
suggest = false;
|
||||
|
||||
fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
|
||||
size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
#undef MLOCK_SUGGESTION
|
||||
|
||||
void raw_unlock(void * addr, size_t size) {
|
||||
if (munlock(addr, size)) {
|
||||
fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
|
||||
}
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
size_t lock_granularity() {
|
||||
SYSTEM_INFO si;
|
||||
GetSystemInfo(&si);
|
||||
return (size_t) si.dwPageSize;
|
||||
}
|
||||
|
||||
bool raw_lock(void * ptr, size_t len) {
|
||||
for (int tries = 1; ; tries++) {
|
||||
if (VirtualLock(ptr, len)) {
|
||||
return true;
|
||||
}
|
||||
if (tries == 2) {
|
||||
fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
|
||||
len, size, llama_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// It failed but this was only the first try; increase the working
|
||||
// set size and try again.
|
||||
SIZE_T min_ws_size, max_ws_size;
|
||||
if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
|
||||
fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
// Per MSDN: "The maximum number of pages that a process can lock
|
||||
// is equal to the number of pages in its minimum working set minus
|
||||
// a small overhead."
|
||||
// Hopefully a megabyte is enough overhead:
|
||||
size_t increment = len + 1048576;
|
||||
// The minimum must be <= the maximum, so we need to increase both:
|
||||
min_ws_size += increment;
|
||||
max_ws_size += increment;
|
||||
if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
|
||||
fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void raw_unlock(void * ptr, size_t len) {
|
||||
if (!VirtualUnlock(ptr, len)) {
|
||||
fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
static constexpr bool SUPPORTED = false;
|
||||
|
||||
size_t lock_granularity() {
|
||||
return (size_t) 65536;
|
||||
}
|
||||
|
||||
bool raw_lock(const void * addr, size_t len) {
|
||||
fprintf(stderr, "warning: mlock not supported on this system\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
void raw_unlock(const void * addr, size_t len) {}
|
||||
#endif
|
||||
};
|
||||
|
||||
// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
|
||||
struct llama_buffer {
|
||||
uint8_t * addr = NULL;
|
||||
size_t size = 0;
|
||||
|
||||
llama_buffer() = default;
|
||||
|
||||
void resize(size_t len) {
|
||||
#ifdef GGML_USE_METAL
|
||||
free(addr);
|
||||
int result = posix_memalign((void **) &addr, getpagesize(), len);
|
||||
if (result == 0) {
|
||||
memset(addr, 0, len);
|
||||
}
|
||||
else {
|
||||
addr = NULL;
|
||||
}
|
||||
#else
|
||||
delete[] addr;
|
||||
addr = new uint8_t[len];
|
||||
#endif
|
||||
size = len;
|
||||
}
|
||||
|
||||
~llama_buffer() {
|
||||
#ifdef GGML_USE_METAL
|
||||
free(addr);
|
||||
#else
|
||||
delete[] addr;
|
||||
#endif
|
||||
addr = NULL;
|
||||
}
|
||||
|
||||
// disable copy and move
|
||||
llama_buffer(const llama_buffer&) = delete;
|
||||
llama_buffer(llama_buffer&&) = delete;
|
||||
llama_buffer& operator=(const llama_buffer&) = delete;
|
||||
llama_buffer& operator=(llama_buffer&&) = delete;
|
||||
};
|
||||
|
||||
#ifdef GGML_USE_CUBLAS
|
||||
#include "ggml-cuda.h"
|
||||
struct llama_ctx_buffer {
|
||||
uint8_t * addr = NULL;
|
||||
bool is_cuda;
|
||||
size_t size = 0;
|
||||
|
||||
llama_ctx_buffer() = default;
|
||||
|
||||
void resize(size_t size) {
|
||||
free();
|
||||
|
||||
addr = (uint8_t *) ggml_cuda_host_malloc(size);
|
||||
if (addr) {
|
||||
is_cuda = true;
|
||||
}
|
||||
else {
|
||||
// fall back to pageable memory
|
||||
addr = new uint8_t[size];
|
||||
is_cuda = false;
|
||||
}
|
||||
this->size = size;
|
||||
}
|
||||
|
||||
void free() {
|
||||
if (addr) {
|
||||
if (is_cuda) {
|
||||
ggml_cuda_host_free(addr);
|
||||
}
|
||||
else {
|
||||
delete[] addr;
|
||||
}
|
||||
}
|
||||
addr = NULL;
|
||||
}
|
||||
|
||||
~llama_ctx_buffer() {
|
||||
free();
|
||||
}
|
||||
|
||||
// disable copy and move
|
||||
llama_ctx_buffer(const llama_ctx_buffer&) = delete;
|
||||
llama_ctx_buffer(llama_ctx_buffer&&) = delete;
|
||||
llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete;
|
||||
llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete;
|
||||
};
|
||||
#else
|
||||
typedef llama_buffer llama_ctx_buffer;
|
||||
#endif
|
||||
|
||||
#endif
|
4291
llama/llama.cpp
File diff suppressed because it is too large
439
llama/llama.go
@@ -1,439 +0,0 @@
|
||||
package llama
|
||||
|
||||
/*
|
||||
#cgo CPPFLAGS: -O3 -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
|
||||
#cgo CXXFLAGS: -std=gnu++11
|
||||
#cgo darwin CPPFLAGS: -DGGML_USE_ACCELERATE
|
||||
#cgo darwin,arm64 CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
|
||||
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
|
||||
#include <stdlib.h>
|
||||
#include "llama.h"
|
||||
|
||||
struct llama_sample_options
|
||||
{
|
||||
float repeat_penalty;
|
||||
float frequency_penalty;
|
||||
float presence_penalty;
|
||||
float temperature;
|
||||
int32_t top_k;
|
||||
float top_p;
|
||||
float tfs_z;
|
||||
float typical_p;
|
||||
int mirostat;
|
||||
float mirostat_tau;
|
||||
float mirostat_eta;
|
||||
bool penalize_newline;
|
||||
};
|
||||
|
||||
llama_token llama_sample(
|
||||
struct llama_context *ctx,
|
||||
struct llama_token_data *candidates,
|
||||
size_t n_candidates,
|
||||
const llama_token *last_tokens,
|
||||
size_t n_last_tokens,
|
||||
struct llama_sample_options *opts)
|
||||
{
|
||||
llama_token_data_array candidates_p = {
|
||||
candidates,
|
||||
n_candidates,
|
||||
false,
|
||||
};
|
||||
|
||||
struct llama_token_data newline = candidates_p.data[llama_token_nl()];
|
||||
|
||||
llama_sample_repetition_penalty(
|
||||
ctx, &candidates_p,
|
||||
last_tokens, n_last_tokens,
|
||||
opts->repeat_penalty);
|
||||
|
||||
llama_sample_frequency_and_presence_penalties(
|
||||
ctx, &candidates_p,
|
||||
last_tokens, n_last_tokens,
|
||||
opts->frequency_penalty, opts->presence_penalty);
|
||||
|
||||
if (!opts->penalize_newline) {
|
||||
candidates_p.data[llama_token_nl()] = newline;
|
||||
}
|
||||
|
||||
if (opts->temperature <= 0) {
|
||||
return llama_sample_token_greedy(ctx, &candidates_p);
|
||||
}
|
||||
|
||||
if (opts->mirostat == 1) {
|
||||
int mirostat_m = 100;
|
||||
float mirostat_mu = 2.0f * opts->mirostat_tau;
|
||||
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
|
||||
return llama_sample_token_mirostat(
|
||||
ctx, &candidates_p,
|
||||
opts->mirostat_tau, opts->mirostat_eta,
|
||||
mirostat_m, &mirostat_mu);
|
||||
} else if (opts->mirostat == 2) {
|
||||
float mirostat_mu = 2.0f * opts->mirostat_tau;
|
||||
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
|
||||
return llama_sample_token_mirostat_v2(
|
||||
ctx, &candidates_p,
|
||||
opts->mirostat_tau, opts->mirostat_eta,
|
||||
&mirostat_mu);
|
||||
} else {
|
||||
llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
|
||||
llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
|
||||
llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
|
||||
llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
|
||||
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
|
||||
return llama_sample_token(ctx, &candidates_p);
|
||||
}
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
"unsafe"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
)
|
||||
|
||||
//go:embed ggml-metal.metal
|
||||
var fs embed.FS
|
||||
|
||||
type LLM struct {
|
||||
params *C.struct_llama_context_params
|
||||
model *C.struct_llama_model
|
||||
ctx *C.struct_llama_context
|
||||
|
||||
last []C.llama_token
|
||||
embd []C.llama_token
|
||||
cursor int
|
||||
|
||||
mu sync.Mutex
|
||||
gc bool
|
||||
|
||||
api.Options
|
||||
}
|
||||
|
||||
func New(model string, opts api.Options) (*LLM, error) {
|
||||
if _, err := os.Stat(model); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
llm := LLM{Options: opts}
|
||||
|
||||
C.llama_backend_init(C.bool(llm.UseNUMA))
|
||||
|
||||
params := C.llama_context_default_params()
|
||||
params.seed = C.uint(llm.Seed)
|
||||
params.n_ctx = C.int(llm.NumCtx)
|
||||
params.n_batch = C.int(llm.NumBatch)
|
||||
params.n_gqa = C.int(llm.NumGQA)
|
||||
params.n_gpu_layers = C.int(llm.NumGPU)
|
||||
params.main_gpu = C.int(llm.MainGPU)
|
||||
params.low_vram = C.bool(llm.LowVRAM)
|
||||
params.f16_kv = C.bool(llm.F16KV)
|
||||
params.logits_all = C.bool(llm.LogitsAll)
|
||||
params.vocab_only = C.bool(llm.VocabOnly)
|
||||
params.use_mmap = C.bool(llm.UseMMap)
|
||||
params.use_mlock = C.bool(llm.UseMLock)
|
||||
params.embedding = C.bool(llm.EmbeddingOnly)
|
||||
params.rope_freq_base = C.float(llm.RopeFrequencyBase)
|
||||
params.rope_freq_scale = C.float(llm.RopeFrequencyScale)
|
||||
llm.params = &params
|
||||
|
||||
cModel := C.CString(model)
|
||||
defer C.free(unsafe.Pointer(cModel))
|
||||
|
||||
llm.model = C.llama_load_model_from_file(cModel, params)
|
||||
if llm.model == nil {
|
||||
return nil, errors.New("failed to load model")
|
||||
}
|
||||
|
||||
llm.ctx = C.llama_new_context_with_model(llm.model, params)
|
||||
if llm.ctx == nil {
|
||||
return nil, errors.New("failed to create context")
|
||||
}
|
||||
|
||||
// warm up the model
|
||||
bos := []C.llama_token{C.llama_token_bos()}
|
||||
C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
|
||||
C.llama_reset_timings(llm.ctx)
|
||||
|
||||
return &llm, nil
|
||||
}
|
||||
|
||||
func (llm *LLM) Close() {
|
||||
llm.gc = true
|
||||
|
||||
llm.mu.Lock()
|
||||
defer llm.mu.Unlock()
|
||||
|
||||
defer C.llama_free_model(llm.model)
|
||||
defer C.llama_free(llm.ctx)
|
||||
|
||||
C.llama_print_timings(llm.ctx)
|
||||
}
|
||||
|
||||
var errNeedMoreData = errors.New("need more data")
|
||||
|
||||
func (llm *LLM) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
|
||||
C.llama_reset_timings(llm.ctx)
|
||||
|
||||
tokens := make([]C.llama_token, len(ctx))
|
||||
for i := range tokens {
|
||||
tokens[i] = C.llama_token(ctx[i])
|
||||
}
|
||||
|
||||
llm.marshalPrompt(tokens, prompt)
|
||||
|
||||
C.llama_set_rng_seed(llm.ctx, C.uint(llm.Seed))
|
||||
|
||||
var b bytes.Buffer
|
||||
for {
|
||||
token, err := llm.next()
|
||||
if llm.gc {
|
||||
return nil
|
||||
} else if errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.WriteString(llm.Decode(token))
|
||||
|
||||
if err := llm.checkStopConditions(b); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if errors.Is(err, errNeedMoreData) {
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
|
||||
fn(api.GenerateResponse{Response: b.String()})
|
||||
b.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
embd := make([]int, len(llm.embd))
|
||||
for i := range llm.embd {
|
||||
embd[i] = int(llm.embd[i])
|
||||
}
|
||||
|
||||
timings := C.llama_get_timings(llm.ctx)
|
||||
fn(api.GenerateResponse{
|
||||
Done: true,
|
||||
Context: embd,
|
||||
SampleCount: int(timings.n_sample),
|
||||
SampleDuration: parseDurationMs(float64(timings.t_sample_ms)),
|
||||
PromptEvalCount: int(timings.n_p_eval),
|
||||
PromptEvalDuration: parseDurationMs(float64(timings.t_p_eval_ms)),
|
||||
EvalCount: int(timings.n_eval),
|
||||
EvalDuration: parseDurationMs(float64(timings.t_eval_ms)),
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (llm *LLM) checkStopConditions(b bytes.Buffer) error {
|
||||
for _, stopCondition := range llm.Stop {
|
||||
if stopCondition == strings.TrimSpace(b.String()) {
|
||||
return io.EOF
|
||||
} else if strings.HasPrefix(stopCondition, strings.TrimSpace(b.String())) {
|
||||
return errNeedMoreData
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (llm *LLM) marshalPrompt(ctx []C.llama_token, prompt string) []C.llama_token {
|
||||
tokens := append(ctx, llm.Encode(prompt)...)
|
||||
if llm.NumKeep < 0 {
|
||||
llm.NumKeep = len(tokens)
|
||||
}
|
||||
|
||||
// min(llm.NumCtx - 4, llm.NumKeep)
|
||||
if llm.NumCtx-4 < llm.NumKeep {
|
||||
llm.NumKeep = llm.NumCtx - 4
|
||||
}
|
||||
|
||||
if len(tokens) >= llm.NumCtx {
|
||||
// truncate input
|
||||
numLeft := (llm.NumCtx - llm.NumKeep) / 2
|
||||
truncated := tokens[:llm.NumKeep]
|
||||
erasedBlocks := (len(tokens) - llm.NumKeep - numLeft - 1) / numLeft
|
||||
truncated = append(truncated, tokens[llm.NumKeep+erasedBlocks*numLeft:]...)
|
||||
copy(llm.last, tokens[len(tokens)-llm.NumCtx:])
|
||||
|
||||
tokens = truncated
|
||||
log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated))
|
||||
} else {
|
||||
llm.last = make([]C.llama_token, llm.NumCtx-len(tokens))
|
||||
llm.last = append(llm.last, tokens...)
|
||||
}
|
||||
|
||||
var i int
|
||||
for i = 0; i < len(llm.embd) && i < len(tokens) && llm.embd[i] == tokens[i]; i++ {
|
||||
// noop
|
||||
}
|
||||
|
||||
llm.embd = tokens
|
||||
if i == len(tokens) {
|
||||
// evaluate at least one token to generate logits
|
||||
i--
|
||||
}
|
||||
|
||||
llm.cursor = i
|
||||
|
||||
log.Printf("prompt: num_past=%d cached=%v eval=%v", i, len(llm.embd[:i]), len(llm.embd[i:]))
|
||||
return tokens
|
||||
}
|
||||
|
||||
func (llm *LLM) Encode(prompt string) []C.llama_token {
|
||||
cPrompt := C.CString(prompt)
|
||||
defer C.free(unsafe.Pointer(cPrompt))
|
||||
|
||||
tokens := make([]C.llama_token, len(prompt)+1)
|
||||
if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(tokens), C.int(len(tokens)), true); n > 0 {
|
||||
return tokens[:n]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (llm *LLM) Decode(tokens ...C.llama_token) string {
|
||||
var sb strings.Builder
|
||||
for _, token := range tokens {
|
||||
sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, token)))
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func (llm *LLM) next() (C.llama_token, error) {
|
||||
llm.mu.Lock()
|
||||
defer llm.mu.Unlock()
|
||||
|
||||
if len(llm.embd) >= llm.NumCtx {
|
||||
numLeft := (llm.NumCtx - llm.NumKeep) / 2
|
||||
truncated := llm.embd[:llm.NumKeep]
|
||||
truncated = append(truncated, llm.embd[len(llm.embd)-numLeft:]...)
|
||||
|
||||
llm.embd = truncated
|
||||
llm.cursor = llm.NumKeep
|
||||
log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d cursor=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated), llm.cursor)
|
||||
}
|
||||
|
||||
for {
|
||||
if llm.gc {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if llm.cursor >= len(llm.embd) {
|
||||
break
|
||||
}
|
||||
|
||||
numEval := len(llm.embd) - llm.cursor
|
||||
if numEval > llm.NumBatch {
|
||||
numEval = llm.NumBatch
|
||||
}
|
||||
|
||||
if retval := C.llama_eval(llm.ctx, unsafe.SliceData(llm.embd[llm.cursor:]), C.int(numEval), C.int(llm.cursor), C.int(llm.NumThread)); retval != 0 {
|
||||
return 0, fmt.Errorf("llama_eval: %d", retval)
|
||||
}
|
||||
|
||||
llm.cursor += numEval
|
||||
}
|
||||
|
||||
var sampleOpts C.struct_llama_sample_options
|
||||
sampleOpts.repeat_penalty = C.float(llm.RepeatPenalty)
|
||||
sampleOpts.frequency_penalty = C.float(llm.FrequencyPenalty)
|
||||
sampleOpts.presence_penalty = C.float(llm.PresencePenalty)
|
||||
sampleOpts.temperature = C.float(llm.Temperature)
|
||||
sampleOpts.top_k = C.int(llm.TopK)
|
||||
sampleOpts.top_p = C.float(llm.TopP)
|
||||
sampleOpts.tfs_z = C.float(llm.TFSZ)
|
||||
sampleOpts.typical_p = C.float(llm.TypicalP)
|
||||
sampleOpts.mirostat = C.int(llm.Mirostat)
|
||||
sampleOpts.mirostat_tau = C.float(llm.MirostatTau)
|
||||
sampleOpts.mirostat_eta = C.float(llm.MirostatEta)
|
||||
sampleOpts.penalize_newline = C.bool(llm.PenalizeNewline)
|
||||
|
||||
numVocab := C.llama_n_vocab(llm.ctx)
|
||||
logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)
|
||||
|
||||
// TODO: logit bias
|
||||
|
||||
candidates := make([]C.llama_token_data, numVocab)
|
||||
for i := range logits {
|
||||
candidates[i] = C.llama_token_data{
|
||||
id: C.int(i),
|
||||
logit: logits[i],
|
||||
p: 0,
|
||||
}
|
||||
}
|
||||
|
||||
repeatLastN := llm.RepeatLastN
|
||||
if len(llm.last) < repeatLastN {
|
||||
repeatLastN = len(llm.last)
|
||||
}
|
||||
|
||||
if llm.NumCtx < repeatLastN {
|
||||
repeatLastN = llm.NumCtx
|
||||
}
|
||||
|
||||
lastN := llm.last[len(llm.last)-repeatLastN:]
|
||||
|
||||
token := C.llama_sample(
|
||||
llm.ctx,
|
||||
unsafe.SliceData(candidates), C.size_t(len(candidates)),
|
||||
unsafe.SliceData(lastN), C.size_t(len(lastN)),
|
||||
&sampleOpts,
|
||||
)
|
||||
|
||||
llm.last = append(llm.last, token)
|
||||
llm.embd = append(llm.embd, token)
|
||||
|
||||
if token == C.llama_token_eos() {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func (llm *LLM) Embedding(input string) ([]float64, error) {
|
||||
if !llm.EmbeddingOnly {
|
||||
return nil, errors.New("llama: embedding not enabled")
|
||||
}
|
||||
|
||||
tokens := llm.Encode(input)
|
||||
if tokens == nil {
|
||||
return nil, errors.New("llama: tokenize embedding")
|
||||
}
|
||||
|
||||
retval := C.llama_eval(llm.ctx, unsafe.SliceData(tokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
|
||||
if retval != 0 {
|
||||
return nil, errors.New("llama: eval")
|
||||
}
|
||||
|
||||
n := C.llama_n_embd(llm.ctx)
|
||||
if n <= 0 {
|
||||
return nil, errors.New("llama: no embeddings generated")
|
||||
}
|
||||
cEmbeddings := unsafe.Slice(C.llama_get_embeddings(llm.ctx), n)
|
||||
|
||||
embeddings := make([]float64, len(cEmbeddings))
|
||||
for i, v := range cEmbeddings {
|
||||
embeddings[i] = float64(v)
|
||||
}
|
||||
return embeddings, nil
|
||||
}
|
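For context on how the deleted llama.go bindings above were driven, here is a minimal, hypothetical usage sketch built only from the signatures visible in this diff (New, Predict, Close). The model path is a placeholder, and api.DefaultOptions is assumed to be the api package's default-options helper; none of this code is part of the diff itself.

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llama"
)

func main() {
	// Assumed helper for sane default generation options.
	opts := api.DefaultOptions()

	// Placeholder model path; New stats the file and loads it via llama.cpp.
	llm, err := llama.New("/path/to/model.bin", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer llm.Close()

	// Predict streams partial responses through the callback until the
	// final response (Done: true) carrying timings and context is sent.
	err = llm.Predict(nil, "Why is the sky blue?", func(r api.GenerateResponse) {
		fmt.Print(r.Response)
	})
	if err != nil {
		log.Fatal(err)
	}
}
```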
495
llama/llama.h
@@ -1,495 +0,0 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
* Copyright (c) 2023 Georgi Gerganov
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in all
|
||||
* copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef LLAMA_H
|
||||
#define LLAMA_H
|
||||
|
||||
#include "ggml.h"
|
||||
#ifdef GGML_USE_CUBLAS
|
||||
#include "ggml-cuda.h"
|
||||
#define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES
|
||||
#else
|
||||
#define LLAMA_MAX_DEVICES 1
|
||||
#endif // GGML_USE_CUBLAS
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
#include <stdbool.h>
|
||||
|
||||
#ifdef LLAMA_SHARED
|
||||
# if defined(_WIN32) && !defined(__MINGW32__)
|
||||
# ifdef LLAMA_BUILD
|
||||
# define LLAMA_API __declspec(dllexport)
|
||||
# else
|
||||
# define LLAMA_API __declspec(dllimport)
|
||||
# endif
|
||||
# else
|
||||
# define LLAMA_API __attribute__ ((visibility ("default")))
|
||||
# endif
|
||||
#else
|
||||
# define LLAMA_API
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
# define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
|
||||
#elif defined(_MSC_VER)
|
||||
# define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
|
||||
#else
|
||||
# define DEPRECATED(func, hint) func
|
||||
#endif
|
||||
|
||||
#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
|
||||
#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
|
||||
#define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf'
|
||||
#define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml'
|
||||
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
|
||||
|
||||
#define LLAMA_FILE_VERSION 3
|
||||
#define LLAMA_FILE_MAGIC LLAMA_FILE_MAGIC_GGJT
|
||||
#define LLAMA_FILE_MAGIC_UNVERSIONED LLAMA_FILE_MAGIC_GGML
|
||||
#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
|
||||
#define LLAMA_SESSION_VERSION 1
|
||||
|
||||
#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
|
||||
|
||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
|
||||
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
|
||||
#define LLAMA_SUPPORTS_GPU_OFFLOAD
|
||||
#endif
|
||||
|
||||
#ifndef LLAMA_DEFAULT_RMS_EPS
|
||||
#define LLAMA_DEFAULT_RMS_EPS 5e-6f
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
//
|
||||
// C interface
|
||||
//
|
||||
// TODO: show sample usage
|
||||
//
|
||||
|
||||
struct llama_model;
|
||||
struct llama_context;
|
||||
|
||||
typedef int llama_token;
|
||||
|
||||
typedef struct llama_token_data {
|
||||
llama_token id; // token id
|
||||
float logit; // log-odds of the token
|
||||
float p; // probability of the token
|
||||
} llama_token_data;
|
||||
|
||||
typedef struct llama_token_data_array {
|
||||
llama_token_data * data;
|
||||
size_t size;
|
||||
bool sorted;
|
||||
} llama_token_data_array;
|
||||
|
||||
typedef void (*llama_progress_callback)(float progress, void *ctx);
|
||||
|
||||
struct llama_context_params {
|
||||
uint32_t seed; // RNG seed, -1 for random
|
||||
int32_t n_ctx; // text context
|
||||
int32_t n_batch; // prompt processing batch size
|
||||
int32_t n_gqa; // grouped-query attention (TEMP - will be moved to model hparams)
|
||||
float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams)
|
||||
int32_t n_gpu_layers; // number of layers to store in VRAM
|
||||
int32_t main_gpu; // the GPU that is used for scratch and small tensors
|
||||
|
||||
const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)
|
||||
|
||||
// ref: https://github.com/ggerganov/llama.cpp/pull/2054
|
||||
float rope_freq_base; // RoPE base frequency
|
||||
float rope_freq_scale; // RoPE frequency scaling factor
|
||||
|
||||
// called with a progress value between 0 and 1, pass NULL to disable
|
||||
llama_progress_callback progress_callback;
|
||||
// context pointer passed to the progress callback
|
||||
void * progress_callback_user_data;
|
||||
|
||||
// Keep the booleans together to avoid misalignment during copy-by-value.
|
||||
bool low_vram; // if true, reduce VRAM usage at the cost of performance
|
||||
bool mul_mat_q; // if true, use experimental mul_mat_q kernels
|
||||
bool f16_kv; // use fp16 for KV cache
|
||||
bool logits_all; // the llama_eval() call computes all logits, not just the last one
|
||||
bool vocab_only; // only load the vocabulary, no weights
|
||||
bool use_mmap; // use mmap if possible
|
||||
bool use_mlock; // force system to keep model in RAM
|
||||
bool embedding; // embedding mode only
|
||||
};
|
||||
// model file types
|
||||
enum llama_ftype {
|
||||
LLAMA_FTYPE_ALL_F32 = 0,
|
||||
LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
|
||||
// LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
|
||||
// LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
|
||||
LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors
|
||||
LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors
|
||||
};
|
||||
|
||||
// model quantization parameters
|
||||
typedef struct llama_model_quantize_params {
|
||||
int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
|
||||
enum llama_ftype ftype; // quantize to this llama_ftype
|
||||
bool allow_requantize; // allow quantizing non-f32/f16 tensors
|
||||
bool quantize_output_tensor; // quantize output.weight
|
||||
} llama_model_quantize_params;
|
||||
|
||||
// grammar types
|
||||
struct llama_grammar;
|
||||
|
||||
// grammar element type
|
||||
enum llama_gretype {
|
||||
// end of rule definition
|
||||
LLAMA_GRETYPE_END = 0,
|
||||
|
||||
// start of alternate definition for rule
|
||||
LLAMA_GRETYPE_ALT = 1,
|
||||
|
||||
// non-terminal element: reference to rule
|
||||
LLAMA_GRETYPE_RULE_REF = 2,
|
||||
|
||||
// terminal element: character (code point)
|
||||
LLAMA_GRETYPE_CHAR = 3,
|
||||
|
||||
// inverse char(s) ([^a], [^a-b] [^abc])
|
||||
LLAMA_GRETYPE_CHAR_NOT = 4,
|
||||
|
||||
// modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
|
||||
// be an inclusive range ([a-z])
|
||||
LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
|
||||
|
||||
// modifies a preceding LLAMA_GRETYPE_CHAR or
|
||||
// LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
|
||||
LLAMA_GRETYPE_CHAR_ALT = 6,
|
||||
};
|
||||
|
||||
typedef struct llama_grammar_element {
|
||||
enum llama_gretype type;
|
||||
uint32_t value; // Unicode code point or rule ID
|
||||
} llama_grammar_element;
|
||||
|
||||
// performance timing information
|
||||
struct llama_timings {
|
||||
double t_start_ms;
|
||||
double t_end_ms;
|
||||
double t_load_ms;
|
||||
double t_sample_ms;
|
||||
double t_p_eval_ms;
|
||||
double t_eval_ms;
|
||||
|
||||
int32_t n_sample;
|
||||
int32_t n_p_eval;
|
||||
int32_t n_eval;
|
||||
};
|
||||
|
||||
LLAMA_API int llama_max_devices();
|
||||
|
||||
LLAMA_API struct llama_context_params llama_context_default_params();
|
||||
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();
|
||||
|
||||
LLAMA_API bool llama_mmap_supported();
|
||||
LLAMA_API bool llama_mlock_supported();
|
||||
|
||||
// TODO: not great API - very likely to change
|
||||
// Initialize the llama + ggml backend
|
||||
// If numa is true, use NUMA optimizations
|
||||
// Call once at the start of the program
|
||||
LLAMA_API void llama_backend_init(bool numa);
|
||||
// Call once at the end of the program - currently only used for MPI
|
||||
LLAMA_API void llama_backend_free();
|
||||
|
||||
LLAMA_API int64_t llama_time_us();
|
||||
|
||||
LLAMA_API struct llama_model * llama_load_model_from_file(
|
||||
const char * path_model,
|
||||
struct llama_context_params params);
|
||||
|
||||
LLAMA_API void llama_free_model(struct llama_model * model);
|
||||
|
||||
LLAMA_API struct llama_context * llama_new_context_with_model(
|
||||
struct llama_model * model,
|
||||
struct llama_context_params params);
|
||||
|
||||
// Various functions for loading a ggml llama model.
|
||||
// Allocate (almost) all memory needed for the model.
|
||||
// Return NULL on failure
|
||||
LLAMA_API DEPRECATED(struct llama_context * llama_init_from_file(
|
||||
const char * path_model,
|
||||
struct llama_context_params params),
|
||||
"please use llama_load_model_from_file combined with llama_new_context_with_model instead");
|
||||
|
||||
// Frees all allocated memory
|
||||
LLAMA_API void llama_free(struct llama_context * ctx);
|
||||
|
||||
// Returns 0 on success
|
||||
LLAMA_API int llama_model_quantize(
|
||||
const char * fname_inp,
|
||||
const char * fname_out,
|
||||
const llama_model_quantize_params * params);
|
||||
|
||||
// Apply a LoRA adapter to a loaded model
|
||||
// path_base_model is the path to a higher quality model to use as a base for
|
||||
// the layers modified by the adapter. Can be NULL to use the current loaded model.
|
||||
// The model needs to be reloaded before applying a new adapter, otherwise the adapter
|
||||
// will be applied on top of the previous one
|
||||
// Returns 0 on success
|
||||
LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
|
||||
struct llama_context * ctx,
|
||||
const char * path_lora,
|
||||
const char * path_base_model,
|
||||
int n_threads),
|
||||
"please use llama_model_apply_lora_from_file instead");
|
||||
|
||||
LLAMA_API int llama_model_apply_lora_from_file(
|
||||
const struct llama_model * model,
|
||||
const char * path_lora,
|
||||
const char * path_base_model,
|
||||
int n_threads);
|
||||
|
||||
// Returns the number of tokens in the KV cache
|
||||
LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);
|
||||
|
||||
// Sets the current rng seed.
|
||||
LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
|
||||
|
||||
// Returns the maximum size in bytes of the state (rng, logits, embedding
|
||||
// and kv_cache) - will often be smaller after compacting tokens
|
||||
LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);
|
||||
|
||||
// Copies the state to the specified destination address.
|
||||
// Destination needs to have allocated enough memory.
|
||||
// Returns the number of bytes copied
|
||||
LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst);
|
||||
|
||||
// Set the state reading from the specified address
|
||||
// Returns the number of bytes read
|
||||
LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src);
|
||||
|
||||
// Save/load session file
|
||||
LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
|
||||
LLAMA_API bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count);
|
||||
|
||||
// Run the llama inference to obtain the logits and probabilities for the next token.
|
||||
// tokens + n_tokens is the provided batch of new tokens to process
|
||||
// n_past is the number of tokens to use from previous eval calls
|
||||
// Returns 0 on success
|
||||
LLAMA_API int llama_eval(
|
||||
struct llama_context * ctx,
|
||||
const llama_token * tokens,
|
||||
int n_tokens,
|
||||
int n_past,
|
||||
int n_threads);
|
||||
|
||||
// Same as llama_eval, but use float matrix input directly.
|
||||
LLAMA_API int llama_eval_embd(
|
||||
struct llama_context * ctx,
|
||||
const float * embd,
|
||||
int n_tokens,
|
||||
int n_past,
|
||||
int n_threads);
|
||||
|
||||
// Export a static computation graph for context of 511 and batch size of 1
|
||||
// NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
|
||||
// parameters here to keep things simple
|
||||
// IMPORTANT: do not use for anything else other than debugging and testing!
|
||||
LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);
|
||||
|
||||
// Convert the provided text into tokens.
|
||||
// The tokens pointer must be large enough to hold the resulting tokens.
|
||||
// Returns the number of tokens on success, no more than n_max_tokens
|
||||
// Returns a negative number on failure - the number of tokens that would have been returned
|
||||
// TODO: not sure if correct
|
||||
LLAMA_API int llama_tokenize(
|
||||
struct llama_context * ctx,
|
||||
const char * text,
|
||||
llama_token * tokens,
|
||||
int n_max_tokens,
|
||||
bool add_bos);
|
||||
|
||||
LLAMA_API int llama_tokenize_with_model(
|
||||
const struct llama_model * model,
|
||||
const char * text,
|
||||
llama_token * tokens,
|
||||
int n_max_tokens,
|
||||
bool add_bos);
|
||||
|
||||
LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
|
||||
LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
|
||||
LLAMA_API int llama_n_embd (const struct llama_context * ctx);
|
||||
|
||||
LLAMA_API int llama_n_vocab_from_model(const struct llama_model * model);
|
||||
LLAMA_API int llama_n_ctx_from_model (const struct llama_model * model);
|
||||
LLAMA_API int llama_n_embd_from_model (const struct llama_model * model);
|
||||
|
||||
// Get the vocabulary as output parameters.
|
||||
// Returns number of results.
|
||||
LLAMA_API int llama_get_vocab(
|
||||
const struct llama_context * ctx,
|
||||
const char * * strings,
|
||||
float * scores,
|
||||
int capacity);
|
||||
|
||||
LLAMA_API int llama_get_vocab_from_model(
|
||||
const struct llama_model * model,
|
||||
const char * * strings,
|
||||
float * scores,
|
||||
int capacity);
|
||||
|
||||
// Token logits obtained from the last call to llama_eval()
|
||||
// The logits for the last token are stored in the last row
|
||||
// Can be mutated in order to change the probabilities of the next token
|
||||
// Rows: n_tokens
|
||||
// Cols: n_vocab
|
||||
LLAMA_API float * llama_get_logits(struct llama_context * ctx);
|
||||
|
    // Get the embeddings for the input
    // shape: [n_embd] (1-dimensional)
    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);

    // Token Id -> String. Uses the vocabulary in the provided context
    LLAMA_API const char * llama_token_to_str(
            const struct llama_context * ctx,
                             llama_token token);

    LLAMA_API const char * llama_token_to_str_with_model(
              const struct llama_model * model,
                             llama_token token);

    // Special tokens
    LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence
    LLAMA_API llama_token llama_token_eos(); // end-of-sentence
    LLAMA_API llama_token llama_token_nl();  // next-line

    // Grammar
    //
    LLAMA_API struct llama_grammar * llama_grammar_init(
            const llama_grammar_element ** rules,
                                    size_t n_rules,
                                    size_t start_rule_index);

    LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);

    // Sampling functions

    /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
    LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);

    /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
    LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);

    /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens; the logits must be directly extracted from the original generation context without being sorted.
    /// @param guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
    /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
    LLAMA_API void llama_sample_classifier_free_guidance(
              struct llama_context * ctx,
            llama_token_data_array * candidates,
              struct llama_context * guidance_ctx,
                               float scale);

    /// @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.
    LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);

    /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);

    /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
    LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);

    /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
    LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);

    /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
    LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
    LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);

    /// @details Apply constraints from grammar
    LLAMA_API void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar);

    /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);

    /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
    /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
    /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);

    /// @details Selects the token with the highest probability.
    LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);

    /// @details Randomly selects a token from the candidates based on their probabilities.
    LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);

    /// @details Accepts the sampled token into the grammar
    LLAMA_API void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token);

    // Performance information
    LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
    LLAMA_API void llama_print_timings(struct llama_context * ctx);
    LLAMA_API void llama_reset_timings(struct llama_context * ctx);

    // Print system information
    LLAMA_API const char * llama_print_system_info(void);

#ifdef __cplusplus
}
#endif

// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef LLAMA_API_INTERNAL

#include <vector>
#include <string>
struct ggml_tensor;

const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);

#endif

#endif // LLAMA_H
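The Mirostat entries above describe a simple control loop: `mu` starts at `2 * tau` and is nudged by the learning rate `eta` according to the gap between the observed surprise of the sampled token and the target `tau`. A rough Go sketch of that update rule follows; it is purely illustrative (the function, names, and numbers are invented, and the real logic lives inside llama_sample_token_mirostat and llama_sample_token_mirostat_v2).

package main

import (
    "fmt"
    "math"
)

// mirostatStep illustrates the mu update described in the header comments:
// the observed surprise is -log2(p) of the sampled token, and mu moves toward
// the target cross-entropy tau at a rate controlled by eta. This is a sketch
// of the algorithm's control loop, not the llama.cpp implementation.
func mirostatStep(sampledProb, tau, eta, mu float64) float64 {
    observedSurprise := -math.Log2(sampledProb)
    e := observedSurprise - tau
    return mu - eta*e
}

func main() {
    tau, eta := 5.0, 0.1
    mu := 2 * tau // initialized to twice the target, as the comment notes
    for _, p := range []float64{0.02, 0.20, 0.001} {
        mu = mirostatStep(p, tau, eta, mu)
        fmt.Printf("mu = %.3f\n", mu)
    }
}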
@@ -1,80 +0,0 @@
package llama

import (
    "bytes"
    "crypto/sha256"
    "errors"
    "io"
    "log"
    "os"
    "path/filepath"
)

func init() {
    if err := initBackend(); err != nil {
        log.Printf("WARNING: GPU could not be initialized correctly: %v", err)
        log.Printf("WARNING: falling back to CPU")
    }
}

func initBackend() error {
    exec, err := os.Executable()
    if err != nil {
        return err
    }

    exec, err = filepath.EvalSymlinks(exec)
    if err != nil {
        return err
    }

    metal := filepath.Join(filepath.Dir(exec), "ggml-metal.metal")
    fi, err := os.Stat(metal)
    if err != nil && !errors.Is(err, os.ErrNotExist) {
        return err
    }

    if fi != nil {
        actual, err := os.Open(metal)
        if err != nil {
            return err
        }

        actualSum := sha256.New()
        if _, err := io.Copy(actualSum, actual); err != nil {
            return err
        }

        expect, err := fs.Open("ggml-metal.metal")
        if err != nil {
            return err
        }

        expectSum := sha256.New()
        if _, err := io.Copy(expectSum, expect); err != nil {
            return err
        }

        if bytes.Equal(actualSum.Sum(nil), expectSum.Sum(nil)) {
            return nil
        }
    }

    dst, err := os.Create(filepath.Join(filepath.Dir(exec), "ggml-metal.metal"))
    if err != nil {
        return err
    }
    defer dst.Close()

    src, err := fs.Open("ggml-metal.metal")
    if err != nil {
        return err
    }
    defer src.Close()

    if _, err := io.Copy(dst, src); err != nil {
        return err
    }

    return nil
}
20  llm/falcon.go  Normal file
@@ -0,0 +1,20 @@
package llm

const (
    falconModelType7B   = 32
    falconModelType40B  = 60
    falconModelType180B = 80
)

func falconModelType(numLayer uint32) string {
    switch numLayer {
    case 32:
        return "7B"
    case 60:
        return "40B"
    case 80:
        return "180B"
    default:
        return "unknown"
    }
}
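A quick sanity check of the mapping above, as a hypothetical snippet; it mirrors how ModelType in llm/gguf.go passes the falcon.block_count metadata value into falconModelType.

package llm

import "fmt"

// exampleFalconSizes is an invented helper showing how the layer-count
// mapping is typically consumed: GGUF metadata supplies the block count and
// falconModelType turns it into a human-readable size label.
func exampleFalconSizes() {
    for _, blocks := range []uint32{32, 60, 80, 99} {
        fmt.Printf("%d layers -> %s\n", blocks, falconModelType(blocks)) // 7B, 40B, 180B, unknown
    }
}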
209  llm/ggml.go  Normal file
@@ -0,0 +1,209 @@
package llm

import (
    "encoding/binary"
    "errors"
    "io"
)

type GGML struct {
    magic uint32
    container
    model
}

const (
    fileTypeF32 uint32 = iota
    fileTypeF16
    fileTypeQ4_0
    fileTypeQ4_1
    fileTypeQ4_1_F16
    fileTypeQ8_0 uint32 = iota + 2
    fileTypeQ5_0
    fileTypeQ5_1
    fileTypeQ2_K
    fileTypeQ3_K_S
    fileTypeQ3_K_M
    fileTypeQ3_K_L
    fileTypeQ4_K_S
    fileTypeQ4_K_M
    fileTypeQ5_K_S
    fileTypeQ5_K_M
    fileTypeQ6_K
)

func fileType(fileType uint32) string {
    switch fileType {
    case fileTypeF32:
        return "F32"
    case fileTypeF16:
        return "F16"
    case fileTypeQ4_0:
        return "Q4_0"
    case fileTypeQ4_1:
        return "Q4_1"
    case fileTypeQ4_1_F16:
        return "Q4_1_F16"
    case fileTypeQ8_0:
        return "Q8_0"
    case fileTypeQ5_0:
        return "Q5_0"
    case fileTypeQ5_1:
        return "Q5_1"
    case fileTypeQ2_K:
        return "Q2_K"
    case fileTypeQ3_K_S:
        return "Q3_K_S"
    case fileTypeQ3_K_M:
        return "Q3_K_M"
    case fileTypeQ3_K_L:
        return "Q3_K_L"
    case fileTypeQ4_K_S:
        return "Q4_K_S"
    case fileTypeQ4_K_M:
        return "Q4_K_M"
    case fileTypeQ5_K_S:
        return "Q5_K_S"
    case fileTypeQ5_K_M:
        return "Q5_K_M"
    case fileTypeQ6_K:
        return "Q6_K"
    default:
        return "unknown"
    }
}

type model interface {
    ModelFamily() string
    ModelType() string
    FileType() string
    NumLayers() int64
}

type container interface {
    Name() string
    Decode(io.Reader) (model, error)
}

type containerGGML struct{}

func (c *containerGGML) Name() string {
    return "ggml"
}

func (c *containerGGML) Decode(r io.Reader) (model, error) {
    return nil, nil
}

type containerGGMF struct {
    version uint32
}

func (c *containerGGMF) Name() string {
    return "ggmf"
}

func (c *containerGGMF) Decode(r io.Reader) (model, error) {
    var version uint32
    binary.Read(r, binary.LittleEndian, &version)

    switch version {
    case 1:
    default:
        return nil, errors.New("invalid version")
    }

    c.version = version
    return nil, nil
}

type containerGGJT struct {
    version uint32
}

func (c *containerGGJT) Name() string {
    return "ggjt"
}

func (c *containerGGJT) Decode(r io.Reader) (model, error) {
    var version uint32
    binary.Read(r, binary.LittleEndian, &version)

    switch version {
    case 1, 2, 3:
    default:
        return nil, errors.New("invalid version")
    }

    c.version = version

    // different model types may have different layouts for hyperparameters
    var llama llamaModel
    binary.Read(r, binary.LittleEndian, &llama.hyperparameters)
    return &llama, nil
}

type containerLORA struct {
    version uint32
}

func (c *containerLORA) Name() string {
    return "ggla"
}

func (c *containerLORA) Decode(r io.Reader) (model, error) {
    var version uint32
    binary.Read(r, binary.LittleEndian, &version)

    switch version {
    case 1:
    default:
        return nil, errors.New("invalid version")
    }

    c.version = version
    return nil, nil
}

const (
    // Magic constant for `ggml` files (unversioned).
    FILE_MAGIC_GGML = 0x67676d6c
    // Magic constant for `ggml` files (versioned, ggmf).
    FILE_MAGIC_GGMF = 0x67676d66
    // Magic constant for `ggml` files (versioned, ggjt).
    FILE_MAGIC_GGJT = 0x67676a74
    // Magic constant for `ggla` files (LoRA adapter).
    FILE_MAGIC_GGLA = 0x67676C61
    // Magic constant for `gguf` files (versioned, gguf)
    FILE_MAGIC_GGUF = 0x46554747
)

func DecodeGGML(r io.ReadSeeker) (*GGML, error) {
    var ggml GGML
    binary.Read(r, binary.LittleEndian, &ggml.magic)

    switch ggml.magic {
    case FILE_MAGIC_GGML:
        ggml.container = &containerGGML{}
    case FILE_MAGIC_GGMF:
        ggml.container = &containerGGMF{}
    case FILE_MAGIC_GGJT:
        ggml.container = &containerGGJT{}
    case FILE_MAGIC_GGLA:
        ggml.container = &containerLORA{}
    case FILE_MAGIC_GGUF:
        ggml.container = &containerGGUF{}
    default:
        return nil, errors.New("invalid file magic")
    }

    model, err := ggml.Decode(r)
    if err != nil {
        return nil, err
    }

    ggml.model = model

    // final model type
    return &ggml, nil
}
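For orientation, here is a hedged sketch of how DecodeGGML and the model interface above might be exercised. The inspectModel helper and the file path are invented for illustration; only DecodeGGML, ModelFamily, ModelType, FileType, and NumLayers come from the code above, and the sketch assumes a GGUF file so the decoded model value is non-nil.

package llm

import (
    "fmt"
    "os"
)

// inspectModel is a hypothetical helper, not part of the change set.
// It decodes a model file header and prints the metadata exposed by the
// model interface defined above.
func inspectModel(path string) error {
    f, err := os.Open(path) // e.g. a GGUF model file; path is illustrative
    if err != nil {
        return err
    }
    defer f.Close()

    ggml, err := DecodeGGML(f) // *os.File satisfies io.ReadSeeker
    if err != nil {
        return err
    }

    fmt.Println("family:", ggml.ModelFamily())
    fmt.Println("type:  ", ggml.ModelType())
    fmt.Println("file:  ", ggml.FileType())
    fmt.Println("layers:", ggml.NumLayers())
    return nil
}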
383  llm/gguf.go  Normal file
@@ -0,0 +1,383 @@
package llm

import (
    "bytes"
    "encoding/binary"
    "errors"
    "fmt"
    "io"
)

type containerGGUF struct {
    Version uint32

    V1 struct {
        NumTensor uint32
        NumKV     uint32
    }

    V2 struct {
        NumTensor uint64
        NumKV     uint64
    }
}

func (c *containerGGUF) Name() string {
    return "gguf"
}

func (c *containerGGUF) Decode(r io.Reader) (model, error) {
    binary.Read(r, binary.LittleEndian, &c.Version)

    switch c.Version {
    case 1:
        binary.Read(r, binary.LittleEndian, &c.V1)
    case 2:
        binary.Read(r, binary.LittleEndian, &c.V2)
    default:
        return nil, errors.New("invalid version")
    }

    model := newGGUFModel(c)
    if err := model.Decode(r); err != nil {
        return nil, err
    }

    return model, nil
}

const (
    ggufTypeUint8 uint32 = iota
    ggufTypeInt8
    ggufTypeUint16
    ggufTypeInt16
    ggufTypeUint32
    ggufTypeInt32
    ggufTypeFloat32
    ggufTypeBool
    ggufTypeString
    ggufTypeArray
    ggufTypeUint64
    ggufTypeInt64
    ggufTypeFloat64
)

type kv map[string]any

type ggufModel struct {
    *containerGGUF
    kv
}

func newGGUFModel(container *containerGGUF) *ggufModel {
    return &ggufModel{
        containerGGUF: container,
        kv:            make(kv),
    }
}

func (llm *ggufModel) NumKV() uint64 {
    if llm.Version == 1 {
        return uint64(llm.V1.NumKV)
    }

    return llm.V2.NumKV
}

func (llm *ggufModel) ModelFamily() string {
    t, ok := llm.kv["general.architecture"].(string)
    if ok {
        return t
    }

    return "unknown"
}

func (llm *ggufModel) ModelType() string {
    switch llm.ModelFamily() {
    case "llama":
        if blocks, ok := llm.kv["llama.block_count"].(uint32); ok {
            heads, headsOK := llm.kv["llama.head_count"].(uint32)
            headKVs, headsKVsOK := llm.kv["llama.head_count_kv"].(uint32)
            if headsOK && headsKVsOK && heads/headKVs == 8 {
                return "70B"
            }

            return llamaModelType(blocks)
        }
    case "falcon":
        if blocks, ok := llm.kv["falcon.block_count"].(uint32); ok {
            return falconModelType(blocks)
        }
    case "starcoder":
        if blocks, ok := llm.kv["starcoder.block_count"].(uint32); ok {
            return starCoderModelType(blocks)
        }
    }

    return "unknown"
}

func (llm *ggufModel) FileType() string {
    t, ok := llm.kv["general.file_type"].(uint32)
    if ok {
        return fileType(t)
    }

    return "unknown"
}

func (llm *ggufModel) Decode(r io.Reader) error {
    read := llm.readString
    if llm.Version == 1 {
        read = llm.readStringV1
    }

    for i := 0; uint64(i) < llm.NumKV(); i++ {
        k, err := read(r)
        if err != nil {
            return err
        }

        vtype := llm.readU32(r)

        var v any
        switch vtype {
        case ggufTypeUint8:
            v = llm.readU8(r)
        case ggufTypeInt8:
            v = llm.readI8(r)
        case ggufTypeUint16:
            v = llm.readU16(r)
        case ggufTypeInt16:
            v = llm.readI16(r)
        case ggufTypeUint32:
            v = llm.readU32(r)
        case ggufTypeInt32:
            v = llm.readI32(r)
        case ggufTypeUint64:
            v = llm.readU64(r)
        case ggufTypeInt64:
            v = llm.readI64(r)
        case ggufTypeFloat32:
            v = llm.readF32(r)
        case ggufTypeFloat64:
            v = llm.readF64(r)
        case ggufTypeBool:
            v = llm.readBool(r)
        case ggufTypeString:
            fn := llm.readString
            if llm.Version == 1 {
                fn = llm.readStringV1
            }

            s, err := fn(r)
            if err != nil {
                return err
            }

            v = s
        case ggufTypeArray:
            fn := llm.readArray
            if llm.Version == 1 {
                fn = llm.readArrayV1
            }

            a, err := fn(r)
            if err != nil {
                return err
            }

            v = a
        default:
            return fmt.Errorf("invalid type: %d", vtype)
        }

        llm.kv[k] = v
    }

    return nil
}

func (llm *ggufModel) NumLayers() int64 {
    value, exists := llm.kv[fmt.Sprintf("%s.block_count", llm.ModelFamily())]
    if !exists {
        return 0
    }

    v := value.(uint32)
    return int64(v)
}

func (ggufModel) readU8(r io.Reader) uint8 {
    var u8 uint8
    binary.Read(r, binary.LittleEndian, &u8)
    return u8
}

func (ggufModel) readI8(r io.Reader) int8 {
    var i8 int8
    binary.Read(r, binary.LittleEndian, &i8)
    return i8
}

func (ggufModel) readU16(r io.Reader) uint16 {
    var u16 uint16
    binary.Read(r, binary.LittleEndian, &u16)
    return u16
}

func (ggufModel) readI16(r io.Reader) int16 {
    var i16 int16
    binary.Read(r, binary.LittleEndian, &i16)
    return i16
}

func (ggufModel) readU32(r io.Reader) uint32 {
    var u32 uint32
    binary.Read(r, binary.LittleEndian, &u32)
    return u32
}

func (ggufModel) readI32(r io.Reader) int32 {
    var i32 int32
    binary.Read(r, binary.LittleEndian, &i32)
    return i32
}

func (ggufModel) readU64(r io.Reader) uint64 {
    var u64 uint64
    binary.Read(r, binary.LittleEndian, &u64)
    return u64
}

func (ggufModel) readI64(r io.Reader) int64 {
    var i64 int64
    binary.Read(r, binary.LittleEndian, &i64)
    return i64
}

func (ggufModel) readF32(r io.Reader) float32 {
    var f32 float32
    binary.Read(r, binary.LittleEndian, &f32)
    return f32
}

func (ggufModel) readF64(r io.Reader) float64 {
    var f64 float64
    binary.Read(r, binary.LittleEndian, &f64)
    return f64
}

func (ggufModel) readBool(r io.Reader) bool {
    var b bool
    binary.Read(r, binary.LittleEndian, &b)
    return b
}

func (ggufModel) readStringV1(r io.Reader) (string, error) {
    var nameLength uint32
    binary.Read(r, binary.LittleEndian, &nameLength)

    var b bytes.Buffer
    if _, err := io.CopyN(&b, r, int64(nameLength)); err != nil {
        return "", err
    }

    // gguf v1 strings are null-terminated
    b.Truncate(b.Len() - 1)

    return b.String(), nil
}

func (llm ggufModel) readString(r io.Reader) (string, error) {
    var nameLength uint64
    binary.Read(r, binary.LittleEndian, &nameLength)

    var b bytes.Buffer
    if _, err := io.CopyN(&b, r, int64(nameLength)); err != nil {
        return "", err
    }

    return b.String(), nil
}

func (llm *ggufModel) readArrayV1(r io.Reader) (arr []any, err error) {
    atype := llm.readU32(r)
    n := llm.readU32(r)

    for i := 0; uint32(i) < n; i++ {
        switch atype {
        case ggufTypeUint8:
            arr = append(arr, llm.readU8(r))
        case ggufTypeInt8:
            arr = append(arr, llm.readI8(r))
        case ggufTypeUint16:
            arr = append(arr, llm.readU16(r))
        case ggufTypeInt16:
            arr = append(arr, llm.readI16(r))
        case ggufTypeUint32:
            arr = append(arr, llm.readU32(r))
        case ggufTypeInt32:
            arr = append(arr, llm.readI32(r))
        case ggufTypeFloat32:
            arr = append(arr, llm.readF32(r))
        case ggufTypeBool:
            arr = append(arr, llm.readBool(r))
        case ggufTypeString:
            s, err := llm.readStringV1(r)
            if err != nil {
                return nil, err
            }

            arr = append(arr, s)
        default:
            return nil, fmt.Errorf("invalid array type: %d", atype)
        }
    }

    return
}

func (llm *ggufModel) readArray(r io.Reader) (arr []any, err error) {
    atype := llm.readU32(r)
    n := llm.readU64(r)

    for i := 0; uint64(i) < n; i++ {
        switch atype {
        case ggufTypeUint8:
            arr = append(arr, llm.readU8(r))
        case ggufTypeInt8:
            arr = append(arr, llm.readI8(r))
        case ggufTypeUint16:
            arr = append(arr, llm.readU16(r))
        case ggufTypeInt16:
            arr = append(arr, llm.readI16(r))
        case ggufTypeUint32:
            arr = append(arr, llm.readU32(r))
        case ggufTypeInt32:
            arr = append(arr, llm.readI32(r))
        case ggufTypeUint64:
            arr = append(arr, llm.readU64(r))
        case ggufTypeInt64:
            arr = append(arr, llm.readI64(r))
        case ggufTypeFloat32:
            arr = append(arr, llm.readF32(r))
        case ggufTypeFloat64:
            arr = append(arr, llm.readF64(r))
        case ggufTypeBool:
            arr = append(arr, llm.readBool(r))
        case ggufTypeString:
            s, err := llm.readString(r)
            if err != nil {
                return nil, err
            }

            arr = append(arr, s)
        default:
            return nil, fmt.Errorf("invalid array type: %d", atype)
        }
    }

    return
}
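To make the layout the decoder expects concrete, here is a hedged sketch (same package assumed; buildMinimalGGUF and decodeExample are invented names) that writes the smallest GGUF v2 stream the code above accepts, namely magic, version, tensor count, KV count, then one length-prefixed string key, a type tag, and a string value, and feeds it back through DecodeGGML. Decoding it prints "llama", since ModelFamily reads the general.architecture key populated by Decode.

package llm

import (
    "bytes"
    "encoding/binary"
    "fmt"
)

// buildMinimalGGUF is a hypothetical helper that assembles the smallest GGUF v2
// stream the decoder above accepts: magic, version, tensor count, KV count,
// followed by a single string key/value pair ("general.architecture" = "llama").
func buildMinimalGGUF() *bytes.Reader {
    var buf bytes.Buffer
    le := binary.LittleEndian

    binary.Write(&buf, le, uint32(FILE_MAGIC_GGUF)) // bytes "GGUF" when read back as a little-endian uint32
    binary.Write(&buf, le, uint32(2))               // container version 2
    binary.Write(&buf, le, uint64(0))               // V2.NumTensor
    binary.Write(&buf, le, uint64(1))               // V2.NumKV

    writeString := func(s string) { // v2 strings: uint64 length followed by raw bytes
        binary.Write(&buf, le, uint64(len(s)))
        buf.WriteString(s)
    }
    writeString("general.architecture")
    binary.Write(&buf, le, ggufTypeString) // value type tag
    writeString("llama")

    return bytes.NewReader(buf.Bytes())
}

// decodeExample is likewise illustrative: it round-trips the stream through
// DecodeGGML and reads the architecture back out of the decoded kv map.
func decodeExample() {
    ggml, err := DecodeGGML(buildMinimalGGUF())
    if err != nil {
        panic(err)
    }

    fmt.Println(ggml.ModelFamily()) // prints "llama"
}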
18  llm/llama.cpp/generate_darwin_amd64.go  Normal file
@@ -0,0 +1,18 @@
package llm

//go:generate git submodule init

//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
//go:generate git -C ggml apply ../patches/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
//go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build ggml/build/cpu --target server --config Release
//go:generate mv ggml/build/cpu/bin/server ggml/build/cpu/bin/ollama-runner

//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
18  llm/llama.cpp/generate_darwin_arm64.go  Normal file
@@ -0,0 +1,18 @@
package llm

//go:generate git submodule init

//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
//go:generate git -C ggml apply ../patches/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
//go:generate cmake -S ggml -B ggml/build/metal -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build ggml/build/metal --target server --config Release
//go:generate mv ggml/build/metal/bin/server ggml/build/metal/bin/ollama-runner

//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
//go:generate cmake -S gguf -B gguf/build/metal -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build gguf/build/metal --target server --config Release
//go:generate mv gguf/build/metal/bin/server gguf/build/metal/bin/ollama-runner
26  llm/llama.cpp/generate_linux.go  Normal file
@@ -0,0 +1,26 @@
package llm

//go:generate git submodule init

//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cpu --target server --config Release
//go:generate mv ggml/build/cpu/bin/server ggml/build/cpu/bin/ollama-runner

//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-remove-warm-up-logging.patch
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner

//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cuda --target server --config Release
//go:generate mv ggml/build/cuda/bin/server ggml/build/cuda/bin/ollama-runner
//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build gguf/build/cuda --target server --config Release
//go:generate mv gguf/build/cuda/bin/server gguf/build/cuda/bin/ollama-runner
Some files were not shown because too many files have changed in this diff.