Compare commits
564 Commits
@@ -6,3 +6,4 @@ scripts
 llm/llama.cpp/ggml
 llm/llama.cpp/gguf
 .env
+.cache
.gitignore (vendored, 3 changes)

@@ -6,3 +6,6 @@
 dist
 ollama
 ggml-metal.metal
+.cache
+*.exe
+.idea
Dockerfile (12 changes)

@@ -1,16 +1,16 @@
 FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
 
 ARG TARGETARCH
 ARG VERSION=0.0.0
 ARG GOFLAGS="'-ldflags=-w -s'"
 
 WORKDIR /go/src/github.com/jmorganca/ollama
 RUN apt-get update && apt-get install -y git build-essential cmake
-ADD https://dl.google.com/go/go1.21.1.linux-$TARGETARCH.tar.gz /tmp/go1.21.1.tar.gz
-RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.1.tar.gz
+ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
+RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
 
 COPY . .
 ENV GOARCH=$TARGETARCH
 ENV GOFLAGS=$GOFLAGS
 RUN /usr/local/go/bin/go generate ./... \
     && /usr/local/go/bin/go build .
 
@@ -19,5 +19,11 @@ RUN apt-get update && apt-get install -y ca-certificates
 COPY --from=0 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
 EXPOSE 11434
 ENV OLLAMA_HOST 0.0.0.0
+
+# set some environment variable for better NVIDIA compatibility
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
+ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
+
 ENTRYPOINT ["/bin/ollama"]
 CMD ["serve"]
@@ -1,6 +1,5 @@
-
 # centos7 amd64 dependencies
-FROM --platform=linux/amd64 nvidia/cuda:11.8.0-devel-centos7 AS base-amd64
+FROM --platform=linux/amd64 nvidia/cuda:11.3.1-devel-centos7 AS base-amd64
 RUN yum install -y https://repo.ius.io/ius-release-el7.rpm centos-release-scl && \
     yum update -y && \
     yum install -y devtoolset-10-gcc devtoolset-10-gcc-c++ git236 wget
@@ -8,25 +7,25 @@ RUN wget "https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.
 ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
 
 # centos8 arm64 dependencies
-FROM --platform=linux/arm64 nvidia/cuda:11.4.3-devel-centos8 AS base-arm64
+FROM --platform=linux/arm64 nvidia/cuda-arm64:11.3.1-devel-centos8 AS base-arm64
+RUN sed -i -e 's/mirrorlist/#mirrorlist/g' -e 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
 RUN yum install -y git cmake
 
 FROM base-${TARGETARCH}
 ARG TARGETARCH
+ARG GOFLAGS="'-ldflags -w -s'"
 
 # install go
-ADD https://dl.google.com/go/go1.21.1.linux-$TARGETARCH.tar.gz /tmp/go1.21.1.tar.gz
-RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.1.tar.gz
+ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
+RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
 
 # build the final binary
 WORKDIR /go/src/github.com/jmorganca/ollama
 COPY . .
 
 ENV GOOS=linux
 ENV GOARCH=$TARGETARCH
 
-ARG VERSION=0.0.0
-ARG GOFLAGS="'-ldflags -w -s'"
 ENV GOFLAGS=$GOFLAGS
 
 RUN /usr/local/go/bin/go generate ./... && \
     /usr/local/go/bin/go build .
README.md (132 changes)

@@ -13,7 +13,11 @@ Get up and running with large language models locally.
 
 ### macOS
 
-[Download](https://ollama.ai/download/Ollama-darwin.zip)
+[Download](https://ollama.ai/download/Ollama-darwin.zip)
+
+### Windows
+
+Coming soon!
 
 ### Linux & WSL2
 
@@ -23,9 +27,9 @@ curl https://ollama.ai/install.sh | sh
 
 [Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
 
-### Windows
+### Docker
 
-coming soon
+The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `ollama/ollama` is available on Docker Hub.
 
 ## Quickstart
 
@@ -37,12 +41,14 @@ ollama run llama2
 
 ## Model library
 
-Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library "ollama model library")
+Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library 'ollama model library')
 
 Here are some example open-source models that can be downloaded:
 
 | Model              | Parameters | Size  | Download                       |
 | ------------------ | ---------- | ----- | ------------------------------ |
+| Neural Chat        | 7B         | 4.1GB | `ollama run neural-chat`       |
+| Starling           | 7B         | 4.1GB | `ollama run starling-lm`       |
 | Mistral            | 7B         | 4.1GB | `ollama run mistral`           |
 | Llama 2            | 7B         | 3.8GB | `ollama run llama2`            |
 | Code Llama         | 7B         | 3.8GB | `ollama run codellama`         |
@@ -51,36 +57,41 @@ Here are some example open-source models that can be downloaded:
 | Llama 2 70B        | 70B        | 39GB  | `ollama run llama2:70b`        |
 | Orca Mini          | 3B         | 1.9GB | `ollama run orca-mini`         |
 | Vicuna             | 7B         | 3.8GB | `ollama run vicuna`            |
+| LLaVA              | 7B         | 4.5GB | `ollama run llava`             |
 
 > Note: You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.
 
 ## Customize your own model
 
-### Import from GGUF or GGML
+### Import from GGUF
 
-Ollama supports importing GGUF and GGML file formats in the Modelfile. This means if you have a model that is not in the Ollama library, you can create it, iterate on it, and upload it to the Ollama library to share with others when you are ready.
+Ollama supports importing GGUF models in the Modelfile:
 
-1. Create a file named Modelfile, and add a `FROM` instruction with the local filepath to the model you want to import.
+1. Create a file named `Modelfile`, with a `FROM` instruction with the local filepath to the model you want to import.
 
 ```
 FROM ./vicuna-33b.Q4_0.gguf
 ```
 
-3. Create the model in Ollama
+2. Create the model in Ollama
 
 ```
-ollama create name -f path_to_modelfile
+ollama create example -f Modelfile
 ```
 
-5. Run the model
+3. Run the model
 
 ```
-ollama run name
+ollama run example
 ```
 
+### Import from PyTorch or Safetensors
+
+See the [guide](docs/import.md) on importing models for more information.
+
 ### Customize a prompt
 
-Models from the Ollama library can be customized with a prompt. The example
+Models from the Ollama library can be customized with a prompt. For example, to customize the `llama2` model:
 
 ```
 ollama pull llama2
 
@@ -94,7 +105,7 @@ FROM llama2
 # set the temperature to 1 [higher is more creative, lower is more coherent]
 PARAMETER temperature 1
 
-# set the system prompt
+# set the system message
 SYSTEM """
 You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
 """
 
@@ -109,7 +120,7 @@ ollama run mario
 Hello! It's your friend Mario.
 ```
 
-For more examples, see the [examples](./examples) directory. For more information on working with a Modelfile, see the [Modelfile](./docs/modelfile.md) documentation.
+For more examples, see the [examples](examples) directory. For more information on working with a Modelfile, see the [Modelfile](docs/modelfile.md) documentation.
 
 ## CLI Reference
 
@@ -148,10 +159,17 @@ For multiline input, you can wrap text with `"""`:
 I'm a basic program that prints the famous "Hello, world!" message to the console.
 ```
 
+### Multimodal models
+
+```
+>>> What's in this image? /Users/jmorgan/Desktop/smile.png
+The image features a yellow smiley face, which is likely the central focus of the picture.
+```
+
 ### Pass in prompt as arguments
 
 ```
-$ ollama run llama2 "summarize this file:" "$(cat README.md)"
+$ ollama run llama2 "Summarize this file: $(cat README.md)"
 Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
 ```
 
@@ -170,8 +188,7 @@ ollama list
 Install `cmake` and `go`:
 
 ```
-brew install cmake
-brew install go
+brew install cmake go
 ```
 
 Then generate dependencies and build:
 
@@ -195,30 +212,93 @@ Finally, in a separate shell, run a model:
 
 ## REST API
 
-> See the [API documentation](./docs/api.md) for all endpoints.
+Ollama has a REST API for running and managing models.
 
-Ollama has an API for running and managing models. For example to generate text from a model:
+### Generate a response
 
 ```
-curl -X POST http://localhost:11434/api/generate -d '{
+curl http://localhost:11434/api/generate -d '{
   "model": "llama2",
   "prompt":"Why is the sky blue?"
 }'
 ```
 
+### Chat with a model
+
+```
+curl http://localhost:11434/api/chat -d '{
+  "model": "mistral",
+  "messages": [
+    { "role": "user", "content": "why is the sky blue?" }
+  ]
+}'
+```
+
+See the [API documentation](./docs/api.md) for all endpoints.
+
 ## Community Integrations
 
+### Web & Desktop
+
+- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
+- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
+- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
+- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
+- [Web UI](https://github.com/ollama-webui/ollama-webui)
+- [Ollamac](https://github.com/kevinhermawan/Ollamac)
+- [big-AGI](https://github.com/enricoros/big-agi/blob/main/docs/config-ollama.md)
+- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
+- [Amica](https://github.com/semperai/amica)
+- [chatd](https://github.com/BruceMacD/chatd)
+
+### Terminal
+
+- [oterm](https://github.com/ggozad/oterm)
+- [Ellama Emacs client](https://github.com/s-kostyaev/ellama)
+- [Emacs client](https://github.com/zweifisch/ollama)
+- [gen.nvim](https://github.com/David-Kunz/gen.nvim)
+- [ollama.nvim](https://github.com/nomnivore/ollama.nvim)
+- [ogpt.nvim](https://github.com/huynle/ogpt.nvim)
+- [gptel Emacs client](https://github.com/karthink/gptel)
+- [Oatmeal](https://github.com/dustinblackman/oatmeal)
+
+### Database
+
+- [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md)
+
+### Package managers
+
+- [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
+
+### Libraries
+
+- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
+- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
+- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
+- [LiteLLM](https://github.com/BerriAI/litellm)
+- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
+- [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
+- [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
+- [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
+- [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)
+- [Ollama for Dart](https://github.com/breitburg/dart-ollama)
+- [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel)
+
+### Mobile
+
+- [Maid](https://github.com/danemadsen/Maid) (Mobile Artificial Intelligence Distribution)
+
+### Extensions & Plugins
+
 - [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama)
 - [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel)
 - [Continue](https://github.com/continuedev/continue)
 - [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)
 - [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq)
 - [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)
 - [LiteLLM](https://github.com/BerriAI/litellm)
 - [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)
 - [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
 - [HTML UI](https://github.com/rtcfirefly/ollama-ui)
 - [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
 - [Dumbar](https://github.com/JerrySievert/Dumbar)
 - [Emacs client](https://github.com/zweifisch/ollama)
 - [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram)
 - [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)
+- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
+- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
+- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
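The curl examples in the README above map one-to-one onto the Go client defined in api/client.go below. As a minimal sketch (not part of the diff; it assumes the `llama2` model is already pulled), generating a streamed completion through that client might look like this:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// Resolves the server address from OLLAMA_HOST (default 127.0.0.1:11434).
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := &api.GenerateRequest{
		Model:  "llama2",
		Prompt: "Why is the sky blue?",
	}

	// The callback runs once per streamed chunk; each carries a fragment
	// of the response text.
	err = client.Generate(context.Background(), req, func(resp api.GenerateResponse) error {
		fmt.Print(resp.Response)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```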
api/client.go (145 changes)

@@ -5,27 +5,23 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
+	"net"
 	"net/http"
 	"net/url"
 	"os"
 	"runtime"
 	"strings"
 
 	"github.com/jmorganca/ollama/format"
 	"github.com/jmorganca/ollama/version"
 )
 
-const DefaultHost = "127.0.0.1:11434"
-
-var (
-	envHost = os.Getenv("OLLAMA_HOST")
-)
-
 type Client struct {
-	Base    url.URL
-	HTTP    http.Client
-	Headers http.Header
+	base *url.URL
+	http http.Client
 }
 
@@ -44,49 +40,79 @@ func checkError(resp *http.Response, body []byte) error {
 	return apiError
 }
 
-// Host returns the default host to use for the client. It is determined in the following order:
-// 1. The OLLAMA_HOST environment variable
-// 2. The default host (localhost:11434)
-func Host() string {
-	if envHost != "" {
-		return envHost
-	}
-	return DefaultHost
-}
-
-// FromEnv creates a new client using Host() as the host. An error is returns
-// if the host is invalid.
-func FromEnv() (*Client, error) {
-	h := Host()
-	if !strings.HasPrefix(h, "http://") && !strings.HasPrefix(h, "https://") {
-		h = "http://" + h
-	}
+func ClientFromEnvironment() (*Client, error) {
+	defaultPort := "11434"
 
-	u, err := url.Parse(h)
-	if err != nil {
-		return nil, fmt.Errorf("could not parse host: %w", err)
+	scheme, hostport, ok := strings.Cut(os.Getenv("OLLAMA_HOST"), "://")
+	switch {
+	case !ok:
+		scheme, hostport = "http", os.Getenv("OLLAMA_HOST")
+	case scheme == "http":
+		defaultPort = "80"
+	case scheme == "https":
+		defaultPort = "443"
 	}
 
-	if u.Port() == "" {
-		u.Host += ":11434"
+	// trim trailing slashes
+	hostport = strings.TrimRight(hostport, "/")
+
+	host, port, err := net.SplitHostPort(hostport)
+	if err != nil {
+		host, port = "127.0.0.1", defaultPort
+		if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
+			host = ip.String()
+		} else if hostport != "" {
+			host = hostport
+		}
 	}
 
-	return &Client{Base: *u, HTTP: http.Client{}}, nil
+	client := Client{
+		base: &url.URL{
+			Scheme: scheme,
+			Host:   net.JoinHostPort(host, port),
+		},
+	}
+
+	mockRequest, err := http.NewRequest(http.MethodHead, client.base.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	proxyURL, err := http.ProxyFromEnvironment(mockRequest)
+	if err != nil {
+		return nil, err
+	}
+
+	client.http = http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyURL(proxyURL),
+		},
+	}
+
+	return &client, nil
 }
 
 func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
 	var reqBody io.Reader
 	var data []byte
 	var err error
-	if reqData != nil {
+
+	switch reqData := reqData.(type) {
+	case io.Reader:
+		// reqData is already an io.Reader
+		reqBody = reqData
+	case nil:
+		// noop
+	default:
 		data, err = json.Marshal(reqData)
 		if err != nil {
 			return err
 		}
 
 		reqBody = bytes.NewReader(data)
 	}
 
-	requestURL := c.Base.JoinPath(path)
+	requestURL := c.base.JoinPath(path)
 	request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), reqBody)
 	if err != nil {
 		return err
 
@@ -96,11 +122,7 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
 	request.Header.Set("Accept", "application/json")
 	request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
 
-	for k, v := range c.Headers {
-		request.Header[k] = v
-	}
-
-	respObj, err := c.HTTP.Do(request)
+	respObj, err := c.http.Do(request)
 	if err != nil {
 		return err
 	}
 
@@ -123,6 +145,8 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
 	return nil
 }
 
+const maxBufferSize = 512 * format.KiloByte
+
 func (c *Client) stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error {
 	var buf *bytes.Buffer
 	if data != nil {
 
@@ -134,23 +158,26 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
 		buf = bytes.NewBuffer(bts)
 	}
 
-	requestURL := c.Base.JoinPath(path)
+	requestURL := c.base.JoinPath(path)
 	request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), buf)
 	if err != nil {
 		return err
 	}
 
 	request.Header.Set("Content-Type", "application/json")
-	request.Header.Set("Accept", "application/json")
+	request.Header.Set("Accept", "application/x-ndjson")
 	request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
 
-	response, err := http.DefaultClient.Do(request)
+	response, err := c.http.Do(request)
 	if err != nil {
 		return err
 	}
 	defer response.Body.Close()
 
 	scanner := bufio.NewScanner(response.Body)
+	// increase the buffer size to avoid running out of space
+	scanBuf := make([]byte, 0, maxBufferSize)
+	scanner.Buffer(scanBuf, maxBufferSize)
 	for scanner.Scan() {
 		var errorResponse struct {
 			Error string `json:"error,omitempty"`
 
@@ -194,6 +221,19 @@ func (c *Client) Generate(ctx context.Context, req *GenerateRequest, fn Generate
 	})
 }
 
+type ChatResponseFunc func(ChatResponse) error
+
+func (c *Client) Chat(ctx context.Context, req *ChatRequest, fn ChatResponseFunc) error {
+	return c.stream(ctx, http.MethodPost, "/api/chat", req, func(bts []byte) error {
+		var resp ChatResponse
+		if err := json.Unmarshal(bts, &resp); err != nil {
+			return err
+		}
+
+		return fn(resp)
+	})
+}
+
 type PullProgressFunc func(ProgressResponse) error
 
 func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc) error {
 
@@ -269,3 +309,30 @@ func (c *Client) Heartbeat(ctx context.Context) error {
 	}
 	return nil
 }
+
+func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
+	if err := c.do(ctx, http.MethodHead, fmt.Sprintf("/api/blobs/%s", digest), nil, nil); err != nil {
+		var statusError StatusError
+		if !errors.As(err, &statusError) || statusError.StatusCode != http.StatusNotFound {
+			return err
+		}
+
+		if err := c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *Client) Version(ctx context.Context) (string, error) {
+	var version struct {
+		Version string `json:"version"`
+	}
+
+	if err := c.do(ctx, http.MethodGet, "/api/version", nil, &version); err != nil {
+		return "", err
+	}
+
+	return version.Version, nil
+}
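The new Chat method above streams NDJSON chunks and invokes a ChatResponseFunc callback for each one. A minimal sketch of calling it (not part of the diff; it assumes the `llama2` model is already pulled):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := &api.ChatRequest{
		Model: "llama2",
		Messages: []api.Message{
			{Role: "user", Content: "why is the sky blue?"},
		},
	}

	// The callback runs once per streamed chunk until Done is true.
	err = client.Chat(context.Background(), req, func(resp api.ChatResponse) error {
		if resp.Message != nil {
			fmt.Print(resp.Message.Content)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```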
@@ -1,13 +1,17 @@
 import os
 import json
 import requests
+import os
+import hashlib
+import json
+from pathlib import Path
 
 BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
 
 # Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses.
 # The final response object will include statistics and additional data from the request. Use the callback function to override
 # the default handler.
-def generate(model_name, prompt, system=None, template=None, context=None, options=None, callback=None):
+def generate(model_name, prompt, system=None, template=None, format="", context=None, options=None, callback=None):
     try:
         url = f"{BASE_URL}/api/generate"
         payload = {
 
@@ -16,7 +20,8 @@ def generate(model_name, prompt, system=None, template=None, context=None, optio
             "system": system,
             "template": template,
             "context": context,
-            "options": options
+            "options": options,
+            "format": format,
         }
 
         # Remove keys with None values
 
@@ -56,30 +61,86 @@ def generate(model_name, prompt, system=None, template=None, context=None, optio
     except requests.exceptions.RequestException as e:
         print(f"An error occurred: {e}")
         return None, None
 
 
+# Create a blob file on the server if it doesn't exist.
+def create_blob(digest, file_path):
+    url = f"{BASE_URL}/api/blobs/{digest}"
+
+    # Check if the blob exists
+    response = requests.head(url)
+    if response.status_code != 404:
+        return  # Blob already exists, no need to upload
+    response.raise_for_status()
+
+    # Upload the blob
+    with open(file_path, 'rb') as file_data:
+        requests.post(url, data=file_data)
+
+
 # Create a model from a Modelfile. Use the callback function to override the default handler.
-def create(model_name, model_path, callback=None):
+def create(model_name, filename, callback=None):
     try:
+        file_path = Path(filename).expanduser().resolve()
+        processed_lines = []
+
+        # Read and process the modelfile
+        with open(file_path, 'r') as f:
+            for line in f:
+                # Skip empty or whitespace-only lines
+                if not line.strip():
+                    continue
+
+                command, args = line.split(maxsplit=1)
+
+                if command.upper() in ["FROM", "ADAPTER"]:
+                    path = Path(args.strip()).expanduser()
+
+                    # Check if path is relative and resolve it
+                    if not path.is_absolute():
+                        path = (file_path.parent / path)
+
+                    # Skip if file does not exist for "model", this is handled by the server
+                    if not path.exists():
+                        processed_lines.append(line)
+                        continue
+
+                    # Calculate SHA-256 hash
+                    with open(path, 'rb') as bin_file:
+                        hash = hashlib.sha256()
+                        hash.update(bin_file.read())
+                        blob = f"sha256:{hash.hexdigest()}"
+
+                    # Add the file to the remote server
+                    create_blob(blob, path)
+
+                    # Replace path with digest in the line
+                    line = f"{command} @{blob}\n"
+
+                processed_lines.append(line)
+
+        # Combine processed lines back into a single string
+        modelfile_content = '\n'.join(processed_lines)
+
         url = f"{BASE_URL}/api/create"
-        payload = {"name": model_name, "path": model_path}
+        payload = {"name": model_name, "modelfile": modelfile_content}
 
         # Making a POST request with the stream parameter set to True to handle streaming responses
         with requests.post(url, json=payload, stream=True) as response:
             response.raise_for_status()
 
             # Iterating over the response line by line and displaying the status
             for line in response.iter_lines():
                 if line:
                     # Parsing each line (JSON chunk) and extracting the status
                     chunk = json.loads(line)
 
                     if callback:
                         callback(chunk)
                     else:
                         print(f"Status: {chunk.get('status')}")
-    except requests.exceptions.RequestException as e:
+
+    except Exception as e:
         print(f"An error occurred: {e}")
 
 
 # Pull a model from a the model registry. Cancelled pulls are resumed from where they left off, and multiple
 # calls to will share the same download progress. Use the callback function to override the default handler.
 def pull(model_name, insecure=False, callback=None):
 
@@ -221,5 +282,3 @@ def heartbeat():
     except requests.exceptions.RequestException as e:
         print(f"An error occurred: {e}")
         return "Ollama is not running"
-
-
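The create() rewrite above replaces server-side file paths with content-addressed blobs: each local FROM/ADAPTER file is hashed with SHA-256, uploaded via /api/blobs/&lt;digest&gt;, and referenced as `@sha256:...` in the submitted Modelfile. The Go client gained the equivalent primitive in CreateBlob (see api/client.go above); a sketch of the same digest-then-upload flow from Go, with the file name purely illustrative:

```go
package main

import (
	"context"
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical local model file; any GGUF path works the same way.
	f, err := os.Open("vicuna-33b.Q4_0.gguf")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Compute the sha256 digest the server expects in /api/blobs/<digest>.
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	digest := fmt.Sprintf("sha256:%x", h.Sum(nil))

	// Rewind and upload; CreateBlob issues a HEAD first and only POSTs
	// the data when the server reports the blob as missing (404).
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		log.Fatal(err)
	}
	if err := client.CreateBlob(context.Background(), digest, f); err != nil {
		log.Fatal(err)
	}

	fmt.Println("uploaded", digest)
}
```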
api/client_test.go (new file, 43 lines)

@@ -0,0 +1,43 @@
+package api
+
+import "testing"
+
+func TestClientFromEnvironment(t *testing.T) {
+	type testCase struct {
+		value  string
+		expect string
+		err    error
+	}
+
+	testCases := map[string]*testCase{
+		"empty":                      {value: "", expect: "http://127.0.0.1:11434"},
+		"only address":               {value: "1.2.3.4", expect: "http://1.2.3.4:11434"},
+		"only port":                  {value: ":1234", expect: "http://:1234"},
+		"address and port":           {value: "1.2.3.4:1234", expect: "http://1.2.3.4:1234"},
+		"scheme http and address":    {value: "http://1.2.3.4", expect: "http://1.2.3.4:80"},
+		"scheme https and address":   {value: "https://1.2.3.4", expect: "https://1.2.3.4:443"},
+		"scheme, address, and port":  {value: "https://1.2.3.4:1234", expect: "https://1.2.3.4:1234"},
+		"hostname":                   {value: "example.com", expect: "http://example.com:11434"},
+		"hostname and port":          {value: "example.com:1234", expect: "http://example.com:1234"},
+		"scheme http and hostname":   {value: "http://example.com", expect: "http://example.com:80"},
+		"scheme https and hostname":  {value: "https://example.com", expect: "https://example.com:443"},
+		"scheme, hostname, and port": {value: "https://example.com:1234", expect: "https://example.com:1234"},
+		"trailing slash":             {value: "example.com/", expect: "http://example.com:11434"},
+		"trailing slash port":        {value: "example.com:1234/", expect: "http://example.com:1234"},
+	}
+
+	for k, v := range testCases {
+		t.Run(k, func(t *testing.T) {
+			t.Setenv("OLLAMA_HOST", v.value)
+
+			client, err := ClientFromEnvironment()
+			if err != v.err {
+				t.Fatalf("expected %s, got %s", v.err, err)
+			}
+
+			if client.base.String() != v.expect {
+				t.Fatalf("expected %s, got %s", v.expect, client.base.String())
+			}
+		})
+	}
+}
422
api/types.go
422
api/types.go
@@ -3,10 +3,10 @@ package api
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@@ -31,93 +31,48 @@ func (e StatusError) Error() string {
|
||||
}
|
||||
}
|
||||
|
||||
type ImageData []byte
|
||||
|
||||
type GenerateRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
System string `json:"system"`
|
||||
Template string `json:"template"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
System string `json:"system"`
|
||||
Template string `json:"template"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
Raw bool `json:"raw,omitempty"`
|
||||
Format string `json:"format"`
|
||||
Images []ImageData `json:"images,omitempty"`
|
||||
|
||||
Options map[string]interface{} `json:"options"`
|
||||
}
|
||||
|
||||
type EmbeddingRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
type ChatRequest struct {
|
||||
Model string `json:"model"`
|
||||
Messages []Message `json:"messages"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
Format string `json:"format"`
|
||||
|
||||
Options map[string]interface{} `json:"options"`
|
||||
}
|
||||
|
||||
type EmbeddingResponse struct {
|
||||
Embedding []float64 `json:"embedding"`
|
||||
type Message struct {
|
||||
Role string `json:"role"` // one of ["system", "user", "assistant"]
|
||||
Content string `json:"content"`
|
||||
Images []ImageData `json:"images, omitempty"`
|
||||
}
|
||||
|
||||
type CreateRequest struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
type DeleteRequest struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowRequest struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowResponse struct {
|
||||
License string `json:"license,omitempty"`
|
||||
Modelfile string `json:"modelfile,omitempty"`
|
||||
Parameters string `json:"parameters,omitempty"`
|
||||
Template string `json:"template,omitempty"`
|
||||
System string `json:"system,omitempty"`
|
||||
}
|
||||
|
||||
type CopyRequest struct {
|
||||
Source string `json:"source"`
|
||||
Destination string `json:"destination"`
|
||||
}
|
||||
|
||||
type PullRequest struct {
|
||||
Name string `json:"name"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
}
|
||||
|
||||
type ProgressResponse struct {
|
||||
Status string `json:"status"`
|
||||
Digest string `json:"digest,omitempty"`
|
||||
Total int64 `json:"total,omitempty"`
|
||||
Completed int64 `json:"completed,omitempty"`
|
||||
}
|
||||
|
||||
type PushRequest struct {
|
||||
Name string `json:"name"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
}
|
||||
|
||||
type ListResponse struct {
|
||||
Models []ModelResponse `json:"models"`
|
||||
}
|
||||
|
||||
type ModelResponse struct {
|
||||
Name string `json:"name"`
|
||||
ModifiedAt time.Time `json:"modified_at"`
|
||||
Size int64 `json:"size"`
|
||||
Digest string `json:"digest"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
type GenerateResponse struct {
|
||||
type ChatResponse struct {
|
||||
Model string `json:"model"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Response string `json:"response,omitempty"`
|
||||
Message *Message `json:"message,omitempty"`
|
||||
|
||||
Done bool `json:"done"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
Done bool `json:"done"`
|
||||
|
||||
Metrics
|
||||
}
|
||||
|
||||
type Metrics struct {
|
||||
TotalDuration time.Duration `json:"total_duration,omitempty"`
|
||||
LoadDuration time.Duration `json:"load_duration,omitempty"`
|
||||
PromptEvalCount int `json:"prompt_eval_count,omitempty"`
|
||||
@@ -126,58 +81,13 @@ type GenerateResponse struct {
|
||||
EvalDuration time.Duration `json:"eval_duration,omitempty"`
|
||||
}
|
||||
|
||||
func (r *GenerateResponse) Summary() {
|
||||
if r.TotalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "total duration: %v\n", r.TotalDuration)
|
||||
}
|
||||
|
||||
if r.LoadDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "load duration: %v\n", r.LoadDuration)
|
||||
}
|
||||
|
||||
if r.PromptEvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
|
||||
}
|
||||
|
||||
if r.PromptEvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", r.PromptEvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(r.PromptEvalCount)/r.PromptEvalDuration.Seconds())
|
||||
}
|
||||
|
||||
if r.EvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", r.EvalCount)
|
||||
}
|
||||
|
||||
if r.EvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval duration: %s\n", r.EvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(r.EvalCount)/r.EvalDuration.Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
// Options specfied in GenerateRequest, if you add a new option here add it to the API docs also
|
||||
type Options struct {
|
||||
Seed int `json:"seed,omitempty"`
|
||||
Runner
|
||||
|
||||
// Backend options
|
||||
UseNUMA bool `json:"numa,omitempty"`
|
||||
|
||||
// Model options
|
||||
NumCtx int `json:"num_ctx,omitempty"`
|
||||
NumKeep int `json:"num_keep,omitempty"`
|
||||
NumBatch int `json:"num_batch,omitempty"`
|
||||
NumGQA int `json:"num_gqa,omitempty"`
|
||||
NumGPU int `json:"num_gpu,omitempty"`
|
||||
MainGPU int `json:"main_gpu,omitempty"`
|
||||
LowVRAM bool `json:"low_vram,omitempty"`
|
||||
F16KV bool `json:"f16_kv,omitempty"`
|
||||
LogitsAll bool `json:"logits_all,omitempty"`
|
||||
VocabOnly bool `json:"vocab_only,omitempty"`
|
||||
UseMMap bool `json:"use_mmap,omitempty"`
|
||||
UseMLock bool `json:"use_mlock,omitempty"`
|
||||
EmbeddingOnly bool `json:"embedding_only,omitempty"`
|
||||
RopeFrequencyBase float32 `json:"rope_frequency_base,omitempty"`
|
||||
RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
|
||||
|
||||
// Predict options
|
||||
// Predict options used at runtime
|
||||
NumKeep int `json:"num_keep,omitempty"`
|
||||
Seed int `json:"seed,omitempty"`
|
||||
NumPredict int `json:"num_predict,omitempty"`
|
||||
TopK int `json:"top_k,omitempty"`
|
||||
TopP float32 `json:"top_p,omitempty"`
|
||||
@@ -193,10 +103,156 @@ type Options struct {
|
||||
MirostatEta float32 `json:"mirostat_eta,omitempty"`
|
||||
PenalizeNewline bool `json:"penalize_newline,omitempty"`
|
||||
Stop []string `json:"stop,omitempty"`
|
||||
|
||||
NumThread int `json:"num_thread,omitempty"`
|
||||
}
|
||||
|
||||
// Runner options which must be set when the model is loaded into memory
|
||||
type Runner struct {
|
||||
UseNUMA bool `json:"numa,omitempty"`
|
||||
NumCtx int `json:"num_ctx,omitempty"`
|
||||
NumBatch int `json:"num_batch,omitempty"`
|
||||
NumGQA int `json:"num_gqa,omitempty"`
|
||||
NumGPU int `json:"num_gpu,omitempty"`
|
||||
MainGPU int `json:"main_gpu,omitempty"`
|
||||
LowVRAM bool `json:"low_vram,omitempty"`
|
||||
F16KV bool `json:"f16_kv,omitempty"`
|
||||
LogitsAll bool `json:"logits_all,omitempty"`
|
||||
VocabOnly bool `json:"vocab_only,omitempty"`
|
||||
UseMMap bool `json:"use_mmap,omitempty"`
|
||||
UseMLock bool `json:"use_mlock,omitempty"`
|
||||
EmbeddingOnly bool `json:"embedding_only,omitempty"`
|
||||
RopeFrequencyBase float32 `json:"rope_frequency_base,omitempty"`
|
||||
RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
|
||||
NumThread int `json:"num_thread,omitempty"`
|
||||
}
|
||||
|
||||
type EmbeddingRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
|
||||
Options map[string]interface{} `json:"options"`
|
||||
}
|
||||
|
||||
type EmbeddingResponse struct {
|
||||
Embedding []float64 `json:"embedding"`
|
||||
}
|
||||
|
||||
type CreateRequest struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Modelfile string `json:"modelfile"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
type DeleteRequest struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowRequest struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowResponse struct {
|
||||
License string `json:"license,omitempty"`
|
||||
Modelfile string `json:"modelfile,omitempty"`
|
||||
Parameters string `json:"parameters,omitempty"`
|
||||
Template string `json:"template,omitempty"`
|
||||
System string `json:"system,omitempty"`
|
||||
Details ModelDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
type CopyRequest struct {
|
||||
Source string `json:"source"`
|
||||
Destination string `json:"destination"`
|
||||
}
|
||||
|
||||
type PullRequest struct {
|
||||
Name string `json:"name"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
type ProgressResponse struct {
|
||||
Status string `json:"status"`
|
||||
Digest string `json:"digest,omitempty"`
|
||||
Total int64 `json:"total,omitempty"`
|
||||
Completed int64 `json:"completed,omitempty"`
|
||||
}
|
||||
|
||||
type PushRequest struct {
|
||||
Name string `json:"name"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
type ListResponse struct {
|
||||
Models []ModelResponse `json:"models"`
|
||||
}
|
||||
|
||||
type ModelResponse struct {
|
||||
Name string `json:"name"`
|
||||
ModifiedAt time.Time `json:"modified_at"`
|
||||
Size int64 `json:"size"`
|
||||
Digest string `json:"digest"`
|
||||
Details ModelDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
type GenerateResponse struct {
|
||||
Model string `json:"model"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Response string `json:"response"`
|
||||
|
||||
Done bool `json:"done"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
|
||||
Metrics
|
||||
}
|
||||
|
||||
type ModelDetails struct {
|
||||
Format string `json:"format"`
|
||||
Family string `json:"family"`
|
||||
Families []string `json:"families"`
|
||||
ParameterSize string `json:"parameter_size"`
|
||||
QuantizationLevel string `json:"quantization_level"`
|
||||
}
|
||||
|
||||
func (m *Metrics) Summary() {
|
||||
if m.TotalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "total duration: %v\n", m.TotalDuration)
|
||||
}
|
||||
|
||||
if m.LoadDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "load duration: %v\n", m.LoadDuration)
|
||||
}
|
||||
|
||||
if m.PromptEvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", m.PromptEvalCount)
|
||||
}
|
||||
|
||||
if m.PromptEvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", m.PromptEvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(m.PromptEvalCount)/m.PromptEvalDuration.Seconds())
|
||||
}
|
||||
|
||||
if m.EvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", m.EvalCount)
|
||||
}
|
||||
|
||||
if m.EvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval duration: %s\n", m.EvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(m.EvalCount)/m.EvalDuration.Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
var ErrInvalidOpts = fmt.Errorf("invalid options")
|
||||
|
||||
func (opts *Options) FromMap(m map[string]interface{}) error {
|
||||
valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
|
||||
typeOpts := reflect.TypeOf(opts).Elem() // types of the fields in the options struct
|
||||
@@ -210,6 +266,7 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
|
||||
}
|
||||
}
|
||||
|
||||
invalidOpts := []string{}
|
||||
for key, val := range m {
|
||||
if opt, ok := jsonOpts[key]; ok {
|
||||
field := valueOpts.FieldByName(opt.Name)
|
||||
@@ -227,44 +284,39 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
|
||||
// when JSON unmarshals numbers, it uses float64, not int
|
||||
field.SetInt(int64(t))
|
||||
default:
|
||||
log.Printf("could not convert model parameter %v to int, skipped", key)
|
||||
return fmt.Errorf("option %q must be of type integer", key)
|
||||
}
|
||||
case reflect.Bool:
|
||||
val, ok := val.(bool)
|
||||
if !ok {
|
||||
log.Printf("could not convert model parameter %v to bool, skipped", key)
|
||||
continue
|
||||
return fmt.Errorf("option %q must be of type boolean", key)
|
||||
}
|
||||
field.SetBool(val)
|
||||
case reflect.Float32:
|
||||
// JSON unmarshals to float64
|
||||
val, ok := val.(float64)
|
||||
if !ok {
|
||||
log.Printf("could not convert model parameter %v to float32, skipped", key)
|
||||
continue
|
||||
return fmt.Errorf("option %q must be of type float32", key)
|
||||
}
|
||||
field.SetFloat(val)
|
||||
case reflect.String:
|
||||
val, ok := val.(string)
|
||||
if !ok {
|
||||
log.Printf("could not convert model parameter %v to string, skipped", key)
|
||||
continue
|
||||
return fmt.Errorf("option %q must be of type string", key)
|
||||
}
|
||||
field.SetString(val)
|
||||
case reflect.Slice:
|
||||
// JSON unmarshals to []interface{}, not []string
|
||||
val, ok := val.([]interface{})
|
||||
if !ok {
|
||||
log.Printf("could not convert model parameter %v to slice, skipped", key)
|
||||
continue
|
||||
return fmt.Errorf("option %q must be of type array", key)
|
||||
}
|
||||
// convert []interface{} to []string
|
||||
slice := make([]string, len(val))
|
||||
for i, item := range val {
|
||||
str, ok := item.(string)
|
||||
if !ok {
|
||||
log.Printf("could not convert model parameter %v to slice of strings, skipped", key)
|
||||
continue
|
||||
return fmt.Errorf("option %q must be of an array of strings", key)
|
||||
}
|
||||
slice[i] = str
|
||||
}
|
||||
@@ -273,8 +325,14 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
|
||||
return fmt.Errorf("unknown type loading config params: %v", field.Kind())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
invalidOpts = append(invalidOpts, key)
|
||||
}
|
||||
}
|
||||
|
||||
if len(invalidOpts) > 0 {
|
||||
return fmt.Errorf("%w: %v", ErrInvalidOpts, strings.Join(invalidOpts, ", "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
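With this change, `FromMap` rejects unknown or mistyped options instead of silently skipping them. A minimal in-package sketch of the new behavior (hypothetical call site, e.g. in a `_test.go` file; note that JSON-decoded numbers arrive as `float64`, which is why the integer case above converts from `float64`):

```go
// Hypothetical sketch; assumes this package's Options, DefaultOptions
// and ErrInvalidOpts shown above.
func ExampleOptions_FromMap() {
	opts := DefaultOptions()
	err := opts.FromMap(map[string]interface{}{
		"temperature": 0.7,                    // float32 field, set from float64
		"num_ctx":     float64(4096),          // int field; JSON numbers decode as float64
		"stop":        []interface{}{"user:"}, // converted to []string element by element
	})
	// unknown keys are now collected and reported via ErrInvalidOpts
	fmt.Println(errors.Is(err, ErrInvalidOpts))
	// Output: false
}
```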
@@ -282,7 +340,7 @@ func DefaultOptions() Options {
	return Options{
		// options set on request to runner
		NumPredict: -1,
		NumKeep:    -1,
		NumKeep:    0,
		Temperature: 0.8,
		TopK:        40,
		TopP:        0.9,
@@ -298,20 +356,22 @@ func DefaultOptions() Options {
		PenalizeNewline: true,
		Seed:            -1,

		// options set when the model is loaded
		NumCtx:             2048,
		RopeFrequencyBase:  10000.0,
		RopeFrequencyScale: 1.0,
		NumBatch:           512,
		NumGPU:             -1, // -1 here indicates that NumGPU should be set dynamically
		NumGQA:             1,
		NumThread:          0, // let the runtime decide
		LowVRAM:            false,
		F16KV:              true,
		UseMLock:           false,
		UseMMap:            true,
		UseNUMA:            false,
		EmbeddingOnly:      true,
		Runner: Runner{
			// options set when the model is loaded
			NumCtx:             2048,
			RopeFrequencyBase:  10000.0,
			RopeFrequencyScale: 1.0,
			NumBatch:           512,
			NumGPU:             -1, // -1 here indicates that NumGPU should be set dynamically
			NumGQA:             1,
			NumThread:          0, // let the runtime decide
			LowVRAM:            false,
			F16KV:              true,
			UseMLock:           false,
			UseMMap:            true,
			UseNUMA:            false,
			EmbeddingOnly:      true,
		},
	}
}
@@ -343,3 +403,63 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) {

	return nil
}

// FormatParams converts specified parameter options to their correct types
func FormatParams(params map[string][]string) (map[string]interface{}, error) {
	opts := Options{}
	valueOpts := reflect.ValueOf(&opts).Elem() // names of the fields in the options struct
	typeOpts := reflect.TypeOf(opts)           // types of the fields in the options struct

	// build map of json struct tags to their types
	jsonOpts := make(map[string]reflect.StructField)
	for _, field := range reflect.VisibleFields(typeOpts) {
		jsonTag := strings.Split(field.Tag.Get("json"), ",")[0]
		if jsonTag != "" {
			jsonOpts[jsonTag] = field
		}
	}

	out := make(map[string]interface{})
	// iterate params and set values based on json struct tags
	for key, vals := range params {
		if opt, ok := jsonOpts[key]; !ok {
			return nil, fmt.Errorf("unknown parameter '%s'", key)
		} else {
			field := valueOpts.FieldByName(opt.Name)
			if field.IsValid() && field.CanSet() {
				switch field.Kind() {
				case reflect.Float32:
					floatVal, err := strconv.ParseFloat(vals[0], 32)
					if err != nil {
						return nil, fmt.Errorf("invalid float value %s", vals)
					}

					out[key] = float32(floatVal)
				case reflect.Int:
					intVal, err := strconv.ParseInt(vals[0], 10, 64)
					if err != nil {
						return nil, fmt.Errorf("invalid int value %s", vals)
					}

					out[key] = intVal
				case reflect.Bool:
					boolVal, err := strconv.ParseBool(vals[0])
					if err != nil {
						return nil, fmt.Errorf("invalid bool value %s", vals)
					}

					out[key] = boolVal
				case reflect.String:
					out[key] = vals[0]
				case reflect.Slice:
					// TODO: only string slices are supported right now
					out[key] = vals
				default:
					return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
				}
			}
		}
	}

	return out, nil
}
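A short usage sketch for `FormatParams` (hypothetical in-package call site; the keys are the JSON tags of `Options`, and values are coerced from strings to the corresponding field types):

```go
// Sketch: Modelfile PARAMETER values arrive as strings and are coerced
// to the field types of Options via its json struct tags.
func ExampleFormatParams() {
	params, err := FormatParams(map[string][]string{
		"temperature": {"0.7"},               // -> float32
		"num_ctx":     {"4096"},              // -> int64
		"stop":        {"[INST]", "[/INST]"}, // string slices pass through as []string
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(params["temperature"], params["num_ctx"], params["stop"])
	// Output: 0.7 4096 [[INST] [/INST]]
}
```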
@@ -47,16 +47,6 @@ const config: ForgeConfig = {
  },
  rebuildConfig: {},
  makers: [new MakerSquirrel({}), new MakerZIP({}, ['darwin'])],
  publishers: [
    new PublisherGithub({
      repository: {
        name: 'ollama',
        owner: 'jmorganca',
      },
      draft: false,
      prerelease: true,
    }),
  ],
  hooks: {
    readPackageJson: async (_, packageJson) => {
      return { ...packageJson, version: process.env.VERSION || packageJson.version }
992  app/package-lock.json (generated): file diff suppressed because it is too large
@@ -46,7 +46,7 @@
    "chmodr": "^1.2.0",
    "copy-webpack-plugin": "^11.0.0",
    "css-loader": "^6.8.1",
    "electron": "25.2.0",
    "electron": "25.9.2",
    "eslint": "^8.43.0",
    "eslint-plugin-import": "^2.27.5",
    "fork-ts-checker-webpack-plugin": "^7.3.0",
@@ -162,13 +162,56 @@ app.on('before-quit', () => {
  }
})

const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${
  process.arch
}&version=${app.getVersion()}&id=${id()}`

let latest = ''
async function isNewReleaseAvailable() {
  try {
    const response = await fetch(updateURL)

    if (!response.ok) {
      return false
    }

    if (response.status === 204) {
      return false
    }

    const data = await response.json()

    const url = data?.url
    if (!url) {
      return false
    }

    if (latest === url) {
      return false
    }

    latest = url

    return true
  } catch (error) {
    logger.error(`update check failed - ${error}`)
    return false
  }
}

async function checkUpdate() {
  const available = await isNewReleaseAvailable()
  if (available) {
    logger.info('checking for update')
    autoUpdater.checkForUpdates()
  }
}

function init() {
  if (app.isPackaged) {
    autoUpdater.checkForUpdates()
    checkUpdate()
    setInterval(() => {
      if (!updateAvailable) {
        autoUpdater.checkForUpdates()
      }
      checkUpdate()
    }, 60 * 60 * 1000)
  }

@@ -246,11 +289,7 @@ function id(): string {
  return uuid
}

autoUpdater.setFeedURL({
  url: `https://ollama.ai/api/update?os=${process.platform}&arch=${
    process.arch
  }&version=${app.getVersion()}&id=${id()}`,
})
autoUpdater.setFeedURL({ url: updateURL })

autoUpdater.on('error', e => {
  logger.error(`update check failed - ${e.message}`)
957  cmd/cmd.go: file diff suppressed because it is too large
@@ -1,44 +0,0 @@
package cmd

import (
	"fmt"
	"os"
	"time"

	"github.com/jmorganca/ollama/progressbar"
)

type Spinner struct {
	description string
	*progressbar.ProgressBar
}

func NewSpinner(description string) *Spinner {
	return &Spinner{
		description: description,
		ProgressBar: progressbar.NewOptions(-1,
			progressbar.OptionSetWriter(os.Stderr),
			progressbar.OptionThrottle(60*time.Millisecond),
			progressbar.OptionSpinnerType(14),
			progressbar.OptionSetRenderBlankState(true),
			progressbar.OptionSetElapsedTime(false),
			progressbar.OptionClearOnFinish(),
			progressbar.OptionSetDescription(description),
		),
	}
}

func (s *Spinner) Spin(tick time.Duration) {
	for range time.Tick(tick) {
		if s.IsFinished() {
			break
		}

		s.Add(1)
	}
}

func (s *Spinner) Stop() {
	s.Finish()
	fmt.Println(s.description)
}
619  docs/api.md
@@ -3,6 +3,7 @@
## Endpoints

- [Generate a completion](#generate-a-completion)
- [Generate a chat completion](#generate-a-chat-completion)
- [Create a Model](#create-a-model)
- [List Local Models](#list-local-models)
- [Show Model Information](#show-model-information)

@@ -12,7 +13,6 @@
- [Push a Model](#push-a-model)
- [Generate Embeddings](#generate-embeddings)

## Conventions

### Model names

@@ -25,7 +25,7 @@ All durations are returned in nanoseconds.

### Streaming responses

Certain endpoints stream responses as JSON objects delineated with the newline (`\n`) character.
Certain endpoints stream responses as JSON objects.

## Generate a completion

@@ -33,36 +33,48 @@ Certain endpoints stream responses as JSON objects delineated with the newline (
POST /api/generate
```

Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses. The final response object will include statistics and additional data from the request.
Generate a response for a given prompt with a provided model. This is a streaming endpoint, so there will be a series of responses. The final response object will include statistics and additional data from the request.

### Parameters

- `model`: (required) the [model name](#model-names)
- `prompt`: the prompt to generate a response for
- `images`: a list of base64-encoded images (for multimodal models such as `llava`)

Advanced parameters:
Advanced parameters (optional):

- `format`: the format to return a response in. Currently the only accepted value is `json`
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
- `system`: system prompt to (overrides what is defined in the `Modelfile`)
- `system`: system message (overrides what is defined in the `Modelfile`)
- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
- `context`: the context parameter returned from a previous request to `/generate`, this can be used to keep a short conversational memory
- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
- `raw`: if `true` no formatting will be applied to the prompt. You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API.

### Request
### JSON mode

Enable JSON mode by setting the `format` parameter to `json`. This will structure the response as valid JSON. See the JSON mode [example](#request-json-mode) below.

> Note: it's important to instruct the model to use JSON in the `prompt`. Otherwise, the model may generate large amounts of whitespace.

### Examples

#### Request

```shell
curl -X POST http://localhost:11434/api/generate -d '{
  "model": "llama2:7b",
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "Why is the sky blue?"
}'
```

### Response
#### Response

A stream of JSON objects:
A stream of JSON objects is returned:

```json
{
  "model": "llama2:7b",
  "model": "llama2",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "response": "The",
  "done": false
@@ -80,13 +92,15 @@ The final response in the stream also includes additional data about the generat
- `eval_count`: number of tokens in the response
- `eval_duration`: time in nanoseconds spent generating the response
- `context`: an encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory
- `response`: empty if the response was streamed, if not streamed, this will contain the full response

To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration`.

```json
{
  "model": "llama2:7b",
  "model": "llama2",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "",
  "context": [1, 2, 3],
  "done": true,
  "total_duration": 5589157167,
@@ -100,29 +114,395 @@ To calculate how fast the response is generated in tokens per second (token/s),
}
```
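For illustration, a small Go client (not part of the documented API surface; field names match the JSON above, and error handling is abbreviated) that reads the stream line by line and computes the eval rate from the final object. For the values above, 13 / (1325948000 ns / 1e9) is roughly 9.8 tokens/s:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type generateResponse struct {
	Response     string `json:"response"`
	Done         bool   `json:"done"`
	EvalCount    int    `json:"eval_count"`
	EvalDuration int64  `json:"eval_duration"` // nanoseconds
}

func main() {
	body := []byte(`{"model": "llama2", "prompt": "Why is the sky blue?"}`)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// each line of the stream is one JSON object
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		var r generateResponse
		if err := json.Unmarshal(scanner.Bytes(), &r); err != nil {
			panic(err)
		}
		fmt.Print(r.Response)
		if r.Done {
			// tokens per second = eval_count / eval_duration (converted to seconds)
			fmt.Printf("\n%.2f tokens/s\n", float64(r.EvalCount)/(float64(r.EvalDuration)/1e9))
		}
	}
}
```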
#### Request (No streaming)

A response can be received in one reply when streaming is off.

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "Why is the sky blue?",
  "stream": false
}'
```

#### Response

If `stream` is set to `false`, the response will be a single JSON object:

```json
{
  "model": "llama2",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "The sky is blue because it is the color of the sky.",
  "context": [1, 2, 3],
  "done": true,
  "total_duration": 5589157167,
  "load_duration": 3013701500,
  "sample_count": 114,
  "sample_duration": 81442000,
  "prompt_eval_count": 46,
  "prompt_eval_duration": 1160282000,
  "eval_count": 13,
  "eval_duration": 1325948000
}
```

#### Request (with images)

To submit images to multimodal models such as `llava` or `bakllava`, provide a list of base64-encoded `images`:

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llava",
  "prompt": "What is in this picture?",
  "stream": false,
"images": ["iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF
169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC"]
}'
```

#### Response

```
{
  "model": "llava",
  "created_at": "2023-11-03T15:36:02.583064Z",
  "response": "A happy cartoon character, which is cute and cheerful.",
  "context": [1, 2, 3],
  "done": true,
  "total_duration": 14648695333,
  "load_duration": 3302671417,
  "prompt_eval_count": 14,
  "prompt_eval_duration": 286243000,
  "eval_count": 129,
  "eval_duration": 10931424000
}
```

#### Request (Raw Mode)

In some cases you may wish to bypass the templating system and provide a full prompt. In this case, you can use the `raw` parameter to disable formatting.

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "mistral",
  "prompt": "[INST] why is the sky blue? [/INST]",
  "raw": true,
  "stream": false
}'
```

#### Response

```json
{
  "model": "mistral",
  "created_at": "2023-11-03T15:36:02.583064Z",
  "response": " The sky appears blue because of a phenomenon called Rayleigh scattering.",
  "context": [1, 2, 3],
  "done": true,
  "total_duration": 14648695333,
  "load_duration": 3302671417,
  "prompt_eval_count": 14,
  "prompt_eval_duration": 286243000,
  "eval_count": 129,
  "eval_duration": 10931424000
}
```

#### Request (JSON mode)

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "What color is the sky at different times of the day? Respond using JSON",
  "format": "json",
  "stream": false
}'
```

#### Response

```json
{
  "model": "llama2",
  "created_at": "2023-11-09T21:07:55.186497Z",
  "response": "{\n\"morning\": {\n\"color\": \"blue\"\n},\n\"noon\": {\n\"color\": \"blue-gray\"\n},\n\"afternoon\": {\n\"color\": \"warm gray\"\n},\n\"evening\": {\n\"color\": \"orange\"\n}\n}\n",
  "done": true,
  "total_duration": 4661289125,
  "load_duration": 1714434500,
  "prompt_eval_count": 36,
  "prompt_eval_duration": 264132000,
  "eval_count": 75,
  "eval_duration": 2112149000
}
```

The value of `response` will be a string containing JSON similar to:

```json
{
  "morning": {
    "color": "blue"
  },
  "noon": {
    "color": "blue-gray"
  },
  "afternoon": {
    "color": "warm gray"
  },
  "evening": {
    "color": "orange"
  }
}
```

#### Request (With options)

If you want to set custom options for the model at runtime rather than in the Modelfile, you can do so with the `options` parameter. This example sets every available option, but you can set any of them individually and omit the ones you do not want to override.

```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "Why is the sky blue?",
  "stream": false,
  "options": {
    "num_keep": 5,
    "seed": 42,
    "num_predict": 100,
    "top_k": 20,
    "top_p": 0.9,
    "tfs_z": 0.5,
    "typical_p": 0.7,
    "repeat_last_n": 33,
    "temperature": 0.8,
    "repeat_penalty": 1.2,
    "presence_penalty": 1.5,
    "frequency_penalty": 1.0,
    "mirostat": 1,
    "mirostat_tau": 0.8,
    "mirostat_eta": 0.6,
    "penalize_newline": true,
    "stop": ["\n", "user:"],
    "numa": false,
    "num_ctx": 1024,
    "num_batch": 2,
    "num_gqa": 1,
    "num_gpu": 1,
    "main_gpu": 0,
    "low_vram": false,
    "f16_kv": true,
    "logits_all": false,
    "vocab_only": false,
    "use_mmap": true,
    "use_mlock": false,
    "embedding_only": false,
    "rope_frequency_base": 1.1,
    "rope_frequency_scale": 0.8,
    "num_thread": 8
  }
}'
```

#### Response

```json
{
  "model": "llama2",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "The sky is blue because it is the color of the sky.",
  "done": true,
  "total_duration": 5589157167,
  "load_duration": 3013701500,
  "sample_count": 114,
  "sample_duration": 81442000,
  "prompt_eval_count": 46,
  "prompt_eval_duration": 1160282000,
  "eval_count": 13,
  "eval_duration": 1325948000
}
```

## Generate a chat completion

```shell
POST /api/chat
```

Generate the next message in a chat with a provided model. This is a streaming endpoint, so there will be a series of responses. The final response object will include statistics and additional data from the request.

### Parameters

- `model`: (required) the [model name](#model-names)
- `messages`: the messages of the chat, this can be used to keep a chat memory

The `message` object has the following fields:

- `role`: the role of the message, either `system`, `user` or `assistant`
- `content`: the content of the message
- `images` (optional): a list of images to include in the message (for multimodal models such as `llava`)

Advanced parameters (optional):

- `format`: the format to return a response in. Currently the only accepted value is `json`
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects

### Examples

#### Request

Send a chat message with a streaming response.

```shell
curl http://localhost:11434/api/chat -d '{
  "model": "llama2",
  "messages": [
    {
      "role": "user",
      "content": "why is the sky blue?"
    }
  ]
}'
```

#### Response

A stream of JSON objects is returned:

```json
{
  "model": "llama2",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "message": {
    "role": "assistant",
    "content": "The"
  },
  "done": false
}
```

Final response:

```json
{
  "model": "llama2",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "done": true,
  "total_duration": 5589157167,
  "load_duration": 3013701500,
  "sample_count": 114,
  "sample_duration": 81442000,
  "prompt_eval_count": 46,
  "prompt_eval_duration": 1160282000,
  "eval_count": 113,
  "eval_duration": 1325948000
}
```

#### Request (With History)

Send a chat message with a conversation history.

```shell
curl http://localhost:11434/api/chat -d '{
  "model": "llama2",
  "messages": [
    {
      "role": "user",
      "content": "why is the sky blue?"
    },
    {
      "role": "assistant",
      "content": "due to rayleigh scattering."
    },
    {
      "role": "user",
      "content": "how is that different than mie scattering?"
    }
  ]
}'
```

#### Response

A stream of JSON objects is returned:

```json
{
  "model": "llama2",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "message": {
    "role": "assistant",
    "content": "The"
  },
  "done": false
}
```

Final response:

```json
{
  "model": "llama2",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "done": true,
  "total_duration": 5589157167,
  "load_duration": 3013701500,
  "sample_count": 114,
  "sample_duration": 81442000,
  "prompt_eval_count": 46,
  "prompt_eval_duration": 1160282000,
  "eval_count": 113,
  "eval_duration": 1325948000
}
```

#### Request (with images)

Send a chat message with images included.

```shell
curl http://localhost:11434/api/chat -d '{
  "model": "llama2",
  "messages": [
    {
      "role": "user",
      "content": "what is in this image?",
"images": ["iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF
169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC"]
    },
  ]
}'
```

## Create a Model

```shell
POST /api/create
```

Create a model from a [`Modelfile`](./modelfile.md)
Create a model from a [`Modelfile`](./modelfile.md). It is recommended to set `modelfile` to the content of the Modelfile rather than just set `path`. This is a requirement for remote create. Remote model creation should also create any file blobs referenced by fields such as `FROM` and `ADAPTER` explicitly with the server using [Create a Blob](#create-a-blob), setting those fields to the path indicated in the response.

### Parameters

- `name`: name of the model to create
- `path`: path to the Modelfile
- `modelfile` (optional): contents of the Modelfile
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects
- `path` (optional): path to the Modelfile

### Request
### Examples

#### Request

```shell
curl -X POST http://localhost:11434/api/create -d '{
curl http://localhost:11434/api/create -d '{
  "name": "mario",
  "path": "~/Modelfile"
  "modelfile": "FROM llama2\nSYSTEM You are mario from Super Mario Bros."
}'
```

### Response
#### Response

A stream of JSON objects. When finished, `status` is `success`.

@@ -132,6 +512,54 @@ A stream of JSON objects. When finished, `status` is `success`.
}
```

### Check if a Blob Exists

```shell
HEAD /api/blobs/:digest
```

Check if a blob is known to the server.

#### Query Parameters

- `digest`: the SHA256 digest of the blob

#### Examples

##### Request

```shell
curl -I http://localhost:11434/api/blobs/sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2
```

##### Response

Return 200 OK if the blob exists, 404 Not Found if it does not.

### Create a Blob

```shell
POST /api/blobs/:digest
```

Create a blob from a file. Returns the server file path.

#### Query Parameters

- `digest`: the expected SHA256 digest of the file

#### Examples

##### Request

```shell
curl -T model.bin -X POST http://localhost:11434/api/blobs/sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2
```

##### Response

Return 201 Created if the blob was successfully created.
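To illustrate the remote-create flow described above, here is a hedged Go sketch that computes a file's SHA256 digest locally and uploads it as a blob; the file name `model.bin` is a placeholder. The returned path could then be referenced from `FROM` in the `modelfile` sent to `/api/create`:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("model.bin") // placeholder file name
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// compute the expected SHA256 digest of the file
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	digest := fmt.Sprintf("sha256:%x", h.Sum(nil))

	// rewind and upload the file body to /api/blobs/:digest
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	req, err := http.NewRequest(http.MethodPost, "http://localhost:11434/api/blobs/"+digest, f)
	if err != nil {
		panic(err)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 201 Created
}
```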
## List Local Models

```shell
@@ -140,19 +568,23 @@ GET /api/tags

List models that are available locally.

### Request
### Examples

#### Request

```shell
curl http://localhost:11434/api/tags
```

### Response
#### Response

A single JSON object will be returned.

```json
{
  "models": [
    {
      "name": "llama2:7b",
      "name": "llama2",
      "modified_at": "2023-08-02T17:02:23.713454393-07:00",
      "size": 3791730596
    },

@@ -171,28 +603,36 @@ curl http://localhost:11434/api/tags
POST /api/show
```

Show details about a model including modelfile, template, parameters, license, and system prompt.
Show information about a model including details, modelfile, template, parameters, license, and system prompt.

### Parameters

- `name`: name of the model to show

### Request
### Examples

```shell
#### Request

```shell
curl http://localhost:11434/api/show -d '{
  "name": "llama2:7b"
  "name": "llama2"
}'
```

### Response
#### Response

```json
{
  "license": "<contents of license block>",
  "modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llama2:latest\n\nFROM /Users/username/.ollama/models/blobs/sha256:8daa9615cce30c259a9555b1cc250d461d1bc69980a274b44d7eda0be78076d8\nTEMPLATE \"\"\"[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] \"\"\"\nSYSTEM \"\"\"\"\"\"\nPARAMETER stop [INST]\nPARAMETER stop [/INST]\nPARAMETER stop <<SYS>>\nPARAMETER stop <</SYS>>\n",
  "parameters": "stop [INST]\nstop [/INST]\nstop <<SYS>>\nstop <</SYS>>",
  "template": "[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] "
  "modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM mike/llava:latest\nTEMPLATE \"\"\"\nUSER:{{ .Prompt }}\nASSISTANT:\n\"\"\"\nPARAMETER num_ctx 4096",
  "parameters": "num_ctx 4096",
  "template": "\nUSER:{{ .Prompt }}\nASSISTANT:\n",
  "license": "<license>",
  "details": {
    "format": "gguf",
    "families": ["llama", "clip"],
    "parameter_size": "7B",
    "quantization_level": "Q4_0"
  }
}
```

@@ -204,15 +644,21 @@ POST /api/copy

Copy a model. Creates a model with another name from an existing model.

### Request
### Examples

#### Request

```shell
curl http://localhost:11434/api/copy -d '{
  "source": "llama2:7b",
  "source": "llama2",
  "destination": "llama2-backup"
}'
```

#### Response

The only response is a 200 OK if successful.

## Delete a Model

```shell
@@ -223,9 +669,11 @@ Delete a model and its data.

### Parameters

- `model`: model name to delete
- `name`: model name to delete

### Request
### Examples

#### Request

```shell
curl -X DELETE http://localhost:11434/api/delete -d '{
@@ -233,6 +681,10 @@ curl -X DELETE http://localhost:11434/api/delete -d '{
}'
```

#### Response

If successful, the only response is a 200 OK.
## Pull a Model

```shell
@@ -245,22 +697,63 @@ Download a model from the ollama library. Cancelled pulls are resumed from where

- `name`: name of the model to pull
- `insecure`: (optional) allow insecure connections to the library. Only use this if you are pulling from your own library during development.
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects

### Request
### Examples

#### Request

```shell
curl -X POST http://localhost:11434/api/pull -d '{
  "name": "llama2:7b"
curl http://localhost:11434/api/pull -d '{
  "name": "llama2"
}'
```

### Response
#### Response

If `stream` is not specified, or set to `true`, a stream of JSON objects is returned:

The first object is the manifest:

```json
{
  "status": "pulling manifest"
}
```

Then there is a series of downloading responses. Until a download is completed, the `completed` key may not be included. The number of files to be downloaded depends on the number of layers specified in the manifest.

```json
{
  "status": "downloading digestname",
  "digest": "digestname",
  "total": 2142590208
  "total": 2142590208,
  "completed": 241970
}
```

After all the files are downloaded, the final responses are:

```json
{
  "status": "verifying sha256 digest"
}
{
  "status": "writing manifest"
}
{
  "status": "removing any unused layers"
}
{
  "status": "success"
}
```

If `stream` is set to `false`, then the response is a single JSON object:

```json
{
  "status": "success"
}
```
@@ -275,30 +768,34 @@ Upload a model to a model library. Requires registering for ollama.ai and adding

### Parameters

- `name`: name of the model to push in the form of `<namespace>/<model>:<tag>`
- `insecure`: (optional) allow insecure connections to the library. Only use this if you are pushing to your library during development.
- `insecure`: (optional) allow insecure connections to the library. Only use this if you are pushing to your library during development.
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects

### Request
### Examples

#### Request

```shell
curl -X POST http://localhost:11434/api/push -d '{
curl http://localhost:11434/api/push -d '{
  "name": "mattw/pygmalion:latest"
}'
```

### Response
#### Response

Streaming response that starts with:
If `stream` is not specified, or set to `true`, a stream of JSON objects is returned:

```json
{"status":"retrieving manifest"}
{ "status": "retrieving manifest" }
```

and then:

```json
{
  "status":"starting upload","digest":"sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
  "total":1928429856
  "status": "starting upload",
  "digest": "sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
  "total": 1928429856
}
```

@@ -306,9 +803,10 @@ Then there is a series of uploading responses:

```json
{
  "status":"starting upload",
  "digest":"sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
  "total":1928429856}
  "status": "starting upload",
  "digest": "sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
  "total": 1928429856
}
```

Finally, when the upload is complete:

@@ -318,6 +816,12 @@ Finally, when the upload is complete:
{"status":"success"}
```

If `stream` is set to `false`, then the response is a single JSON object:

```json
{ "status": "success" }
```
## Generate Embeddings

```shell
@@ -335,21 +839,24 @@ Advanced parameters:

- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`

### Request
### Examples

#### Request

```shell
curl -X POST http://localhost:11434/api/embeddings -d '{
  "model": "llama2:7b",
curl http://localhost:11434/api/embeddings -d '{
  "model": "llama2",
  "prompt": "Here is an article about llamas..."
}'
```

### Response
#### Response

```json
{
  "embeddings": [
  "embedding": [
    0.5670403838157654, 0.009260174818336964, 0.23178744316101074, -0.2916173040866852, -0.8924556970596313,
    0.8785552978515625, -0.34576427936553955, 0.5742510557174683, -0.04222835972905159, -0.137906014919281
  ]
}```
}
```
150  docs/faq.md
@@ -1,18 +1,156 @@
# FAQ

## How can I expose the Ollama server?
## How can I view the logs?

```bash
OLLAMA_HOST=0.0.0.0:11435 ollama serve
On macOS:

```
cat ~/.ollama/logs/server.log
```

By default, Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0`. To support more origins, you can use the `OLLAMA_ORIGINS` environment variable:
On Linux:

```
journalctl -u ollama
```

If you're running `ollama serve` directly, the logs will be printed to the console.

## How can I expose Ollama on my network?

Ollama binds to 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.

On macOS:

```bash
OLLAMA_HOST=0.0.0.0:11434 ollama serve
```

On Linux:

Create a `systemd` drop-in directory and set `Environment=OLLAMA_HOST`

```bash
mkdir -p /etc/systemd/system/ollama.service.d
echo '[Service]' >>/etc/systemd/system/ollama.service.d/environment.conf
```

```bash
echo 'Environment="OLLAMA_HOST=0.0.0.0:11434"' >>/etc/systemd/system/ollama.service.d/environment.conf
```

Reload `systemd` and restart Ollama:

```bash
systemctl daemon-reload
systemctl restart ollama
```

## How can I allow additional web origins to access Ollama?

Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0` by default. Add additional origins with the `OLLAMA_ORIGINS` environment variable:

On macOS:

```bash
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
```

On Linux:

```bash
echo 'Environment="OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com"' >>/etc/systemd/system/ollama.service.d/environment.conf
```

Reload `systemd` and restart Ollama:

```bash
systemctl daemon-reload
systemctl restart ollama
```

## Where are models stored?

* macOS: Raw model data is stored under `~/.ollama/models`.
* Linux: Raw model data is stored under `/usr/share/ollama/.ollama/models`
- macOS: Raw model data is stored under `~/.ollama/models`.
- Linux: Raw model data is stored under `/usr/share/ollama/.ollama/models`

Below the models directory you will find a structure similar to the following:

```shell
.
├── blobs
└── manifests
    └── registry.ollama.ai
        ├── f0rodo
        ├── library
        ├── mattw
        └── saikatkumardey
```

There is a `manifests/registry.ollama.ai/namespace` path. In the example above, the user has downloaded models from the official `library`, `f0rodo`, `mattw`, and `saikatkumardey` namespaces. Within each of those directories, you will find directories for each of the models downloaded. And in there you will find a file name representing each tag. Each tag file is the manifest for the model.

The manifest lists all the layers used in this model. You will see a `media type` for each layer, along with a digest. That digest corresponds with a file in the `models/blobs` directory.
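As a quick illustration of this layout, a small Go sketch (assuming the macOS path above; not an official tool) that lists every downloaded model tag by walking the manifests tree:

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

func main() {
	home, _ := os.UserHomeDir()
	root := filepath.Join(home, ".ollama", "models", "manifests", "registry.ollama.ai")
	filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() {
			// each file is namespace/model/tag; the file itself is the manifest
			rel, _ := filepath.Rel(root, path)
			fmt.Println(rel)
		}
		return nil
	})
}
```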
### How can I change where Ollama stores models?

To modify where models are stored, you can use the `OLLAMA_MODELS` environment variable. Note that on Linux this means defining `OLLAMA_MODELS` in a drop-in `/etc/systemd/system/ollama.service.d` service file, reloading systemd, and restarting the ollama service.

## Does Ollama send my prompts and answers back to Ollama.ai to use in any way?

No. Anything you do with Ollama, such as generating a response from the model, stays with you. We don't collect any data about how you use the model. You are always in control of your own data.

## How can I use Ollama in Visual Studio Code?

There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. You can see the list of [extensions & plugins](https://github.com/jmorganca/ollama#extensions--plugins) at the bottom of the main repository readme.

## How do I use Ollama behind a proxy?

Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variable, ensure it is set where `ollama serve` can access the value.

When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate.

On macOS:

```bash
HTTPS_PROXY=http://proxy.example.com ollama serve
```

On Linux:

```bash
echo 'Environment="HTTPS_PROXY=https://proxy.example.com"' >>/etc/systemd/system/ollama.service.d/environment.conf
```

Reload `systemd` and restart Ollama:

```bash
systemctl daemon-reload
systemctl restart ollama
```

### How do I use Ollama behind a proxy in Docker?

The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.

Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).

Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.

```dockerfile
FROM ollama/ollama
COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt
RUN update-ca-certificates
```

Build and run this image:

```shell
docker build -t ollama-with-ca .
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
```

## How do I use Ollama with GPU acceleration in Docker?

The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.

GPU acceleration is not available for Docker Desktop in macOS due to the lack of GPU passthrough and emulation.
194  docs/import.md (new file)
@@ -0,0 +1,194 @@
# Import a model

This guide walks through importing a GGUF, PyTorch or Safetensors model.

## Importing (GGUF)

### Step 1: Write a `Modelfile`

Start by creating a `Modelfile`. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.

```
FROM ./mistral-7b-v0.1.Q4_0.gguf
```

(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:

```
FROM ./q4_0.bin
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```

### Step 2: Create the Ollama model

Finally, create a model from your `Modelfile`:

```
ollama create example -f Modelfile
```

### Step 3: Run your model

Next, test the model with `ollama run`:

```
ollama run example "What is your favourite condiment?"
```

## Importing (PyTorch & Safetensors)

### Supported models

Ollama supports a set of model architectures, with support for more coming soon:

- Llama & Mistral
- Falcon & RW
- BigCode

To view a model's architecture, check the `config.json` file in its HuggingFace repo. You should see an entry under `architectures` (e.g. `LlamaForCausalLM`).
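For example, a short Go sketch (an illustration, not part of the guide) that prints the `architectures` entry from a cloned repository's `config.json`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	data, err := os.ReadFile("config.json")
	if err != nil {
		panic(err)
	}
	var cfg struct {
		Architectures []string `json:"architectures"`
	}
	if err := json.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Architectures) // e.g. [LlamaForCausalLM]
}
```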
### Step 1: Clone the HuggingFace repository (optional)

If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.

```
git lfs install
git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
cd Mistral-7B-Instruct-v0.1
```

### Step 2: Convert and quantize to a `.bin` file (optional, for PyTorch and Safetensors)

If the model is in PyTorch or Safetensors format, a [Docker image](https://hub.docker.com/r/ollama/quantize) with the tooling required to convert and quantize models is available.

First, install [Docker](https://www.docker.com/get-started/).

Next, to convert and quantize your model, run:

```
docker run --rm -v .:/model ollama/quantize -q q4_0 /model
```

This will output two files into the directory:

- `f16.bin`: the model converted to GGUF
- `q4_0.bin`: the model quantized to a 4-bit quantization (we will use this file to create the Ollama model)

### Step 3: Write a `Modelfile`

Next, create a `Modelfile` for your model:

```
FROM ./q4_0.bin
```

(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:

```
FROM ./q4_0.bin
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```

### Step 4: Create the Ollama model

Finally, create a model from your `Modelfile`:

```
ollama create example -f Modelfile
```

### Step 5: Run your model

Next, test the model with `ollama run`:

```
ollama run example "What is your favourite condiment?"
```

## Publishing your model (optional – early alpha)

Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:

1. Create [an account](https://ollama.ai/signup)
2. Run `cat ~/.ollama/id_ed25519.pub` to view your Ollama public key. Copy this to the clipboard.
3. Add your public key to your [Ollama account](https://ollama.ai/settings/keys)

Next, copy your model to your username's namespace:

```
ollama cp example <your username>/example
```

Then push the model:

```
ollama push <your username>/example
```

After publishing, your model will be available at `https://ollama.ai/<your username>/example`.

## Quantization reference

The quantization options are as follows (from highest to lowest levels of quantization). Note: some architectures such as Falcon do not support K quants.

- `q2_K`
- `q3_K`
- `q3_K_S`
- `q3_K_M`
- `q3_K_L`
- `q4_0` (recommended)
- `q4_1`
- `q4_K`
- `q4_K_S`
- `q4_K_M`
- `q5_0`
- `q5_1`
- `q5_K`
- `q5_K_S`
- `q5_K_M`
- `q6_K`
- `q8_0`

## Manually converting & quantizing models

### Prerequisites

Start by cloning the `llama.cpp` repo to your machine in another directory:

```
git clone https://github.com/ggerganov/llama.cpp.git
cd llama.cpp
```

Next, install the Python dependencies:

```
pip install -r requirements.txt
```

Finally, build the `quantize` tool:

```
make quantize
```

### Convert the model

Run the correct conversion script for your model architecture:

```shell
# LlamaForCausalLM or MistralForCausalLM
python convert.py <path to model directory>

# FalconForCausalLM
python convert-falcon-hf-to-gguf.py <path to model directory>

# GPTBigCodeForCausalLM
python convert-starcoder-hf-to-gguf.py <path to model directory>
```

### Quantize the model

```
quantize <path to model dir>/ggml-model-f32.bin <path to model dir>/q4_0.bin q4_0
```
@@ -1,12 +1,16 @@
# Installing Ollama on Linux
# Ollama on Linux

> Note: A one line installer for Ollama is available by running:
## Install

Install Ollama by running this one-liner:
>
> ```bash
> curl https://ollama.ai/install.sh | sh
> ```
```bash
curl https://ollama.ai/install.sh | sh
```

## Download the `ollama` binary
## Manual install

### Download the `ollama` binary

Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:

@@ -15,31 +19,7 @@ sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```

## Start Ollama

Start Ollama by running `ollama serve`:

```bash
ollama serve
```

Once Ollama is running, run a model in another terminal session:

```bash
ollama run llama2
```

## Install CUDA drivers (optional – for Nvidia GPUs)

[Download and install](https://developer.nvidia.com/cuda-downloads) CUDA.

Verify that the drivers are installed by running the following command, which should print details about your GPU:

```bash
nvidia-smi
```

## Adding Ollama as a startup service (optional)
### Adding Ollama as a startup service (recommended)

Create a user for Ollama:

@@ -60,7 +40,6 @@ User=ollama
Group=ollama
Restart=always
RestartSec=3
Environment="HOME=/usr/share/ollama"

[Install]
WantedBy=default.target
@@ -73,7 +52,40 @@ sudo systemctl daemon-reload
sudo systemctl enable ollama
```
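For reference, a complete unit file consistent with the fragment above might look like the following sketch (the `[Unit]` section and the `ExecStart` path are assumptions; they are not shown in this hunk, though the service path matches the uninstall instructions below):

```bash
# write a minimal systemd unit for Ollama (a sketch; paths are assumptions)
sudo tee /etc/systemd/system/ollama.service >/dev/null <<'EOF'
[Unit]
Description=Ollama Service

[Service]
ExecStart=/usr/bin/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3

[Install]
WantedBy=default.target
EOF
```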
### Viewing logs
### Install CUDA drivers (optional – for Nvidia GPUs)

[Download and install](https://developer.nvidia.com/cuda-downloads) CUDA.

Verify that the drivers are installed by running the following command, which should print details about your GPU:

```bash
nvidia-smi
```

### Start Ollama

Start Ollama using `systemd`:

```bash
sudo systemctl start ollama
```

## Update

Update ollama by running the install script again:

```bash
curl https://ollama.ai/install.sh | sh
```

Or by downloading the ollama binary:

```bash
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```

## Viewing logs

To view logs of Ollama running as a startup service, run:

@@ -81,3 +93,24 @@ To view logs of Ollama running as a startup service, run:
journalctl -u ollama
```

## Uninstall

Remove the ollama service:

```bash
sudo systemctl stop ollama
sudo systemctl disable ollama
sudo rm /etc/systemd/system/ollama.service
```

Remove the ollama binary from your bin directory (either `/usr/local/bin`, `/usr/bin`, or `/bin`):

```bash
sudo rm $(which ollama)
```

Remove the downloaded models and Ollama service user:
```bash
sudo rm -r /usr/share/ollama
sudo userdel ollama
```
@@ -1,6 +1,6 @@
# Ollama Model File

> Note: this model file syntax is in development
> Note: this `Modelfile` syntax is in development

A model file is the blueprint to create and share models with Ollama.

@@ -12,7 +12,6 @@ A model file is the blueprint to create and share models with Ollama.
- [FROM (Required)](#from-required)
  - [Build from llama2](#build-from-llama2)
  - [Build from a bin file](#build-from-a-bin-file)
- [EMBED](#embed)
- [PARAMETER](#parameter)
  - [Valid Parameters and Values](#valid-parameters-and-values)
- [TEMPLATE](#template)
@@ -24,25 +23,27 @@ A model file is the blueprint to create and share models with Ollama.

## Format

The format of the Modelfile:
The format of the `Modelfile`:

```modelfile
# comment
INSTRUCTION arguments
```

| Instruction | Description |
| ----------------------------------- | ------------------------------------------------------------- |
| [`FROM`](#from-required) (required) | Defines the base model to use. |
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
| [`SYSTEM`](#system) | Specifies the system prompt that will be set in the template. |
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
| [`LICENSE`](#license) | Specifies the legal license. |

| Instruction | Description |
| ----------------------------------- | -------------------------------------------------------------- |
| [`FROM`](#from-required) (required) | Defines the base model to use. |
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
| [`SYSTEM`](#system) | Specifies the system message that will be set in the template. |
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
| [`LICENSE`](#license) | Specifies the legal license. |

## Examples

An example of a model file creating a mario blueprint:
### Basic `Modelfile`

An example of a `Modelfile` creating a mario blueprint:

```modelfile
FROM llama2
@@ -51,24 +52,53 @@ PARAMETER temperature 1
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
PARAMETER num_ctx 4096

# sets a custom system prompt to specify the behavior of the chat assistant
# sets a custom system message to specify the behavior of the chat assistant
SYSTEM You are Mario from super mario bros, acting as an assistant.
```

To use this:

1. Save it as a file (eg. `Modelfile`)
2. `ollama create NAME -f <location of the file eg. ./Modelfile>'`
3. `ollama run NAME`
1. Save it as a file (e.g. `Modelfile`)
2. `ollama create choose-a-model-name -f <location of the file e.g. ./Modelfile>'`
3. `ollama run choose-a-model-name`
4. Start using the model!

More examples are available in the [examples directory](../examples).

### `Modelfile`s in [ollama.ai/library][1]

There are two ways to view `Modelfile`s underlying the models in [ollama.ai/library][1]:

- Option 1: view a details page from a model's tags page:
  1. Go to a particular model's tags (e.g. https://ollama.ai/library/llama2/tags)
  2. Click on a tag (e.g. https://ollama.ai/library/llama2:13b)
  3. Scroll down to "Layers"
     - Note: if the [`FROM` instruction](#from-required) is not present, it means the model was created from a local file
- Option 2: use `ollama show` to print the `Modelfile` like so:

  ```bash
  > ollama show --modelfile llama2:13b
  # Modelfile generated by "ollama show"
  # To build a new Modelfile based on this one, replace the FROM line with:
  # FROM llama2:13b

  FROM /root/.ollama/models/blobs/sha256:123abc
  TEMPLATE """[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>

  {{ end }}{{ .Prompt }} [/INST] """
  SYSTEM """"""
  PARAMETER stop [INST]
  PARAMETER stop [/INST]
  PARAMETER stop <<SYS>>
  PARAMETER stop <</SYS>>
  ```

## Instructions

### FROM (Required)

The FROM instruction defines the base model to use when creating a model.
The `FROM` instruction defines the base model to use when creating a model.

```modelfile
FROM <model name>:<tag>
@@ -83,24 +113,13 @@ FROM llama2
A list of available base models:
<https://github.com/jmorganca/ollama#model-library>

#### Build from a bin file
#### Build from a `bin` file

```modelfile
FROM ./ollama-model.bin
```

This bin file location should be specified as an absolute path or relative to the Modelfile location.

### EMBED

The EMBED instruction is used to add embeddings of files to a model. This is useful for adding custom data that the model can reference when generating an answer. Note that currently only text files are supported, formatted with each line as one embedding.

```modelfile
FROM <model name>:<tag>
EMBED <file path>.txt
EMBED <different file path>.txt
EMBED <path to directory>/*.txt
```
This bin file location should be specified as an absolute path or relative to the `Modelfile` location.

### PARAMETER

@@ -124,7 +143,8 @@ PARAMETER <parameter> <parametervalue>
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
| stop | Sets the stop sequences to use. | string | stop "AI assistant:" |
| seed | Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) | int | seed 42 |
| stop | Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate `stop` parameters in a modelfile. | string | stop "AI assistant:" |
| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 |
| num_predict | Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) | int | num_predict 42 |
| top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 |
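The new `stop` description above notes that multiple stop sequences can be set by repeating the parameter. A minimal sketch of what that looks like in a `Modelfile` (the two sequences are hypothetical placeholders):

```modelfile
FROM llama2
PARAMETER stop "User:"
PARAMETER stop "Assistant:"
```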
@@ -132,15 +152,15 @@ PARAMETER <parameter> <parametervalue>

### TEMPLATE

`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific.
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system message and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific. You can usually find the template for a given model in the readme for that model.

#### Template Variables

| Variable | Description |
| --------------- | ------------------------------------------------------------------------------------------------------------ |
| `{{ .System }}` | The system prompt used to specify custom behavior, this must also be set in the Modelfile as an instruction. |
| `{{ .Prompt }}` | The incoming prompt, this is not specified in the model file and will be set based on input. |
| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |

| Variable | Description |
| --------------- | ------------------------------------------------------------------------------------------------------------- |
| `{{ .System }}` | The system message used to specify custom behavior, this must also be set in the Modelfile as an instruction. |
| `{{ .Prompt }}` | The incoming prompt, this is not specified in the model file and will be set based on input. |
| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |

```modelfile
TEMPLATE """
@@ -160,7 +180,7 @@ SYSTEM """<system message>"""

### SYSTEM

The `SYSTEM` instruction specifies the system prompt to be used in the template, if applicable.
The `SYSTEM` instruction specifies the system message to be used in the template, if applicable.

```modelfile
SYSTEM """<system message>"""
@@ -186,5 +206,7 @@ LICENSE """

## Notes

- the **modelfile is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
- the **`Modelfile` is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
- Instructions can be in any order. In the examples, we start with FROM instruction to keep it easily readable.

[1]: https://ollama.ai/library
@@ -4,5 +4,6 @@ Here is a list of ways you can use Ollama with other tools to build interesting

- [Using LangChain with Ollama in JavaScript](./tutorials/langchainjs.md)
- [Using LangChain with Ollama in Python](./tutorials/langchainpy.md)
- [Running Ollama on NVIDIA Jetson Devices](./tutorials/nvidia-jetson.md)

Also be sure to check out the [examples](../examples) directory for more ways to use Ollama.
83 docs/tutorials/fly-gpu.md Normal file
@@ -0,0 +1,83 @@

# Running Ollama on Fly.io GPU Instances

Ollama runs with little to no configuration on [Fly.io GPU instances](https://fly.io/docs/gpus/gpu-quickstart/). If you don't have access to GPUs yet, you'll need to [apply for access](https://fly.io/gpu/) on the waitlist. Once you're accepted, you'll get an email with instructions on how to get started.

Create a new app with `fly apps create`:

```bash
fly apps create
```

Then create a `fly.toml` file in a new folder that looks like this:

```toml
app = "sparkling-violet-709"
primary_region = "ord"
vm.size = "a100-40gb" # see https://fly.io/docs/gpus/gpu-quickstart/ for more info

[build]
  image = "ollama/ollama"

[http_service]
  internal_port = 11434
  force_https = false
  auto_stop_machines = true
  auto_start_machines = true
  min_machines_running = 0
  processes = ["app"]

[mounts]
  source = "models"
  destination = "/root/.ollama"
  initial_size = "100gb"
```

Then create a [new private IPv6 address](https://fly.io/docs/reference/private-networking/#flycast-private-load-balancing) for your app:

```bash
fly ips allocate-v6 --private
```

Then deploy your app:

```bash
fly deploy
```

And finally you can access it interactively with a new Fly.io Machine:

```
fly machine run -e OLLAMA_HOST=http://your-app-name.flycast --shell ollama/ollama
```

```bash
$ ollama run openchat:7b-v3.5-fp16
>>> How do I bake chocolate chip cookies?
To bake chocolate chip cookies, follow these steps:

1. Preheat the oven to 375°F (190°C) and line a baking sheet with parchment paper or silicone baking mat.

2. In a large bowl, mix together 1 cup of unsalted butter (softened), 3/4 cup granulated sugar, and 3/4 cup packed brown sugar until light and fluffy.

3. Add 2 large eggs, one at a time, to the butter mixture, beating well after each addition. Stir in 1 teaspoon of pure vanilla extract.

4. In a separate bowl, whisk together 2 cups all-purpose flour, 1/2 teaspoon baking soda, and 1/2 teaspoon salt. Gradually add the dry ingredients to the wet ingredients, stirring until just combined.

5. Fold in 2 cups of chocolate chips (or chunks) into the dough.

6. Drop rounded tablespoons of dough onto the prepared baking sheet, spacing them about 2 inches apart.

7. Bake for 10-12 minutes, or until the edges are golden brown. The centers should still be slightly soft.

8. Allow the cookies to cool on the baking sheet for a few minutes before transferring them to a wire rack to cool completely.

Enjoy your homemade chocolate chip cookies!
```

When you set it up like this, it will automatically turn off when you're done using it. Then when you access it again, it will automatically turn back on. This is a great way to save money on GPU instances when you're not using them. If you want a persistent wake-on-use connection to your Ollama instance, you can set up a [connection to your Fly network using WireGuard](https://fly.io/docs/reference/private-networking/#discovering-apps-through-dns-on-a-wireguard-connection). Then you can access your Ollama instance at `http://your-app-name.flycast`.

And that's it!
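Once the WireGuard connection is up, any local `ollama` client can target the Fly app by setting `OLLAMA_HOST`. A minimal sketch, reusing the placeholder app name from above:

```bash
# point the local ollama CLI at the remote Fly.io instance
export OLLAMA_HOST=http://your-app-name.flycast
ollama run openchat:7b-v3.5-fp16
```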
@@ -23,13 +23,17 @@ const answer = await ollama.call(`why is the sky blue?`);
console.log(answer);
```

That will get us the same thing as if we ran `ollama run llama2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's build that part of the app.
That will get us the same thing as if we ran `ollama run llama2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.

```bash
npm install cheerio
```

```javascript
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";

const loader = new CheerioWebBaseLoader("https://en.wikipedia.org/wiki/2023_Hawaii_wildfires");
const data = loader.load();
const data = await loader.load();
```

That will load the document. Although this page is smaller than the Odyssey, it is certainly bigger than the context size for most LLMs. So we are going to need to split it into smaller pieces, and then select just the pieces relevant to our question. This is a great use for a vector datastore. In this example, we will use the **MemoryVectorStore** that is part of **LangChain**. But there is one more thing we need to get the content into the datastore. We have to run an embeddings process that converts the tokens in the text into a series of vectors. And for that, we are going to use **Tensorflow**. There is a lot of stuff going on in this one. First, install the **Tensorflow** components that we need.
@@ -42,12 +42,13 @@ text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
```

It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. For now, we don't have embeddings built in to Ollama, though we will be adding that soon, so for now, we can use the GPT4All library for that. We will use ChromaDB in this example for a vector database. `pip install GPT4All chromadb`
It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We can use Ollama directly to instantiate an embedding model. We will use ChromaDB in this example for a vector database. `pip install GPT4All chromadb`

```python
from langchain.embeddings import GPT4AllEmbeddings
from langchain.embeddings import OllamaEmbeddings
from langchain.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
oembed = OllamaEmbeddings(base_url="http://localhost:11434", model="llama2")
vectorstore = Chroma.from_documents(documents=all_splits, embedding=oembed)
```

Now let's ask a question from the document. **Who was Neleus, and who is in his family?** Neleus is a character in the Odyssey, and the answer can be found in our text.
38 docs/tutorials/nvidia-jetson.md Normal file
@@ -0,0 +1,38 @@

# Running Ollama on NVIDIA Jetson Devices

With some minor configuration, Ollama runs well on [NVIDIA Jetson Devices](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/). The following has been tested on [JetPack 5.1.2](https://developer.nvidia.com/embedded/jetpack).

NVIDIA Jetson devices are Linux-based embedded AI computers that are purpose-built for AI applications.

Jetsons have an integrated GPU that is wired directly to the memory controller of the machine. For this reason, the `nvidia-smi` command is unrecognized, and Ollama proceeds to operate in "CPU only" mode. This can be verified by using a monitoring tool like jtop.

In order to address this, we simply pass the path to the Jetson's pre-installed CUDA libraries into `ollama serve` (while in a tmux session). We then hardcode the num_gpu parameters into a cloned version of our target model.

Prerequisites:

- curl
- tmux

Here are the steps:

- Install Ollama via standard Linux command (ignore the 404 error): `curl https://ollama.ai/install.sh | sh`
- Stop the Ollama service: `sudo systemctl stop ollama`
- Start Ollama serve in a tmux session called ollama_jetson and reference the CUDA libraries path: `tmux has-session -t ollama_jetson 2>/dev/null || tmux new-session -d -s ollama_jetson 'LD_LIBRARY_PATH=/usr/local/cuda/lib64 ollama serve'`
- Pull the model you want to use (e.g. mistral): `ollama pull mistral`
- Create a new Modelfile specifically for enabling GPU support on the Jetson: `touch ModelfileMistralJetson`
- In the ModelfileMistralJetson file, specify the FROM model and the num_gpu PARAMETER as shown below:

```
FROM mistral
PARAMETER num_gpu 999
```

- Create a new model from your Modelfile: `ollama create mistral-jetson -f ./ModelfileMistralJetson`
- Run the new model: `ollama run mistral-jetson`

If you run a monitoring tool like jtop you should now see that Ollama is using the Jetson's integrated GPU.

And that's it!
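To watch the server logs (and confirm the CUDA libraries were picked up), you can attach to the tmux session created above. A small sketch:

```bash
# attach to the background session running `ollama serve`
tmux attach -t ollama_jetson
# detach again with Ctrl-b d
```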
171 examples/.gitignore vendored Normal file
@@ -0,0 +1,171 @@

node_modules
# OSX
.DS_STORE

# Models
models/

# Local Chroma db
.chroma/
db/
@@ -1,15 +1,3 @@
# Examples

This directory contains different examples of using Ollama

To create a model:

```
ollama create example -f <example file>
```

To run a model:

```
ollama run example
```
This directory contains different examples of using Ollama.
10 examples/bash-comparemodels/README.md Normal file
@@ -0,0 +1,10 @@

# Bash Shell examples

When calling `ollama`, you can pass it a file to run all the prompts in the file, one after the other:

`ollama run llama2 < sourcequestions.txt`

This concept is used in the following example.

## Compare Models
`comparemodels.sh` is a script that runs all the questions in `sourcequestions.txt` using any 4 models of your choice that you have already pulled from the Ollama library or created locally.
64 examples/bash-comparemodels/comparemodels.sh Executable file
@@ -0,0 +1,64 @@

#! /usr/bin/env bash
# Compare multiple models by running them with the same questions

NUMBEROFCHOICES=4
SELECTIONS=()
declare -a SUMS=()

# Get the list of models
CHOICES=$(ollama list | awk '{print $1}')

# Select which models to run as a comparison
echo "Select $NUMBEROFCHOICES models to compare:"
select ITEM in $CHOICES; do
  if [[ -n $ITEM ]]; then
    echo "You have selected $ITEM"
    SELECTIONS+=("$ITEM")
    ((COUNT++))
    if [[ $COUNT -eq $NUMBEROFCHOICES ]]; then
      break
    fi
  else
    echo "Invalid selection"
  fi
done

# Loop through each of the selected models
for ITEM in "${SELECTIONS[@]}"; do
  echo "--------------------------------------------------------------"
  echo "Loading the model $ITEM into memory"
  ollama run "$ITEM" ""
  echo "--------------------------------------------------------------"
  echo "Running the questions through the model $ITEM"
  COMMAND_OUTPUT=$(ollama run "$ITEM" --verbose < sourcequestions.txt 2>&1 | tee /dev/stderr)

  # eval duration is sometimes listed in seconds and sometimes in milliseconds.
  # Add up the values for each model
  SUM=$(echo "$COMMAND_OUTPUT" | awk '
    /eval duration:/ {
      value = $3
      if (index(value, "ms") > 0) {
        gsub("ms", "", value)
        value /= 1000
      } else {
        gsub("s", "", value)
      }
      sum += value
    }
    END { print sum }')

  SUMS+=("All questions for $ITEM completed in $SUM seconds")
done

echo ""
echo "--------------------------------------------------------------"
echo -e "Sums of eval durations for each run:"
for val in "${SUMS[@]}"; do
  echo "$val"
done

echo "--------------------------------------------------------------"
echo "Comparison complete. Now you can decide"
echo "which model is best."
echo "--------------------------------------------------------------"
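A small usage sketch, assuming a few models have already been pulled and the two files above sit in the same directory:

```bash
# make the script executable, then follow the interactive model prompts
chmod +x comparemodels.sh
./comparemodels.sh
```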
7 examples/bash-comparemodels/sourcequestions.txt Normal file
@@ -0,0 +1,7 @@

Why is the sky blue
What is a black hole
Explain the big bang theory like I am 5?
What is the quickest way to win a game of Monopoly with 3 others?
Why does a vacuum bottle keep my coffee hot and my milkshake cold?
What is the difference between a meteor, a meteorite, and a meteoroid?
Create an array with 5 items and print to the console. Do this in Python, C#, Typescript, and Rust.
0 examples/golang-simplegenerate/README.md Normal file

27 examples/golang-simplegenerate/main.go Normal file
@@ -0,0 +1,27 @@

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	body := []byte(`{"model":"mistral"}`)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewBuffer(body))
	if err != nil {
		fmt.Print(err.Error())
		os.Exit(1)
	}

	responseData, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(responseData))
}
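A sketch of running this example against a local Ollama server (it assumes the `mistral` model has already been pulled):

```bash
# fetch the model once, then run the Go example
ollama pull mistral
go run main.go
```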
5 examples/jupyter-notebook/README.md Normal file
@@ -0,0 +1,5 @@

# Ollama Jupyter Notebook

This example downloads and installs Ollama in a Jupyter instance such as Google Colab. It will start the Ollama service and expose an endpoint using `ngrok` which can be used to communicate with the Ollama instance remotely.

For best results, use an instance with GPU accelerator.

102 examples/jupyter-notebook/ollama.ipynb Normal file
@@ -0,0 +1,102 @@

{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "93f59dcb-c588-41b8-a792-55d88ade739c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download and run the Ollama Linux install script\n",
    "!curl https://ollama.ai/install.sh | sh\n",
    "!command -v systemctl >/dev/null && sudo systemctl stop ollama"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "658c147e-c7f8-490e-910e-62b80f577dda",
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install aiohttp pyngrok\n",
    "\n",
    "import os\n",
    "import asyncio\n",
    "from aiohttp import ClientSession\n",
    "\n",
    "# Set LD_LIBRARY_PATH so the system NVIDIA library becomes preferred\n",
    "# over the built-in library. This is particularly important for \n",
    "# Google Colab which installs older drivers\n",
    "os.environ.update({'LD_LIBRARY_PATH': '/usr/lib64-nvidia'})\n",
    "\n",
    "async def run(cmd):\n",
    "    '''\n",
    "    run is a helper function to run subcommands asynchronously.\n",
    "    '''\n",
    "    print('>>> starting', *cmd)\n",
    "    p = await asyncio.subprocess.create_subprocess_exec(\n",
    "        *cmd,\n",
    "        stdout=asyncio.subprocess.PIPE,\n",
    "        stderr=asyncio.subprocess.PIPE,\n",
    "    )\n",
    "\n",
    "    async def pipe(lines):\n",
    "        async for line in lines:\n",
    "            print(line.strip().decode('utf-8'))\n",
    "\n",
    "    await asyncio.gather(\n",
    "        pipe(p.stdout),\n",
    "        pipe(p.stderr),\n",
    "    )\n",
    "\n",
    "\n",
    "await asyncio.gather(\n",
    "    run(['ollama', 'serve']),\n",
    "    run(['ngrok', 'http', '--log', 'stderr', '11434']),\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e7735a55-9aad-4caf-8683-52e2163ba53b",
   "metadata": {},
   "source": [
    "The previous cell starts two processes, `ollama` and `ngrok`. The log output will show a line like the following which describes the external address.\n",
    "\n",
    "```\n",
    "t=2023-11-12T22:55:56+0000 lvl=info msg=\"started tunnel\" obj=tunnels name=command_line addr=http://localhost:11434 url=https://8249-34-125-179-11.ngrok.io\n",
    "```\n",
    "\n",
    "The external address in this case is `https://8249-34-125-179-11.ngrok.io` which can be passed into `OLLAMA_HOST` to access this instance.\n",
    "\n",
    "```bash\n",
    "export OLLAMA_HOST=https://8249-34-125-179-11.ngrok.io\n",
    "ollama list\n",
    "ollama run mistral\n",
    "```"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
36 examples/kubernetes/README.md Normal file
@@ -0,0 +1,36 @@

# Deploy Ollama to Kubernetes

## Prerequisites

- Ollama: https://ollama.ai/download
- Kubernetes cluster. This example will use Google Kubernetes Engine.

## Steps

1. Create the Ollama namespace, deployment, and service

   ```bash
   kubectl apply -f cpu.yaml
   ```

1. Port forward the Ollama service to connect and use it locally

   ```bash
   kubectl -n ollama port-forward service/ollama 11434:80
   ```

1. Pull and run a model, for example `orca-mini:3b`

   ```bash
   ollama run orca-mini:3b
   ```
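With the service port-forwarded (step 2), you can sanity-check the deployment from another terminal. A minimal sketch, assuming the forward is still active:

```bash
# the root endpoint of an Ollama server replies with a short status string
curl http://localhost:11434/
# expected output: Ollama is running
```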
## (Optional) Hardware Acceleration

Hardware acceleration in Kubernetes requires NVIDIA's [`k8s-device-plugin`](https://github.com/NVIDIA/k8s-device-plugin). Follow the link for more details.

Once configured, create a GPU enabled Ollama deployment.

```bash
kubectl apply -f gpu.yaml
```
42 examples/kubernetes/cpu.yaml Normal file
@@ -0,0 +1,42 @@

---
apiVersion: v1
kind: Namespace
metadata:
  name: ollama
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ollama
  namespace: ollama
spec:
  selector:
    matchLabels:
      name: ollama
  template:
    metadata:
      labels:
        name: ollama
    spec:
      containers:
      - name: ollama
        image: ollama/ollama:latest
        ports:
        - name: http
          containerPort: 11434
          protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
  name: ollama
  namespace: ollama
spec:
  type: ClusterIP
  selector:
    name: ollama
  ports:
  - port: 80
    name: http
    targetPort: http
    protocol: TCP
58 examples/kubernetes/gpu.yaml Normal file
@@ -0,0 +1,58 @@

---
apiVersion: v1
kind: Namespace
metadata:
  name: ollama
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ollama
  namespace: ollama
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      name: ollama
  template:
    metadata:
      labels:
        name: ollama
    spec:
      containers:
      - name: ollama
        image: ollama/ollama:latest
        env:
        - name: PATH
          value: /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
        - name: LD_LIBRARY_PATH
          value: /usr/local/nvidia/lib:/usr/local/nvidia/lib64
        - name: NVIDIA_DRIVER_CAPABILITIES
          value: compute,utility
        ports:
        - name: http
          containerPort: 11434
          protocol: TCP
        resources:
          limits:
            nvidia.com/gpu: 1
      tolerations:
      - key: nvidia.com/gpu
        operator: Exists
        effect: NoSchedule
---
apiVersion: v1
kind: Service
metadata:
  name: ollama
  namespace: ollama
spec:
  type: ClusterIP
  selector:
    name: ollama
  ports:
  - port: 80
    name: http
    targetPort: http
    protocol: TCP
@@ -6,7 +6,6 @@ PERSIST_DIRECTORY = os.environ.get('PERSIST_DIRECTORY', 'db')

# Define the Chroma settings
CHROMA_SETTINGS = Settings(
    chroma_db_impl='duckdb+parquet',
    persist_directory=PERSIST_DIRECTORY,
    anonymized_telemetry=False
)
@@ -150,7 +150,7 @@ def main():
        print("Creating new vectorstore")
        texts = process_documents()
        print(f"Creating embeddings. May take some minutes...")
        db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)
        db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory)
    db.persist()
    db = None

@@ -4,6 +4,7 @@ from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import Ollama
import chromadb
import os
import argparse
import time
@@ -22,7 +23,9 @@ def main():
    # Parse the command line arguments
    args = parse_arguments()
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
    db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)

    db = Chroma(persist_directory=persist_directory, embedding_function=embeddings)

    retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
    # activate/deactivate the streaming StdOut callback for LLMs
    callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]
14 examples/langchain-python-rag-privategpt/requirements.txt Normal file
@@ -0,0 +1,14 @@

langchain==0.0.274
gpt4all==1.0.8
chromadb==0.4.7
llama-cpp-python==0.1.81
urllib3==2.0.4
PyMuPDF==1.23.5
python-dotenv==1.0.0
unstructured==0.10.8
extract-msg==0.45.0
tabulate==0.9.0
pandoc==2.3
pypandoc==1.11
tqdm==4.66.1
sentence_transformers==2.2.2
21 examples/langchain-typescript-simple/README.md Normal file
@@ -0,0 +1,21 @@

# LangChain

This example is a basic "hello world" of using LangChain with Ollama using Node.js and Typescript.

## Setup

```shell
npm install
```

## Run

```shell
ts-node main.ts
```

Running this example will print the response for "hello":

```plaintext
Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat?
```
15 examples/langchain-typescript-simple/main.ts Normal file
@@ -0,0 +1,15 @@

import { Ollama } from 'langchain/llms/ollama';

async function main() {
  const ollama = new Ollama({
    model: 'mistral'
    // other parameters can be found at https://js.langchain.com/docs/api/llms_ollama/classes/Ollama
  })
  const stream = await ollama.stream("Hello");

  for await (const chunk of stream) {
    process.stdout.write(chunk);
  }
}

main();
997 examples/langchain-typescript-simple/package-lock.json generated Normal file
"typeorm": "^0.3.12",
|
||||
"typesense": "^1.5.3",
|
||||
"usearch": "^1.1.1",
|
||||
"vectordb": "^0.1.4",
|
||||
"voy-search": "0.6.2",
|
||||
"weaviate-ts-client": "^1.4.0",
|
||||
"web-auth-library": "^1.0.3",
|
||||
"youtube-transcript": "^1.0.6",
|
||||
"youtubei.js": "^5.8.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@aws-crypto/sha256-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-bedrock-runtime": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-dynamodb": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-kendra": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-lambda": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-s3": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-sagemaker-runtime": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/client-sfn": {
|
||||
"optional": true
|
||||
},
|
||||
"@aws-sdk/credential-provider-node": {
|
||||
"optional": true
|
||||
},
|
||||
"@azure/storage-blob": {
|
||||
"optional": true
|
||||
},
|
||||
"@clickhouse/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@cloudflare/ai": {
|
||||
"optional": true
|
||||
},
|
||||
"@elastic/elasticsearch": {
|
||||
"optional": true
|
||||
},
|
||||
"@getmetal/metal-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@getzep/zep-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@gomomento/sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@google-ai/generativelanguage": {
|
||||
"optional": true
|
||||
},
|
||||
"@google-cloud/storage": {
|
||||
"optional": true
|
||||
},
|
||||
"@huggingface/inference": {
|
||||
"optional": true
|
||||
},
|
||||
"@mozilla/readability": {
|
||||
"optional": true
|
||||
},
|
||||
"@notionhq/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@opensearch-project/opensearch": {
|
||||
"optional": true
|
||||
},
|
||||
"@pinecone-database/pinecone": {
|
||||
"optional": true
|
||||
},
|
||||
"@planetscale/database": {
|
||||
"optional": true
|
||||
},
|
||||
"@qdrant/js-client-rest": {
|
||||
"optional": true
|
||||
},
|
||||
"@raycast/api": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/eventstream-codec": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/protocol-http": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/signature-v4": {
|
||||
"optional": true
|
||||
},
|
||||
"@smithy/util-utf8": {
|
||||
"optional": true
|
||||
},
|
||||
"@supabase/postgrest-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@supabase/supabase-js": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow-models/universal-sentence-encoder": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow/tfjs-converter": {
|
||||
"optional": true
|
||||
},
|
||||
"@tensorflow/tfjs-core": {
|
||||
"optional": true
|
||||
},
|
||||
"@upstash/redis": {
|
||||
"optional": true
|
||||
},
|
||||
"@vercel/postgres": {
|
||||
"optional": true
|
||||
},
|
||||
"@writerai/writer-sdk": {
|
||||
"optional": true
|
||||
},
|
||||
"@xata.io/client": {
|
||||
"optional": true
|
||||
},
|
||||
"@xenova/transformers": {
|
||||
"optional": true
|
||||
},
|
||||
"@zilliz/milvus2-sdk-node": {
|
||||
"optional": true
|
||||
},
|
||||
"apify-client": {
|
||||
"optional": true
|
||||
},
|
||||
"axios": {
|
||||
"optional": true
|
||||
},
|
||||
"cassandra-driver": {
|
||||
"optional": true
|
||||
},
|
||||
"cheerio": {
|
||||
"optional": true
|
||||
},
|
||||
"chromadb": {
|
||||
"optional": true
|
||||
},
|
||||
"cohere-ai": {
|
||||
"optional": true
|
||||
},
|
||||
"d3-dsv": {
|
||||
"optional": true
|
||||
},
|
||||
"epub2": {
|
||||
"optional": true
|
||||
},
|
||||
"faiss-node": {
|
||||
"optional": true
|
||||
},
|
||||
"fast-xml-parser": {
|
||||
"optional": true
|
||||
},
|
||||
"firebase-admin": {
|
||||
"optional": true
|
||||
},
|
||||
"google-auth-library": {
|
||||
"optional": true
|
||||
},
|
||||
"googleapis": {
|
||||
"optional": true
|
||||
},
|
||||
"hnswlib-node": {
|
||||
"optional": true
|
||||
},
|
||||
"html-to-text": {
|
||||
"optional": true
|
||||
},
|
||||
"ignore": {
|
||||
"optional": true
|
||||
},
|
||||
"ioredis": {
|
||||
"optional": true
|
||||
},
|
||||
"jsdom": {
|
||||
"optional": true
|
||||
},
|
||||
"llmonitor": {
|
||||
"optional": true
|
||||
},
|
||||
"lodash": {
|
||||
"optional": true
|
||||
},
|
||||
"mammoth": {
|
||||
"optional": true
|
||||
},
|
||||
"mongodb": {
|
||||
"optional": true
|
||||
},
|
||||
"mysql2": {
|
||||
"optional": true
|
||||
},
|
||||
"neo4j-driver": {
|
||||
"optional": true
|
||||
},
|
||||
"node-llama-cpp": {
|
||||
"optional": true
|
||||
},
|
||||
"notion-to-md": {
|
||||
"optional": true
|
||||
},
|
||||
"pdf-parse": {
|
||||
"optional": true
|
||||
},
|
||||
"peggy": {
|
||||
"optional": true
|
||||
},
|
||||
"pg": {
|
||||
"optional": true
|
||||
},
|
||||
"pg-copy-streams": {
|
||||
"optional": true
|
||||
},
|
||||
"pickleparser": {
|
||||
"optional": true
|
||||
},
|
||||
"playwright": {
|
||||
"optional": true
|
||||
},
|
||||
"portkey-ai": {
|
||||
"optional": true
|
||||
},
|
||||
"puppeteer": {
|
||||
"optional": true
|
||||
},
|
||||
"redis": {
|
||||
"optional": true
|
||||
},
|
||||
"replicate": {
|
||||
"optional": true
|
||||
},
|
||||
"sonix-speech-recognition": {
|
||||
"optional": true
|
||||
},
|
||||
"srt-parser-2": {
|
||||
"optional": true
|
||||
},
|
||||
"typeorm": {
|
||||
"optional": true
|
||||
},
|
||||
"typesense": {
|
||||
"optional": true
|
||||
},
|
||||
"usearch": {
|
||||
"optional": true
|
||||
},
|
||||
"vectordb": {
|
||||
"optional": true
|
||||
},
|
||||
"voy-search": {
|
||||
"optional": true
|
||||
},
|
||||
"weaviate-ts-client": {
|
||||
"optional": true
|
||||
},
|
||||
"web-auth-library": {
|
||||
"optional": true
|
||||
},
|
||||
"youtube-transcript": {
|
||||
"optional": true
|
||||
},
|
||||
"youtubei.js": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/langchainhub": {
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/langchainhub/-/langchainhub-0.0.6.tgz",
|
||||
"integrity": "sha512-SW6105T+YP1cTe0yMf//7kyshCgvCTyFBMTgH2H3s9rTAR4e+78DA/BBrUL/Mt4Q5eMWui7iGuAYb3pgGsdQ9w=="
|
||||
},
|
||||
"node_modules/langsmith": {
|
||||
"version": "0.0.42",
|
||||
"resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.0.42.tgz",
|
||||
"integrity": "sha512-sFuN+e7E+pPBIRaRgFqZh/BRBWNHTZNAwi6uj4kydQawooCZYoJmM5snOkiQrhVSvAhgu6xFhLvmfvkPcKzD7w==",
|
||||
"dependencies": {
|
||||
"@types/uuid": "^9.0.1",
|
||||
"commander": "^10.0.1",
|
||||
"p-queue": "^6.6.2",
|
||||
"p-retry": "4",
|
||||
"uuid": "^9.0.0"
|
||||
},
|
||||
"bin": {
|
||||
"langsmith": "dist/cli/main.cjs"
|
||||
}
|
||||
},
|
||||
"node_modules/md5": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz",
|
||||
"integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==",
|
||||
"dependencies": {
|
||||
"charenc": "0.0.2",
|
||||
"crypt": "0.0.2",
|
||||
"is-buffer": "~1.1.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-db": {
|
||||
"version": "1.52.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
|
||||
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-types": {
|
||||
"version": "2.1.35",
|
||||
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
|
||||
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
|
||||
"dependencies": {
|
||||
"mime-db": "1.52.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-array-mean": {
|
||||
"version": "1.1.6",
|
||||
"resolved": "https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz",
|
||||
"integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==",
|
||||
"dependencies": {
|
||||
"ml-array-sum": "^1.1.6"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-array-sum": {
|
||||
"version": "1.1.6",
|
||||
"resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz",
|
||||
"integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==",
|
||||
"dependencies": {
|
||||
"is-any-array": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-distance": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.1.tgz",
|
||||
"integrity": "sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw==",
|
||||
"dependencies": {
|
||||
"ml-array-mean": "^1.1.6",
|
||||
"ml-distance-euclidean": "^2.0.0",
|
||||
"ml-tree-similarity": "^1.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ml-distance-euclidean": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz",
|
||||
"integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q=="
|
||||
},
|
||||
"node_modules/ml-tree-similarity": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz",
|
||||
"integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==",
|
||||
"dependencies": {
|
||||
"binary-search": "^1.3.5",
|
||||
"num-sort": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/ms": {
|
||||
"version": "2.1.3",
|
||||
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
|
||||
},
|
||||
"node_modules/node-domexception": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
|
||||
"integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/jimmywarting"
|
||||
},
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://paypal.me/jimmywarting"
|
||||
}
|
||||
],
|
||||
"engines": {
|
||||
"node": ">=10.5.0"
|
||||
}
|
||||
},
|
||||
"node_modules/node-fetch": {
|
||||
"version": "2.7.0",
|
||||
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
|
||||
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
|
||||
"dependencies": {
|
||||
"whatwg-url": "^5.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": "4.x || >=6.0.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"encoding": "^0.1.0"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"encoding": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/num-sort": {
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz",
|
||||
"integrity": "sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/object-hash": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
|
||||
"integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/openai": {
|
||||
"version": "4.4.0",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-4.4.0.tgz",
|
||||
"integrity": "sha512-JN0t628Kh95T0IrXl0HdBqnlJg+4Vq0Bnh55tio+dfCnyzHvMLiWyCM9m726MAJD2YkDU4/8RQB6rNbEq9ct2w==",
|
||||
"dependencies": {
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/node-fetch": "^2.6.4",
|
||||
"abort-controller": "^3.0.0",
|
||||
"agentkeepalive": "^4.2.1",
|
||||
"digest-fetch": "^1.3.0",
|
||||
"form-data-encoder": "1.7.2",
|
||||
"formdata-node": "^4.3.2",
|
||||
"node-fetch": "^2.6.7"
|
||||
},
|
||||
"bin": {
|
||||
"openai": "bin/cli"
|
||||
}
|
||||
},
|
||||
"node_modules/openapi-types": {
|
||||
"version": "12.1.3",
|
||||
"resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz",
|
||||
"integrity": "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw=="
|
||||
},
|
||||
"node_modules/p-finally": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
|
||||
"integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==",
|
||||
"engines": {
|
||||
"node": ">=4"
|
||||
}
|
||||
},
|
||||
"node_modules/p-queue": {
|
||||
"version": "6.6.2",
|
||||
"resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz",
|
||||
"integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==",
|
||||
"dependencies": {
|
||||
"eventemitter3": "^4.0.4",
|
||||
"p-timeout": "^3.2.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/p-retry": {
|
||||
"version": "4.6.2",
|
||||
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
|
||||
"integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
|
||||
"dependencies": {
|
||||
"@types/retry": "0.12.0",
|
||||
"retry": "^0.13.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/p-timeout": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz",
|
||||
"integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==",
|
||||
"dependencies": {
|
||||
"p-finally": "^1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/retry": {
|
||||
"version": "0.13.1",
|
||||
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
|
||||
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
|
||||
"engines": {
|
||||
"node": ">= 4"
|
||||
}
|
||||
},
|
||||
"node_modules/tr46": {
|
||||
"version": "0.0.3",
|
||||
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
|
||||
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
|
||||
},
|
||||
"node_modules/typescript": {
|
||||
"version": "5.2.2",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz",
|
||||
"integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==",
|
||||
"dev": true,
|
||||
"bin": {
|
||||
"tsc": "bin/tsc",
|
||||
"tsserver": "bin/tsserver"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14.17"
|
||||
}
|
||||
},
|
||||
"node_modules/uuid": {
|
||||
"version": "9.0.1",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
|
||||
"integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
|
||||
"funding": [
|
||||
"https://github.com/sponsors/broofa",
|
||||
"https://github.com/sponsors/ctavan"
|
||||
],
|
||||
"bin": {
|
||||
"uuid": "dist/bin/uuid"
|
||||
}
|
||||
},
|
||||
"node_modules/web-streams-polyfill": {
|
||||
"version": "4.0.0-beta.3",
|
||||
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
|
||||
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
|
||||
"engines": {
|
||||
"node": ">= 14"
|
||||
}
|
||||
},
|
||||
"node_modules/webidl-conversions": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
|
||||
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
|
||||
},
|
||||
"node_modules/whatwg-url": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
|
||||
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
|
||||
"dependencies": {
|
||||
"tr46": "~0.0.3",
|
||||
"webidl-conversions": "^3.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/yaml": {
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
|
||||
"integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==",
|
||||
"engines": {
|
||||
"node": ">= 14"
|
||||
}
|
||||
},
|
||||
"node_modules/zod": {
|
||||
"version": "3.22.4",
|
||||
"resolved": "https://registry.npmjs.org/zod/-/zod-3.22.4.tgz",
|
||||
"integrity": "sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/colinhacks"
|
||||
}
|
||||
},
|
||||
"node_modules/zod-to-json-schema": {
|
||||
"version": "3.21.4",
|
||||
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.21.4.tgz",
|
||||
"integrity": "sha512-fjUZh4nQ1s6HMccgIeE0VP4QG/YRGPmyjO9sAh890aQKPEk3nqbfUXhMFaC+Dr5KvYBm8BCyvfpZf2jY9aGSsw==",
|
||||
"peerDependencies": {
|
||||
"zod": "^3.21.4"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
8 examples/langchain-typescript-simple/package.json Normal file
@@ -0,0 +1,8 @@
{
  "devDependencies": {
    "typescript": "^5.2.2"
  },
  "dependencies": {
    "langchain": "^0.0.165"
  }
}
@@ -1,8 +0,0 @@
# Modelfile for creating a Midjourney prompts from a topic
# This prompt was adapted from the original at https://www.greataiprompts.com/guide/midjourney/best-chatgpt-prompt-for-midjourney/
# Run `ollama create mj -f ./Modelfile` and then `ollama run mj` and enter a topic

FROM nous-hermes
SYSTEM """
Embrace your role as an AI-powered creative assistant, employing Midjourney to manifest compelling AI-generated art. I will outline a specific image concept, and in response, you must produce an exhaustive, multifaceted prompt for Midjourney, ensuring every detail of the original concept is represented in your instructions. Midjourney doesn't do well with text, so after the prompt, give me instructions that I can use to create the titles in a image editor.
"""
23 examples/modelfile-10tweets/README.md Normal file
@@ -0,0 +1,23 @@
# Ten Tweets Modelfile

This is a simple modelfile that generates ten tweets based off any topic.

```bash
ollama create tentweets

ollama run tentweets
>>> underwater basketweaving
Great! Here are ten creative tweets about underwater basketweaving:

1. "Just discovered the ultimate stress-reliever: Underwater basketweaving! 🌊🧵 #UnderwaterBasketweaving #StressRelief"
2. "Who needs meditation when you can do underwater basketweaving? 😴👀 #PeacefulDistraction #UnderwaterBasketweaving"
3. "Just spent an hour in the pool and still managed to knot my basket. Goal: untangle it before next session. 💪🏽 #ChallengeAccepted #UnderwaterBasketweaving"
4. "When life gives you lemons, make underwater basketweaving! 🍋🧵 #LemonadeLife #UnderwaterBasketweaving"
5. "Just realized my underwater basketweaving skills could come in handy during a zombie apocalypse. 😂🧡 #SurvivalTips #UnderwaterBasketweaving"
6. "I'm not lazy, I'm just conserving energy for my next underwater basketweaving session. 😴💤 #LazyDay #UnderwaterBasketweaving"
7. "Just found my inner peace while doing underwater basketweaving. It's like meditation, but with knots! 🙏🧵 #Mindfulness #UnderwaterBasketweaving"
8. "Why study for exams when you can do underwater basketweaving and forget all your worries? 😜🧵 #ProcrastinationStation #UnderwaterBasketweaving"
9. "Just had to cut my underwater basketweaving session short due to a sudden urge to breathe. 🤯🌊 #AquaticAdventures #UnderwaterBasketweaving"
10. "I'm not sure what's more impressive: my underwater basketweaving skills or the fact that I didn't drown trying to make this tweet. 😅🧵 #Accomplishment
#UnderwaterBasketweaving"
```
@@ -1,7 +1,7 @@
# Modelfile for creating a devops engineer assistant
# Run `ollama create devops-engineer -f ./Modelfile` and then `ollama run devops-engineer` and enter a topic

FROM llama2:13b
FROM mistral
PARAMETER temperature 1
SYSTEM """
You are a senior devops engineer, acting as an assistant. You offer help with cloud technologies like: Terraform, AWS, kubernetes, python. You answer with code examples when possible
40 examples/modelfile-devopsengineer/README.md Normal file
@@ -0,0 +1,40 @@
# DevOps Engineer Modelfile

This is a simple modelfile that will respond to any questions that a senior DevOps engineer should know how to solve.

````text
ollama create devopsengineer

ollama run devopsengineer
>>> how can i proxy all connections from inside the office through a single server that caches large files. Provide a config file for nginx to achieve this?

Here is an example configuration file for Nginx that can be used to proxy all connections from inside the office network to a centralized cache server:

```perl
http {
    upstream cache_server {
        server <cache_server_ip>;
        server <cache_server_ip>;
        server <cache_server_ip>;
    }

    server {
        listen 80;
        server_name localhost;

        location / {
            proxy_pass http://cache_server;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }
    }
}
```

In this configuration, we define an `upstream` block that lists the IP addresses of the cache servers. We then define a server block that listens on port 80 and routes all traffic for the `/` location to the cache servers using the `proxy_pass` directive. The `proxy_set_header` directives are used to preserve the source IP address of the client request when forwarding it to the cache server.

To use this configuration, you would need to replace the placeholder `<cache_server_ip>` with the actual IP addresses of your cache servers. You would also need to make sure that the cache servers are configured to accept incoming connections from the Nginx server and handle requests for files.

````
11 examples/modelfile-midjourney/Modelfile Normal file
@@ -0,0 +1,11 @@
# Modelfile for creating Midjourney prompts from a topic
# This prompt was adapted from the original at https://www.greataiprompts.com/guide/midjourney/best-chatgpt-prompt-for-midjourney/
# Run `ollama create mj -f ./Modelfile` and then `ollama run mj` and enter a topic

FROM zephyr
PARAMETER temperature 0.8
PARAMETER top_k 500
PARAMETER top_p 0.9
SYSTEM """
Embrace your role as a creative illustrator. Based on a concept provided, you must produce a single paragraph with a multifaceted description of an image, ensuring significant details of the concept and more is represented in your instructions. You do not need to write complete sentences but rather short concepts with the following information: the level of detail that should be represented, an artistic style and maybe a specific name of a painter or illustrator, the ideal color palette, lighting, mood, perspective, the setting, time of day, weather, the season, the time period, location, materials, the textures, patterns, lines, brushstrokes, techniques, the medium, the genre, the rendering style. Don't include everything and keep the description length under 250 words.
"""
11 examples/modelfile-midjourney/README.md Normal file
@@ -0,0 +1,11 @@
# Midjourney Prompt Generator Modelfile

This simple modelfile will help create a prompt to feed to Midjourney.

```text
ollama create midjourney

ollama run midjourney
>>> a sports car in the mountains.
A sleek, high-performance automobile cuts through a serpentine mountain landscape. The concept is a classic illustration of speed and power, depicted in the style of pop art by Andy Warhol. The color palette is dominated by bold, primary hues of red, blue, and yellow, with striking accent colors of white, black, and metallic shades. The lighting is bright and focused, casting sharp shadows on the rugged terrain. A sense of excitement and anticipation permeates throughout the scene, as the car navigates a treacherous course through the winding road. The perspective is low, allowing for a full view of the vehicle's sleek lines and intricate details. The setting takes place in the afternoon during a sunny day in autumn, as evidenced by the vibrant foliage on the mountainside. The time period is modern, with nods to classic car design. The materials are primarily digital, allowing for smooth curves and sharp contrasts. The textures are sleek and polished, with meticulously detailed lines and brushstrokes that accentuate the car's aerodynamic design. The patterns consist of geometric shapes and bold stripes, adding to the car's dynamic appeal. The genre is modern realism, with a focus on precision and detail. The rendering style is highly technical, capturing the nuances and subtleties of the vehicle and its surroundings in breathtaking detail.
```
20 examples/modelfile-recipemaker/README.md Normal file
@@ -0,0 +1,20 @@
# Recipe Maker Modelfile

Simple modelfile to generate a recipe from a short list of ingredients.

```
ollama create recipemaker

ollama run recipemaker
>>> chilli pepper, white chocolate, kale
Ingredients:
- 1 small chili pepper
- 4 squares of white chocolate
- handful of kale leaves

Instructions:
1. In a blender or food processor, puree the chilies and white chocolate until smooth.
2. Add the chopped kale leaves to the blender and pulse until well combined.
3. Serve immediately as a dip for crackers or use it as an ingredient in your favorite recipe. The mixture of spicy chili pepper with sweet white chocolate and nutritious kale will make your taste buds dance with delight!
```
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
FROM llama2
FROM mistral
SYSTEM """
You are an experienced Devops engineer focused on docker. When given specifications for a particular need or application you know the best way to host that within a docker container. For instance if someone tells you they want an nginx server to host files located at /web you will answer as follows

31 examples/python-json-datagenerator/predefinedschema.py Normal file
@@ -0,0 +1,31 @@
import requests
import json
import random

model = "llama2"
template = {
    "firstName": "",
    "lastName": "",
    "address": {
        "street": "",
        "city": "",
        "state": "",
        "zipCode": ""
    },
    "phoneNumber": ""
}

prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in the US, and phone number. \nUse the following template: {json.dumps(template)}."

data = {
    "prompt": prompt,
    "model": model,
    "format": "json",
    "stream": False,
    "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100},
}

print(f"Generating a sample user")
response = requests.post("http://localhost:11434/api/generate", json=data, stream=False)
json_data = json.loads(response.text)
print(json.dumps(json.loads(json_data["response"]), indent=2))
31 examples/python-json-datagenerator/randomaddresses.py Normal file
@@ -0,0 +1,31 @@
import requests
import json
import random

countries = [
    "United States",
    "United Kingdom",
    "the Netherlands",
    "Germany",
    "Mexico",
    "Canada",
    "France",
]
country = random.choice(countries)
model = "llama2"

prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."

data = {
    "prompt": prompt,
    "model": model,
    "format": "json",
    "stream": False,
    "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100},
}

print(f"Generating a sample user in {country}")
response = requests.post("http://localhost:11434/api/generate", json=data, stream=False)
json_data = json.loads(response.text)

print(json.dumps(json.loads(json_data["response"]), indent=2))
34 examples/python-json-datagenerator/readme.md Normal file
@@ -0,0 +1,34 @@
# JSON Output Example

![llmjson 2023-11-10 15_31_31](https://github.com/jmorganca/ollama/assets/633681/e599d986-9b4a-4118-81a4-4cfe7e22da25)

There are two python scripts in this example. `randomaddresses.py` generates random addresses from different countries. `predefinedschema.py` sets a template for the model to fill in.

## Review the Code

Both programs are basically the same, with a different prompt for each, demonstrating two different ideas. The key part of getting JSON out of a model is to state in the prompt or system prompt that it should respond using JSON, and to specify the `format` as `json` in the data body.

```python
prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."

data = {
    "prompt": prompt,
    "model": model,
    "format": "json",
    "stream": False,
    "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100},
}
```

When running `randomaddresses.py` you will see that the schema changes and adapts to the chosen country.

In `predefinedschema.py`, a template has been specified in the prompt as well. It's been defined as JSON and then dumped into the prompt string to make it easier to work with.

Both examples turn streaming off so that we end up with the completed JSON all at once. We need to convert the `response.text` to JSON so that when we output it as a string we can set the indent spacing to make the output easy to read.

```python
response = requests.post("http://localhost:11434/api/generate", json=data, stream=False)
json_data = json.loads(response.text)

print(json.dumps(json.loads(json_data["response"]), indent=2))
```
1 examples/python-json-datagenerator/requirements.txt Normal file
@@ -0,0 +1 @@
Requests==2.31.0
8 examples/python-loganalysis/Modelfile Normal file
@@ -0,0 +1,8 @@
FROM codebooga:latest

SYSTEM """
You are a log file analyzer. You will receive a set of lines from a log file for some software application, find the errors and other interesting aspects of the logs, and explain them so a new user can understand what they mean. If there are any steps they can do to resolve them, list the steps in your answer.
"""

PARAMETER temperature 0.3
42 examples/python-loganalysis/loganalysis.py Normal file
@@ -0,0 +1,42 @@
import sys
import re
import requests
import json

# prelines and postlines represent the number of lines of context to include in the output around the error
prelines = 10
postlines = 10

def find_errors_in_log_file():
    if len(sys.argv) < 2:
        print("Usage: python loganalysis.py <filename>")
        sys.exit(1)  # exit here; returning None would crash the join below

    log_file_path = sys.argv[1]
    with open(log_file_path, 'r') as log_file:
        log_lines = log_file.readlines()

    error_logs = []
    for i, line in enumerate(log_lines):
        if "error" in line.lower():
            start_index = max(0, i - prelines)
            end_index = min(len(log_lines), i + postlines + 1)
            error_logs.extend(log_lines[start_index:end_index])

    return error_logs

error_logs = find_errors_in_log_file()

data = {
    "prompt": "\n".join(error_logs),
    "model": "mattw/loganalyzer"
}

response = requests.post("http://localhost:11434/api/generate", json=data, stream=True)
for line in response.iter_lines():
    if line:
        json_data = json.loads(line)
        if not json_data['done']:
            print(json_data['response'], end='', flush=True)
32 examples/python-loganalysis/logtest.logfile Normal file
@@ -0,0 +1,32 @@
2023-11-10 07:17:40 /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
2023-11-10 07:17:40 /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
2023-11-10 07:17:40 10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
2023-11-10 07:17:40 10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
2023-11-10 07:17:40 /docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh
2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
2023-11-10 07:17:40 /docker-entrypoint.sh: Configuration complete; ready for start up
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: using the "epoll" event method
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: nginx/1.25.3
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14)
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: OS: Linux 6.4.16-linuxkit
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker processes
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 29
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 30
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 31
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 32
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 33
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 34
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 35
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 36
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 37
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 38
2023-11-10 07:17:44 192.168.65.1 - - [10/Nov/2023:13:17:43 +0000] "GET / HTTP/1.1" 200 615 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
2023-11-10 07:17:44 2023/11/10 13:17:44 [error] 29#29: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "localhost:8080", referrer: "http://localhost:8080/"
2023-11-10 07:17:44 192.168.65.1 - - [10/Nov/2023:13:17:44 +0000] "GET /favicon.ico HTTP/1.1" 404 555 "http://localhost:8080/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
2023-11-10 07:17:50 2023/11/10 13:17:50 [error] 29#29: *1 open() "/usr/share/nginx/html/ahstat" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /ahstat HTTP/1.1", host: "localhost:8080"
2023-11-10 07:17:50 192.168.65.1 - - [10/Nov/2023:13:17:50 +0000] "GET /ahstat HTTP/1.1" 404 555 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
2023-11-10 07:18:53 2023/11/10 13:18:53 [error] 29#29: *1 open() "/usr/share/nginx/html/ahstat" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /ahstat HTTP/1.1", host: "localhost:8080"
2023-11-10 07:18:53 192.168.65.1 - - [10/Nov/2023:13:18:53 +0000] "GET /ahstat HTTP/1.1" 404 555 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
48 examples/python-loganalysis/readme.md Normal file
@@ -0,0 +1,48 @@
# Log Analysis example

![loganalyzer 2023-11-10 08_53_29](https://github.com/jmorganca/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921)

This example shows one possible way to create a log file analyzer. To use it, run:

`python loganalysis.py <logfile>`

You can try this with the `logtest.logfile` file included in this directory.

## Review the code

The first part of this example is a Modelfile that takes `codebooga` and applies a new System Prompt:

```plaintext
SYSTEM """
You are a log file analyzer. You will receive a set of lines from a log file for some software application, find the errors and other interesting aspects of the logs, and explain them so a new user can understand what they mean. If there are any steps they can do to resolve them, list the steps in your answer.
"""
```

This model is available at https://ollama.ai/mattw/loganalyzer. You can customize it and add to your own namespace using the command `ollama create <namespace/modelname> -f <path-to-modelfile>` then `ollama push <namespace/modelname>`.

Then loganalysis.py scans all the lines in the given log file and searches for the word 'error'. When the word is found, the 10 lines before and after are set as the prompt for a call to the Generate API.

```python
data = {
    "prompt": "\n".join(error_logs),
    "model": "mattw/loganalyzer"
}
```

Finally, the streamed output is parsed and the response field in the output is printed to the terminal.

```python
response = requests.post("http://localhost:11434/api/generate", json=data, stream=True)
for line in response.iter_lines():
    if line:
        json_data = json.loads(line)
        if not json_data['done']:
            print(json_data['response'], end='')
```

## Next Steps

There is a lot more that can be done here. This is a simple way to detect errors: looking for the word "error". Perhaps it would be more interesting to find anomalous activity in the logs. You could create embeddings for each line and compare them, looking for similar lines, or apply a Levenshtein distance algorithm to find lines that are unlike the rest and therefore likely anomalous.
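As a minimal sketch of the edit-distance idea, using the standard library's `difflib` similarity ratio as a stand-in for a true Levenshtein implementation, you could flag lines that don't closely resemble anything seen earlier in the file:

```python
import difflib

# Flag lines whose best similarity to any earlier line is low; routine log
# noise tends to repeat, so a low best ratio suggests an anomalous line.
def find_anomalies(log_lines, threshold=0.5):
    seen, anomalies = [], []
    for line in log_lines:
        best = max((difflib.SequenceMatcher(None, line, s).ratio() for s in seen), default=0.0)
        if best < threshold:
            anomalies.append(line)
        seen.append(line)
    return anomalies
```

Comparing every pair is quadratic, so for large logs the embedding approach would scale better.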

Also try different models and different prompts to analyze the data. You could consider adding retrieval augmented generation (RAG) to this to help understand newer log formats.
1 examples/python-loganalysis/requirements.txt Normal file
@@ -0,0 +1 @@
Requests==2.31.0
22 examples/python-rag-newssummary/README.md Normal file
@@ -0,0 +1,22 @@
# News Summarizer

This example goes through a series of steps:

1. You choose a topic area (e.g., "news", "NVidia", "music", etc.).
2. Gets the most recent articles on that topic from various sources.
3. Uses Ollama to summarize each article.
4. Creates chunks of sentences from each article.
5. Uses Sentence Transformers to generate embeddings for each of those chunks.
6. You enter a question regarding the summaries shown.
7. Uses Sentence Transformers to generate an embedding for that question.
8. Uses the embedded question to find the most similar chunks.
9. Feeds all that to Ollama to generate a good answer to your question based on these news articles.

This example lets you pick from a few different topic areas, then summarize the most recent x articles for that topic. It then creates chunks of sentences from each article and then generates embeddings for each of those chunks, as sketched below.
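A minimal sketch of the chunk-and-embed step, using the same `all-MiniLM-L6-v2` model the example loads (the fixed-size windows here are a naive stand-in for the `chunker` helper the example actually uses):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("all-MiniLM-L6-v2")

# Naive stand-in for the real chunker: fixed-size character windows.
text = "First sentence of an article. Second sentence. ..."
chunks = [text[i:i + 200] for i in range(0, len(text), 200)]

embeddings = model.encode(chunks)  # one vector per chunk, ready for KNN search
```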

You can run the example like this:

```bash
pip install -r requirements.txt
python summ.py
```
9 examples/python-rag-newssummary/requirements.txt Normal file
@@ -0,0 +1,9 @@
beautifulsoup4==4.12.2
feedparser==6.0.10
mattsollamatools==0.0.8
newspaper3k==0.2.8
nltk==3.8.1
numpy==1.24.3
Requests==2.31.0
scikit_learn==1.3.0
sentence_transformers==2.2.2
86 examples/python-rag-newssummary/summ.py Normal file
@@ -0,0 +1,86 @@
import curses
import json
from utils import get_url_for_topic, topic_urls, menu, getUrls, get_summary, getArticleText, knn_search
import requests
from sentence_transformers import SentenceTransformer
from mattsollamatools import chunker

if __name__ == "__main__":
    chosen_topic = curses.wrapper(menu)
    print("Here is your news summary:\n")
    urls = getUrls(chosen_topic, n=5)
    model = SentenceTransformer('all-MiniLM-L6-v2')
    allEmbeddings = []

    for url in urls:
        article = {}
        article['embeddings'] = []
        article['url'] = url
        text = getArticleText(url)
        summary = get_summary(text)
        chunks = chunker(text)  # split the article text into sentence chunks
        embeddings = model.encode(chunks)
        for (chunk, embedding) in zip(chunks, embeddings):
            item = {}
            item['source'] = chunk
            item['embedding'] = embedding.tolist()  # Convert NumPy array to list
            item['sourcelength'] = len(chunk)
            article['embeddings'].append(item)

        allEmbeddings.append(article)

        print(f"{summary}\n")

    context = []  # conversation context returned by the API, carried across questions
    while True:
        # Input a question from the user
        question = input("Enter your question about the news, or type quit: ")

        if question.lower() == 'quit':
            break

        # Embed the user's question
        question_embedding = model.encode([question])

        # Perform KNN search to find the best matches (indices and source text)
        best_matches = knn_search(question_embedding, allEmbeddings, k=10)

        sourcetext = ""
        for i, (index, source_text) in enumerate(best_matches, start=1):
            sourcetext += f"{i}. Index: {index}, Source Text: {source_text}"

        systemPrompt = f"Only use the following information to answer the question. Do not use anything else: {sourcetext}"

        url = "http://localhost:11434/api/generate"

        payload = {
            "model": "mistral-openorca",
            "prompt": question,
            "system": systemPrompt,
            "stream": False,
            "context": context
        }

        # Convert the payload to a JSON string
        payload_json = json.dumps(payload)

        # Set the headers to specify JSON content
        headers = {
            "Content-Type": "application/json"
        }

        # Send the POST request
        response = requests.post(url, data=payload_json, headers=headers)

        # Check the response
        if response.status_code == 200:
            output = json.loads(response.text)
            context = output['context']
            print(output['response'] + "\n")
        else:
            print(f"Request failed with status code {response.status_code}")
108 examples/python-rag-newssummary/utils.py Normal file
@@ -0,0 +1,108 @@
import curses
import feedparser
import requests
import unicodedata
import json
from newspaper import Article
from bs4 import BeautifulSoup
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy as np
from sklearn.neighbors import NearestNeighbors
from mattsollamatools import chunker

# Create a dictionary to store topics and their URLs
topic_urls = {
    "Mac": "https://9to5mac.com/guides/mac/feed",
    "News": "http://www.npr.org/rss/rss.php?id=1001",
    "Nvidia": "https://nvidianews.nvidia.com/releases.xml",
    "Raspberry Pi": "https://www.raspberrypi.com/news/feed/",
    "Music": "https://www.billboard.com/c/music/music-news/feed/"
}

# Use curses to create a menu of topics and return the feed URL for the selection
def menu(stdscr):
    url = get_url_for_topic(stdscr)  # already the feed URL for the chosen topic

    stdscr.addstr(len(topic_urls) + 3, 0, f"Selected URL: {url}")
    stdscr.refresh()

    return url

# You have chosen a topic. Now return the url for that topic
def get_url_for_topic(stdscr):
    curses.curs_set(0)  # Hide the cursor
    stdscr.clear()

    stdscr.addstr(0, 0, "Choose a topic using the arrow keys (Press Enter to select):")

    # Create a list of topics
    topics = list(topic_urls.keys())
    current_topic = 0

    while True:
        for i, topic in enumerate(topics):
            if i == current_topic:
                stdscr.addstr(i + 2, 2, f"> {topic}")
            else:
                stdscr.addstr(i + 2, 2, f"  {topic}")

        stdscr.refresh()

        key = stdscr.getch()

        if key == curses.KEY_DOWN and current_topic < len(topics) - 1:
            current_topic += 1
        elif key == curses.KEY_UP and current_topic > 0:
            current_topic -= 1
        elif key == 10:  # Enter key
            return topic_urls[topics[current_topic]]

# Get the last N URLs from an RSS feed
def getUrls(feed_url, n=20):
    feed = feedparser.parse(feed_url)
    entries = feed.entries[-n:]
    urls = [entry.link for entry in entries]
    return urls

# Often there are a bunch of ads and menus on pages for a news article. This uses newspaper3k to get just the text of the article.
def getArticleText(url):
    article = Article(url)
    article.download()
    article.parse()
    return article.text

def get_summary(text):
    systemPrompt = "Write a concise summary of the text, return your responses with 5 lines that cover the key points of the text given."
    prompt = text

    url = "http://localhost:11434/api/generate"

    payload = {
        "model": "mistral-openorca",
        "prompt": prompt,
        "system": systemPrompt,
        "stream": False
    }
    payload_json = json.dumps(payload)
    headers = {"Content-Type": "application/json"}
    response = requests.post(url, data=payload_json, headers=headers)

    return json.loads(response.text)["response"]

# Perform K-nearest neighbors (KNN) search
def knn_search(question_embedding, embeddings, k=5):
    X = np.array([item['embedding'] for article in embeddings for item in article['embeddings']])
    source_texts = [item['source'] for article in embeddings for item in article['embeddings']]

    # Fit a KNN model on the embeddings
    knn = NearestNeighbors(n_neighbors=k, metric='cosine')
    knn.fit(X)

    # Find the indices and distances of the k-nearest neighbors
    distances, indices = knn.kneighbors(question_embedding, n_neighbors=k)

    # Get the indices and source texts of the best matches
    best_matches = [(indices[0][i], source_texts[indices[0][i]]) for i in range(k)]

    return best_matches
46 examples/python-simplechat/client.py Normal file
@@ -0,0 +1,46 @@
import json
import requests

# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
model = "llama2"  # TODO: update this for whatever model you wish to use


def chat(messages):
    r = requests.post(
        "http://0.0.0.0:11434/api/chat",
        json={"model": model, "messages": messages, "stream": True},
    )
    r.raise_for_status()
    output = ""

    for line in r.iter_lines():
        body = json.loads(line)
        if "error" in body:
            raise Exception(body["error"])
        if body.get("done") is False:
            message = body.get("message", "")
            content = message.get("content", "")
            output += content
            # the response streams one token at a time, print that as we receive it
            print(content, end="", flush=True)

        if body.get("done", False):
            message["content"] = output
            return message


def main():
    messages = []

    while True:
        user_input = input("Enter a prompt: ")
        print()
        messages.append({"role": "user", "content": user_input})
        message = chat(messages)
        messages.append(message)
        print("\n\n")


if __name__ == "__main__":
    main()
24 examples/python-simplechat/readme.md Normal file
@@ -0,0 +1,24 @@
# Simple Chat Example

The **chat** endpoint is one of two ways to generate text from an LLM with Ollama. At a high level, you provide the endpoint an array of objects with a role and content specified. Then with each output and prompt, you add more of those role/content objects, which builds up the history.
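For example, a short history might look like this (illustrative values; the role/content shape is exactly what `client.py` below sends):

```python
# Each completed turn appends another role/content object, so the model
# always sees the whole conversation so far.
messages = [
    {"role": "user", "content": "Why is the sky blue?"},
    {"role": "assistant", "content": "Because shorter wavelengths scatter more..."},
    {"role": "user", "content": "Does the same apply at sunset?"},
]
```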

## Review the Code

You can see in the **chat** function that actually calling the endpoint is done simply with:

```python
r = requests.post(
    "http://0.0.0.0:11434/api/chat",
    json={"model": model, "messages": messages, "stream": True},
)
```

With the **generate** endpoint, you need to provide a `prompt`. But with **chat**, you provide `messages`. And the resulting stream of responses includes a `message` object with a `content` field.

The final JSON object doesn't provide the full content, so you will need to build the content yourself.
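As a rough sketch, each streamed line decodes to an object shaped roughly like the following (the `message` and `done` fields are the ones the client relies on; treat the exact field set as illustrative, since it can vary between Ollama versions):

```python
# One intermediate chunk: a fragment of the assistant's reply plus done=False.
chunk = {
    "model": "llama2",
    "message": {"role": "assistant", "content": "Hel"},
    "done": False,
}
# The final chunk arrives with "done": True and no further content, which is
# why the chat function concatenates the fragments into `output` itself.
```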

In the **main** function, we collect `user_input` and add it as a message to our messages, and that is passed to the chat function. When the LLM is done responding, the output is added as another message.

## Next Steps

In this example, all generations are kept. You might want to experiment with summarizing everything older than 10 conversations to enable longer history with less context being used.
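A minimal sketch of that idea, assuming a hypothetical `summarize()` helper that makes one extra model call to condense the old turns (it is not part of this example):

```python
# Keep the last `keep` messages verbatim and fold everything older into a
# single summary message. summarize() is hypothetical, not provided here.
def compact_history(messages, keep=10):
    if len(messages) <= keep:
        return messages
    older, recent = messages[:-keep], messages[-keep:]
    summary = summarize(older)  # hypothetical extra chat/generate call
    return [{"role": "assistant", "content": f"Summary of the conversation so far: {summary}"}] + recent
```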

@@ -17,7 +17,7 @@ def generate(prompt, context):
    for line in r.iter_lines():
        body = json.loads(line)
        response_part = body.get('response', '')
        # the response streams one token at a time, print that as we recieve it
        # the response streams one token at a time, print that as we receive it
        print(response_part, end='', flush=True)

        if 'error' in body:
@@ -35,4 +35,4 @@ def main():
    print()

if __name__ == "__main__":
    main()
    main()
118  examples/typescript-functioncalling/extractemail.ts  Normal file
@@ -0,0 +1,118 @@
import { Ollama } from "ollama-node";
import { readFile } from "fs/promises";

// function to be called on events
function reportEvents(name: string, date: string, location: string) {
  const nameString = name ? `${name}` : `an event`;
  const dateString = date ? ` on ${date}` : ``;
  const locationString = location ? ` at ${location}` : ``;
  console.log(`You have an event: ${nameString}${dateString}${locationString}`);
}

// function to be called on addresses
function reportAddresses(address) {
  for (const field in address) {
    if (address[field]) {
      if (field === "city") {
        const city = address.city;
        const state = address.state ? `, ${address.state}` : '';
        const zip = address.zip ? ` ${address.zip}` : '';
        console.log(`${city}${state}${zip}`);
        break;
      } else {
        console.log(`${address[field]}`);
      }
    }
  }
  console.log(``);
}

async function main() {

  const ollama = new Ollama();

  const systemprompt = `You will be given a text along with a prompt and a schema. You will have to extract the information requested in the prompt from the text and generate output in JSON observing the schema provided. If the schema shows a type of integer or number, you must only show an integer for that field. A string should always be a valid string. If a value is unknown, leave it empty. Output the JSON with extra spaces to ensure that it pretty prints.`;

  const schema = {
    "eventsQuantity": {
      "type": "integer",
      "description": "The number of events in the source text"
    },
    "addressesQuantity": {
      "type": "integer",
      "description": "The number of addresses in the source text"
    },
    "events": [{
      "name": {
        "type": "string",
        "description": "Name of the event"
      },
      "date": {
        "type": "string",
        "description": "Date of the event"
      },
      "location": {
        "type": "string",
        "description": "Location of the event"
      },
      "extraInfo": {
        "type": "string",
        "description": "Any extra information that is provided about the event."
      }
    }],
    "people": [{
      "name": {
        "type": "string",
        "description": "Name of the person"
      },
      "company": {
        "type": "string",
        "description": "Name of the company where they work"
      },
      "street": {
        "type": "string",
        "description": "Street address of the person or company. This is only the street name and the numerical address. Do not include city, state, or zip of the address in this field."
      },
      "city": {
        "type": "string",
        "description": "City portion of the address of the person or company"
      },
      "state": {
        "type": "string",
        "description": "State portion of the address of the person or company"
      },
      "zip": {
        "type": "string",
        "description": "Zip code of the person or company"
      },
      "extraInfo": {
        "type": "string",
        "description": "Any extra information that is provided about the location."
      }
    }]
  }

  const textcontent = await readFile("./info.txt", "utf-8").then((text) => text.split(" ").slice(0, 2000).join(" "));

  const prompt = `The source text is a series of emails that have been put into a single file. They are separated by three dashes. Review the source text and determine the full address of the person sending each of the emails as well as any events that we need to track. If they provide a company address, use that. If any extra info is provided, such as a description of the place, or a floor, add it to extraInfo. The first field in the JSON is the quantity of events and should be set to the number of events tracked, and the second field should be set to the number of addresses tracked in the file. Don't stuff an event into the output that isn't an event. Only add data to the most appropriate field. Don't make up fields that aren't in the schema. If there isn't a value for a field, use null. Output should be in JSON.\n\nSchema: \n${JSON.stringify(schema, null, 2)}\n\nSource Text:\n${textcontent}`;

  await ollama.setModel("neural-chat");
  ollama.setSystemPrompt(systemprompt);
  ollama.setJSONFormat(true);
  const data = await ollama.generate(prompt);
  const output = JSON.parse(data.output);
  const events = output.events;
  const addresses = output.people;

  console.log(`Here are your ${output.eventsQuantity} events:`);
  for (const event of events) {
    reportEvents(event.name, event.date, event.location);
  }

  console.log(`\n\nHere are your ${output.addressesQuantity} addresses:`);
  for (const address of addresses) {
    reportAddresses(address);
  }
}

main();
38  examples/typescript-functioncalling/extractwp.ts  Normal file
@@ -0,0 +1,38 @@
import { Ollama } from "ollama-node";
import { readFile } from "fs/promises";

async function main() {

  const ollama = new Ollama();

  // Set the system prompt to prepare the model to receive a prompt and a schema and set some rules for the output.
  const systemprompt = `You will be given a text along with a prompt and a schema. You will have to extract the information requested in the prompt from the text and generate output in JSON observing the schema provided. If the schema shows a type of integer or number, you must only show an integer for that field. A string should always be a valid string. If a value is unknown, leave it empty. Output the JSON with extra spaces to ensure that it pretty prints.`;

  const schema = {
    "people": [{
      "name": {
        "type": "string",
        "description": "Name of the person"
      },
      "title": {
        "type": "string",
        "description": "Title of the person"
      }
    }],
  }

  // Depending on the model chosen, you may be limited by the size of the context window, so limit the context to 2000 words.
  const textcontent = await readFile("./wp.txt", "utf-8").then((text) => text.split(" ").slice(0, 2000).join(" "));

  // Specific instructions for this task
  const prompt = `Review the source text and determine the 10 most important people to focus on. Then extract the name and title for those people. Output should be in JSON.\n\nSchema: \n${JSON.stringify(schema, null, 2)}\n\nSource Text:\n${textcontent}`;

  await ollama.setModel("neural-chat");
  ollama.setSystemPrompt(systemprompt);

  // setJSONFormat is the equivalent of setting 'format: json' in the API
  ollama.setJSONFormat(true);
  await ollama.streamingGenerate(prompt, (word) => { process.stdout.write(word) });
}

main();
17  examples/typescript-functioncalling/info.txt  Normal file
@@ -0,0 +1,17 @@
---
Hi matt,

thanks for letting me know that you are going to come today, November 16, for my tea party. My address is 123 Falk St on Bainbridge Island. I live in the house with the red door. I will be home all day so just come by whenever you want.

Fred

---
Great, send the check to our office at 1917 1st St, Seattle, WA 98101. I will let you know when we receive it.

Mark Richardson
Big Corp
---
We are looking forward to seeing you at our Local AI Meetup. It will be held on December 3. It will be at the offices of Enormous Co. Our address is 344 1st Ave, Seattle, WA 98101. We will be meeting in the conference room on the 3rd floor.

Barbara Reilly
Enormous Co.
519  examples/typescript-functioncalling/package-lock.json  generated  Normal file
@@ -0,0 +1,519 @@
Some generated files are not rendered by default.
(519-line npm lockfile, lockfileVersion 3: it pins ollama-node 0.1.27 with @types/node and undici-types, plus the dev dependencies typescript 5.2.2 and tsx 4.1.2 with its esbuild 0.18.20 per-platform binaries, get-tsconfig, source-map-support, and optional fsevents.)
9  examples/typescript-functioncalling/package.json  Normal file
@@ -0,0 +1,9 @@
{
  "dependencies": {
    "ollama-node": "^0.1.27"
  },
  "devDependencies": {
    "tsx": "^4.1.2",
    "typescript": "^5.2.2"
  }
}
Some files were not shown because too many files have changed in this diff.