Compare commits
v0.1.13...royh-opena (1394 commits)
[Commit table (Author / SHA1 / Date) omitted: 1394 commits listed by SHA1 only, first listed 9357570d59, last listed b99c291f47; the Author, Date, and commit message fields were not captured.]
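The same commit range can be reproduced locally with plain git, assuming the v0.1.13 tag and the royh-opena branch have both been fetched from the ollama remote:

```bash
# Count the commits reachable from royh-opena but not from v0.1.13
# (should match the 1394 shown above)
git rev-list --count v0.1.13..royh-opena

# List them with abbreviated SHAs and commit subjects
git log --oneline v0.1.13..royh-opena
```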
.gitignore
@@ -1,9 +1,9 @@
 .vscode
 ollama
 app
+macapp
 dist
-scripts
-llm/llama.cpp/ggml
-llm/llama.cpp/gguf
+llm/llama.cpp
 .env
 .cache
+test_data
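After this change, git can report which ignore rule matches a given path; a quick spot check (paths chosen only for illustration) might look like:

```bash
# -v prints the .gitignore source file, line number, and pattern that excludes each path
git check-ignore -v dist macapp test_data llm/llama.cpp
```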
1 .gitattributes vendored Normal file
@@ -0,0 +1 @@
llm/ext_server/* linguist-vendored
60 .github/ISSUE_TEMPLATE/10_bug_report.yml vendored Normal file
@@ -0,0 +1,60 @@
name: Bug report
labels: [bug]
description: Something isn't working right.
body:
  - type: textarea
    id: description
    attributes:
      label: What is the issue?
      description: What happened? What did you expect to happen?
    validations:
      required: true
  - type: dropdown
    id: os
    attributes:
      label: OS
      description: Which operating system are you using?
      multiple: true
      options:
        - Linux
        - macOS
        - Windows
        - Docker
        - WSL2
    validations:
      required: false
  - type: dropdown
    id: gpu
    attributes:
      label: GPU
      description: Which GPU are you using?
      multiple: true
      options:
        - Nvidia
        - AMD
        - Intel
        - Apple
        - Other
    validations:
      required: false
  - type: dropdown
    id: cpu
    attributes:
      label: CPU
      description: Which CPU are you using?
      multiple: true
      options:
        - Intel
        - AMD
        - Apple
        - Other
    validations:
      required: false
  - type: input
    id: version
    attributes:
      label: Ollama version
      description: What version of Ollama are you using? (`ollama --version`)
      placeholder: e.g., 0.1.32
    validations:
      required: false
6 .github/ISSUE_TEMPLATE/20_feature_request.md vendored Normal file
@@ -0,0 +1,6 @@
---
name: Feature request
about: Request a new feature
labels: feature request
---

5 .github/ISSUE_TEMPLATE/30_model_request.md vendored Normal file
@@ -0,0 +1,5 @@
---
name: Model request
about: Request support for a new model to be added to Ollama
labels: model request
---
8 .github/ISSUE_TEMPLATE/config.yml vendored Normal file
@@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
  - name: Help
    url: https://discord.com/invite/ollama
    about: Please join our Discord server for help using Ollama
  - name: Troubleshooting
    url: https://github.com/ollama/ollama/blob/main/docs/faq.md#faq
    about: See the FAQ for common issues and solutions
24 .github/workflows/latest.yaml vendored Normal file
@@ -0,0 +1,24 @@
name: latest

on:
  release:
    types: [released]

jobs:
  update-latest:
    environment: release
    runs-on: linux
    steps:
      - uses: actions/checkout@v4
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKER_USER }}
          password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
      - name: Tag images as latest
        env:
          PUSH: "1"
        shell: bash
        run: |
          export "VERSION=${GITHUB_REF_NAME#v}"
          ./scripts/tag_latest.sh
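Both this workflow and the release workflow below derive the version from the pushed tag with the shell expansion `${GITHUB_REF_NAME#v}`. A standalone illustration of that prefix stripping:

```bash
# GitHub sets GITHUB_REF_NAME to the tag name on a tag push; the value here is only an example
GITHUB_REF_NAME=v0.1.32
# "#v" removes the shortest leading match of "v", leaving the bare version number
echo "${GITHUB_REF_NAME#v}"   # prints: 0.1.32
```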
474 .github/workflows/release.yaml vendored Normal file
@@ -0,0 +1,474 @@
|
|||||||
|
name: release
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- 'v*'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
# Full build of the Mac assets
|
||||||
|
build-darwin:
|
||||||
|
runs-on: macos-12
|
||||||
|
environment: release
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
echo "RELEASE_VERSION=$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" >> $GITHUB_ENV
|
||||||
|
- name: key
|
||||||
|
env:
|
||||||
|
MACOS_SIGNING_KEY: ${{ secrets.MACOS_SIGNING_KEY }}
|
||||||
|
MACOS_SIGNING_KEY_PASSWORD: ${{ secrets.MACOS_SIGNING_KEY_PASSWORD }}
|
||||||
|
run: |
|
||||||
|
echo $MACOS_SIGNING_KEY | base64 --decode > certificate.p12
|
||||||
|
security create-keychain -p password build.keychain
|
||||||
|
security default-keychain -s build.keychain
|
||||||
|
security unlock-keychain -p password build.keychain
|
||||||
|
security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
|
||||||
|
security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
|
||||||
|
security set-keychain-settings -lut 3600 build.keychain
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: go.mod
|
||||||
|
cache: true
|
||||||
|
- name: Build Darwin
|
||||||
|
env:
|
||||||
|
APPLE_IDENTITY: ${{ secrets.APPLE_IDENTITY }}
|
||||||
|
APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }}
|
||||||
|
APPLE_TEAM_ID: ${{ vars.APPLE_TEAM_ID }}
|
||||||
|
APPLE_ID: ${{ vars.APPLE_ID }}
|
||||||
|
SDKROOT: /Applications/Xcode_13.4.1.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk
|
||||||
|
DEVELOPER_DIR: /Applications/Xcode_13.4.1.app/Contents/Developer
|
||||||
|
run: |
|
||||||
|
./scripts/build_darwin.sh
|
||||||
|
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: dist-darwin
|
||||||
|
path: |
|
||||||
|
dist/*arwin*
|
||||||
|
!dist/*-cov
|
||||||
|
|
||||||
|
# Windows builds take a long time to both install the dependencies and build, so parallelize
|
||||||
|
# CPU generation step
|
||||||
|
generate-windows-cpu:
|
||||||
|
environment: release
|
||||||
|
runs-on: windows
|
||||||
|
env:
|
||||||
|
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
- uses: 'google-github-actions/auth@v2'
|
||||||
|
with:
|
||||||
|
project_id: 'ollama'
|
||||||
|
credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}'
|
||||||
|
- run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt
|
||||||
|
- name: install Windows SDK 8.1 to get signtool
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading SDK"
|
||||||
|
Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe"
|
||||||
|
Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait
|
||||||
|
write-host "Win SDK 8.1 installed"
|
||||||
|
gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe'
|
||||||
|
- name: install signing plugin
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading plugin"
|
||||||
|
Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip"
|
||||||
|
Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\
|
||||||
|
write-host "Installing plugin"
|
||||||
|
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
|
||||||
|
write-host "plugin installed"
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: go.mod
|
||||||
|
cache: true
|
||||||
|
- run: go get ./...
|
||||||
|
- run: |
|
||||||
|
$gopath=(get-command go).source | split-path -parent
|
||||||
|
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
|
||||||
|
cd $env:GITHUB_WORKSPACE
|
||||||
|
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
|
||||||
|
$env:PATH="$gopath;$env:PATH"
|
||||||
|
go generate -x ./...
|
||||||
|
name: go generate
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: generate-windows-cpu
|
||||||
|
path: |
|
||||||
|
llm/build/**/bin/*
|
||||||
|
llm/build/**/*.a
|
||||||
|
dist/windows-amd64/**
|
||||||
|
|
||||||
|
# ROCm generation step
|
||||||
|
generate-windows-rocm:
|
||||||
|
environment: release
|
||||||
|
runs-on: windows
|
||||||
|
env:
|
||||||
|
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
- uses: 'google-github-actions/auth@v2'
|
||||||
|
with:
|
||||||
|
project_id: 'ollama'
|
||||||
|
credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}'
|
||||||
|
- run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt
|
||||||
|
- name: install Windows SDK 8.1 to get signtool
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading SDK"
|
||||||
|
Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe"
|
||||||
|
Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait
|
||||||
|
write-host "Win SDK 8.1 installed"
|
||||||
|
gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe'
|
||||||
|
- name: install signing plugin
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading plugin"
|
||||||
|
Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip"
|
||||||
|
Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\
|
||||||
|
write-host "Installing plugin"
|
||||||
|
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
|
||||||
|
write-host "plugin installed"
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: go.mod
|
||||||
|
cache: true
|
||||||
|
- name: 'Install ROCm'
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading AMD HIP Installer"
|
||||||
|
Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
|
||||||
|
write-host "Installing AMD HIP"
|
||||||
|
Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
|
||||||
|
write-host "Completed AMD HIP"
|
||||||
|
- name: 'Verify ROCm'
|
||||||
|
run: |
|
||||||
|
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
|
||||||
|
- run: go get ./...
|
||||||
|
- run: |
|
||||||
|
$gopath=(get-command go).source | split-path -parent
|
||||||
|
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
|
||||||
|
cd $env:GITHUB_WORKSPACE
|
||||||
|
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
|
||||||
|
$env:PATH="$gopath;$env:PATH"
|
||||||
|
$env:OLLAMA_SKIP_CPU_GENERATE="1"
|
||||||
|
$env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
|
||||||
|
go generate -x ./...
|
||||||
|
name: go generate
|
||||||
|
- name: 'gather rocm dependencies'
|
||||||
|
run: |
|
||||||
|
$HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
|
||||||
|
md "dist\deps\bin\rocblas\library"
|
||||||
|
cp "${HIP_PATH}\bin\hipblas.dll" "dist\deps\bin\"
|
||||||
|
cp "${HIP_PATH}\bin\rocblas.dll" "dist\deps\bin\"
|
||||||
|
cp "${HIP_PATH}\bin\rocblas\library\*" "dist\deps\bin\rocblas\library\"
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: generate-windows-rocm
|
||||||
|
path: |
|
||||||
|
llm/build/**/bin/*
|
||||||
|
dist/windows-amd64/**
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: windows-rocm-deps
|
||||||
|
path: dist/deps/*
|
||||||
|
|
||||||
|
# CUDA generation step
|
||||||
|
generate-windows-cuda:
|
||||||
|
environment: release
|
||||||
|
runs-on: windows
|
||||||
|
env:
|
||||||
|
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
- uses: 'google-github-actions/auth@v2'
|
||||||
|
with:
|
||||||
|
project_id: 'ollama'
|
||||||
|
credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}'
|
||||||
|
- run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt
|
||||||
|
- name: install Windows SDK 8.1 to get signtool
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading SDK"
|
||||||
|
Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe"
|
||||||
|
Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait
|
||||||
|
write-host "Win SDK 8.1 installed"
|
||||||
|
gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe'
|
||||||
|
- name: install signing plugin
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading plugin"
|
||||||
|
Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip"
|
||||||
|
Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\
|
||||||
|
write-host "Installing plugin"
|
||||||
|
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
|
||||||
|
write-host "plugin installed"
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: go.mod
|
||||||
|
cache: true
|
||||||
|
- name: 'Install CUDA'
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading CUDA Installer"
|
||||||
|
Invoke-WebRequest -Uri "https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe" -OutFile "${env:RUNNER_TEMP}\cuda-install.exe"
|
||||||
|
write-host "Installing CUDA"
|
||||||
|
Start-Process "${env:RUNNER_TEMP}\cuda-install.exe" -ArgumentList '-s' -NoNewWindow -Wait
|
||||||
|
write-host "Completed CUDA"
|
||||||
|
$cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path)
|
||||||
|
$cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2'
|
||||||
|
echo "$cudaPath\bin" >> $env:GITHUB_PATH
|
||||||
|
echo "CUDA_PATH=$cudaPath" >> $env:GITHUB_ENV
|
||||||
|
echo "CUDA_PATH_V${cudaVer}=$cudaPath" >> $env:GITHUB_ENV
|
||||||
|
echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" >> $env:GITHUB_ENV
|
||||||
|
- name: 'Verify CUDA'
|
||||||
|
run: nvcc -V
|
||||||
|
- run: go get ./...
|
||||||
|
- name: go generate
|
||||||
|
run: |
|
||||||
|
$gopath=(get-command go).source | split-path -parent
|
||||||
|
$cudabin=(get-command nvcc).source | split-path
|
||||||
|
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
|
||||||
|
cd $env:GITHUB_WORKSPACE
|
||||||
|
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
|
||||||
|
$env:PATH="$gopath;$cudabin;$env:PATH"
|
||||||
|
$env:OLLAMA_SKIP_CPU_GENERATE="1"
|
||||||
|
go generate -x ./...
|
||||||
|
- name: 'gather cuda dependencies'
|
||||||
|
run: |
|
||||||
|
$NVIDIA_DIR=(resolve-path 'C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\*\bin\')[0]
|
||||||
|
md "dist\deps"
|
||||||
|
cp "${NVIDIA_DIR}\cudart64_*.dll" "dist\deps\"
|
||||||
|
cp "${NVIDIA_DIR}\cublas64_*.dll" "dist\deps\"
|
||||||
|
cp "${NVIDIA_DIR}\cublasLt64_*.dll" "dist\deps\"
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: generate-windows-cuda
|
||||||
|
path: |
|
||||||
|
llm/build/**/bin/*
|
||||||
|
dist/windows-amd64/**
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: windows-cuda-deps
|
||||||
|
path: dist/deps/*
|
||||||
|
|
||||||
|
# Import the prior generation steps and build the final windows assets
|
||||||
|
build-windows:
|
||||||
|
environment: release
|
||||||
|
runs-on: windows
|
||||||
|
needs:
|
||||||
|
- generate-windows-cuda
|
||||||
|
- generate-windows-rocm
|
||||||
|
- generate-windows-cpu
|
||||||
|
env:
|
||||||
|
KEY_CONTAINER: ${{ vars.KEY_CONTAINER }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
- uses: 'google-github-actions/auth@v2'
|
||||||
|
with:
|
||||||
|
project_id: 'ollama'
|
||||||
|
credentials_json: '${{ secrets.GOOGLE_SIGNING_CREDENTIALS }}'
|
||||||
|
- run: echo "${{ vars.OLLAMA_CERT }}" > ollama_inc.crt
|
||||||
|
- name: install Windows SDK 8.1 to get signtool
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading SDK"
|
||||||
|
Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkId=323507" -OutFile "${env:RUNNER_TEMP}\sdksetup.exe"
|
||||||
|
Start-Process "${env:RUNNER_TEMP}\sdksetup.exe" -ArgumentList @("/q") -NoNewWindow -Wait
|
||||||
|
write-host "Win SDK 8.1 installed"
|
||||||
|
gci -path 'C:\Program Files (x86)\Windows Kits\' -r -fi 'signtool.exe'
|
||||||
|
- name: install signing plugin
|
||||||
|
run: |
|
||||||
|
$ErrorActionPreference = "Stop"
|
||||||
|
write-host "downloading plugin"
|
||||||
|
Invoke-WebRequest -Uri "https://github.com/GoogleCloudPlatform/kms-integrations/releases/download/cng-v1.0/kmscng-1.0-windows-amd64.zip" -OutFile "${env:RUNNER_TEMP}\plugin.zip"
|
||||||
|
Expand-Archive -Path "${env:RUNNER_TEMP}\plugin.zip" -DestinationPath ${env:RUNNER_TEMP}\plugin\
|
||||||
|
write-host "Installing plugin"
|
||||||
|
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
|
||||||
|
write-host "plugin installed"
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: go.mod
|
||||||
|
cache: true
|
||||||
|
- run: go get
|
||||||
|
- uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: generate-windows-cpu
|
||||||
|
- uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: generate-windows-cuda
|
||||||
|
- uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: windows-cuda-deps
|
||||||
|
- uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: windows-rocm-deps
|
||||||
|
- uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: generate-windows-rocm
|
||||||
|
- run: dir llm/build
|
||||||
|
- run: |
|
||||||
|
$gopath=(get-command go).source | split-path -parent
|
||||||
|
& "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
|
||||||
|
cd $env:GITHUB_WORKSPACE
|
||||||
|
$env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
|
||||||
|
$env:PATH="$gopath;$env:PATH"
|
||||||
|
$env:OLLAMA_SKIP_GENERATE="1"
|
||||||
|
& .\scripts\build_windows.ps1
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: dist-windows
|
||||||
|
path: |
|
||||||
|
dist/OllamaSetup.exe
|
||||||
|
dist/ollama-windows-*.zip
|
||||||
|
|
||||||
|
# Linux x86 assets built using the container based build
|
||||||
|
build-linux-amd64:
|
||||||
|
environment: release
|
||||||
|
runs-on: linux
|
||||||
|
env:
|
||||||
|
OLLAMA_SKIP_MANIFEST_CREATE: '1'
|
||||||
|
BUILD_ARCH: amd64
|
||||||
|
PUSH: '1'
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ vars.DOCKER_USER }}
|
||||||
|
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
||||||
|
- run: |
|
||||||
|
./scripts/build_linux.sh
|
||||||
|
./scripts/build_docker.sh
|
||||||
|
mv dist/deps/* dist/
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: dist-linux-amd64
|
||||||
|
path: |
|
||||||
|
dist/*linux*
|
||||||
|
!dist/*-cov
|
||||||
|
|
||||||
|
# Linux ARM assets built using the container based build
|
||||||
|
# (at present, docker isn't pre-installed on arm ubunutu images)
|
||||||
|
build-linux-arm64:
|
||||||
|
environment: release
|
||||||
|
runs-on: linux-arm64
|
||||||
|
env:
|
||||||
|
OLLAMA_SKIP_MANIFEST_CREATE: '1'
|
||||||
|
BUILD_ARCH: arm64
|
||||||
|
PUSH: '1'
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
submodules: recursive
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
- name: 'Install Docker'
|
||||||
|
run: |
|
||||||
|
# Add Docker's official GPG key:
|
||||||
|
env
|
||||||
|
uname -a
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y ca-certificates curl
|
||||||
|
sudo install -m 0755 -d /etc/apt/keyrings
|
||||||
|
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
|
||||||
|
sudo chmod a+r /etc/apt/keyrings/docker.asc
|
||||||
|
|
||||||
|
# Add the repository to Apt sources:
|
||||||
|
echo \
|
||||||
|
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
|
||||||
|
$(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
|
||||||
|
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
|
||||||
|
sudo usermod -aG docker $USER
|
||||||
|
sudo apt-get install acl
|
||||||
|
sudo setfacl --modify user:$USER:rw /var/run/docker.sock
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ vars.DOCKER_USER }}
|
||||||
|
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
||||||
|
- run: |
|
||||||
|
./scripts/build_linux.sh
|
||||||
|
./scripts/build_docker.sh
|
||||||
|
- uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: dist-linux-arm64
|
||||||
|
path: |
|
||||||
|
dist/*linux*
|
||||||
|
!dist/*-cov
|
||||||
|
|
||||||
|
# Aggregate all the assets and ship a release
|
||||||
|
release:
|
||||||
|
needs:
|
||||||
|
- build-darwin
|
||||||
|
- build-windows
|
||||||
|
- build-linux-amd64
|
||||||
|
- build-linux-arm64
|
||||||
|
runs-on: linux
|
||||||
|
environment: release
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
env:
|
||||||
|
OLLAMA_SKIP_IMAGE_BUILD: '1'
|
||||||
|
PUSH: '1'
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set Version
|
||||||
|
shell: bash
|
||||||
|
run: |
|
||||||
|
echo "VERSION=${GITHUB_REF_NAME#v}" >> $GITHUB_ENV
|
||||||
|
echo "RELEASE_VERSION=$(echo ${GITHUB_REF_NAME} | cut -f1 -d-)" >> $GITHUB_ENV
|
||||||
|
- name: Login to Docker Hub
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
username: ${{ vars.DOCKER_USER }}
|
||||||
|
password: ${{ secrets.DOCKER_ACCESS_TOKEN }}
|
||||||
|
- run: ./scripts/build_docker.sh
|
||||||
|
- name: Retrieve built artifact
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
path: dist
|
||||||
|
pattern: dist-*
|
||||||
|
merge-multiple: true
|
||||||
|
- run: |
|
||||||
|
ls -lh dist/
|
||||||
|
(cd dist; sha256sum * > sha256sum.txt)
|
||||||
|
cat dist/sha256sum.txt
|
||||||
|
- uses: ncipollo/release-action@v1
|
||||||
|
with:
|
||||||
|
name: ${{ env.RELEASE_VERSION }}
|
||||||
|
allowUpdates: true
|
||||||
|
artifacts: 'dist/*'
|
||||||
|
draft: true
|
||||||
|
prerelease: true
|
||||||
|
omitBodyDuringUpdate: true
|
||||||
|
generateReleaseNotes: true
|
||||||
|
omitDraftDuringUpdate: true
|
||||||
|
omitPrereleaseDuringUpdate: true
|
||||||
|
replacesArtifacts: true
|
||||||
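The release job above collects every artifact into dist/ and writes dist/sha256sum.txt next to them. Assuming that checksum file is published with the release, a downloaded asset can be verified against it, for example:

```bash
# Run in the directory containing the downloaded asset(s) and sha256sum.txt;
# --ignore-missing skips checksum entries for files that were not downloaded
sha256sum -c --ignore-missing sha256sum.txt
```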
321 .github/workflows/test.yaml vendored Normal file
@@ -0,0 +1,321 @@
|
|||||||
|
name: test
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
# For PRs, later CI runs preempt previous ones. e.g. a force push on a PR
|
||||||
|
# cancels running CI jobs and starts all new ones.
|
||||||
|
#
|
||||||
|
# For non-PR pushes, concurrency.group needs to be unique for every distinct
|
||||||
|
# CI run we want to have happen. Use run_id, which in practice means all
|
||||||
|
# non-PR CI runs will be allowed to run without preempting each other.
|
||||||
|
group: ${{ github.workflow }}-$${{ github.pull_request.number || github.run_id }}
  cancel-in-progress: true

on:
  pull_request:
    paths:
      - '**/*'
      - '!docs/**'
      - '!README.md'

jobs:
  changes:
    runs-on: ubuntu-latest
    outputs:
      GENERATE: ${{ steps.changes.outputs.GENERATE }}
      GENERATE_CUDA: ${{ steps.changes.outputs.GENERATE_CUDA }}
      GENERATE_ROCM: ${{ steps.changes.outputs.GENERATE_ROCM }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - id: changes
        run: |
          changed() {
            git diff-tree -r --no-commit-id --name-only \
              $(git merge-base ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }}) \
              ${{ github.event.pull_request.head.sha }} \
              | xargs python3 -c "import sys; from pathlib import Path; print(any(Path(x).match(glob) for x in sys.argv[1:] for glob in '$*'.split(' ')))"
          }

          {
            echo GENERATE=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
            echo GENERATE_CUDA=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
            echo GENERATE_ROCM=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
          } >>$GITHUB_OUTPUT

  generate:
    needs: [changes]
    if: ${{ needs.changes.outputs.GENERATE == 'True' }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-2019]
        arch: [amd64, arm64]
        exclude:
          - os: ubuntu-latest
            arch: arm64
          - os: windows-2019
            arch: arm64
    runs-on: ${{ matrix.os }}
    env:
      GOARCH: ${{ matrix.arch }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache: true
      - run: go get ./...
      - run: |
          $gopath=(get-command go).source | split-path -parent
          $gccpath=(get-command gcc).source | split-path -parent
          & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
          cd $env:GITHUB_WORKSPACE
          $env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
          $env:PATH="$gopath;$gccpath;$env:PATH"
          echo $env:PATH
          go generate -x ./...
        if: ${{ startsWith(matrix.os, 'windows-') }}
        name: 'Windows Go Generate'
      - run: go generate -x ./...
        if: ${{ ! startsWith(matrix.os, 'windows-') }}
        name: 'Unix Go Generate'
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.os }}-${{ matrix.arch }}-libraries
          path: |
            llm/build/**/bin/*
            llm/build/**/*.a
  generate-cuda:
    needs: [changes]
    if: ${{ needs.changes.outputs.GENERATE_CUDA == 'True' }}
    strategy:
      matrix:
        cuda-version:
          - '11.8.0'
    runs-on: linux
    container: nvidia/cuda:${{ matrix.cuda-version }}-devel-ubuntu20.04
    steps:
      - run: |
          apt-get update && apt-get install -y git build-essential curl
          curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
            | tar -zx -C /usr --strip-components 1
        env:
          DEBIAN_FRONTEND: noninteractive
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version-file: go.mod
          cache: true
      - run: go get ./...
      - run: |
          git config --global --add safe.directory /__w/ollama/ollama
          go generate -x ./...
        env:
          OLLAMA_SKIP_CPU_GENERATE: '1'
      - uses: actions/upload-artifact@v4
        with:
          name: cuda-${{ matrix.cuda-version }}-libraries
          path: |
            llm/build/**/bin/*
            dist/windows-amd64/**
  generate-rocm:
    needs: [changes]
    if: ${{ needs.changes.outputs.GENERATE_ROCM == 'True' }}
    strategy:
      matrix:
        rocm-version:
          - '6.0.2'
    runs-on: linux
    container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }}
    steps:
      - run: |
          apt-get update && apt-get install -y git build-essential curl rocm-libs
          curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
            | tar -zx -C /usr --strip-components 1
        env:
          DEBIAN_FRONTEND: noninteractive
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version-file: go.mod
          cache: true
      - run: go get ./...
      - run: |
          git config --global --add safe.directory /__w/ollama/ollama
          go generate -x ./...
        env:
          OLLAMA_SKIP_CPU_GENERATE: '1'
      - uses: actions/upload-artifact@v4
        with:
          name: rocm-${{ matrix.rocm-version }}-libraries
          path: |
            llm/build/**/bin/*
            dist/windows-amd64/**

  # ROCm generation step
  generate-windows-rocm:
    needs: [changes]
    if: ${{ needs.changes.outputs.GENERATE_ROCM == 'True' }}
    runs-on: windows
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache: true
      - name: 'Install ROCm'
        run: |
          $ErrorActionPreference = "Stop"
          write-host "downloading AMD HIP Installer"
          Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
          write-host "Installing AMD HIP"
          Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
          write-host "Completed AMD HIP"
      - name: 'Verify ROCm'
        run: |
          & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
      - run: go get ./...
      - run: |
          $gopath=(get-command go).source | split-path -parent
          & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
          cd $env:GITHUB_WORKSPACE
          $env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
          $env:PATH="$gopath;$env:PATH"
          $env:OLLAMA_SKIP_CPU_GENERATE="1"
          $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path)
          go generate -x ./...
        name: go generate
        env:
          OLLAMA_SKIP_CPU_GENERATE: '1'
      # TODO - do we need any artifacts?

  # CUDA generation step
  generate-windows-cuda:
    needs: [changes]
    if: ${{ needs.changes.outputs.GENERATE_CUDA == 'True' }}
    runs-on: windows
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache: true
      - name: 'Install CUDA'
        run: |
          $ErrorActionPreference = "Stop"
          write-host "downloading CUDA Installer"
          Invoke-WebRequest -Uri "https://developer.download.nvidia.com/compute/cuda/11.3.1/local_installers/cuda_11.3.1_465.89_win10.exe" -OutFile "${env:RUNNER_TEMP}\cuda-install.exe"
          write-host "Installing CUDA"
          Start-Process "${env:RUNNER_TEMP}\cuda-install.exe" -ArgumentList '-s' -NoNewWindow -Wait
          write-host "Completed CUDA"
          $cudaPath=((resolve-path "c:\Program Files\NVIDIA*\CUDA\v*\bin\nvcc.exe")[0].path | split-path | split-path)
          $cudaVer=($cudaPath | split-path -leaf ) -replace 'v(\d+).(\d+)', '$1_$2'
          echo "$cudaPath\bin" >> $env:GITHUB_PATH
          echo "CUDA_PATH=$cudaPath" >> $env:GITHUB_ENV
          echo "CUDA_PATH_V${cudaVer}=$cudaPath" >> $env:GITHUB_ENV
          echo "CUDA_PATH_VX_Y=CUDA_PATH_V${cudaVer}" >> $env:GITHUB_ENV
      - name: 'Verify CUDA'
        run: nvcc -V
      - run: go get ./...
      - name: go generate
        run: |
          $gopath=(get-command go).source | split-path -parent
          $cudabin=(get-command nvcc).source | split-path
          & "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\Tools\Launch-VsDevShell.ps1"
          cd $env:GITHUB_WORKSPACE
          $env:CMAKE_SYSTEM_VERSION="10.0.22621.0"
          $env:PATH="$gopath;$cudabin;$env:PATH"
          $env:OLLAMA_SKIP_CPU_GENERATE="1"
          go generate -x ./...
        env:
          OLLAMA_SKIP_CPU_GENERATE: '1'
      # TODO - do we need any artifacts?

  lint:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-2019]
        arch: [amd64, arm64]
        exclude:
          - os: ubuntu-latest
            arch: arm64
          - os: windows-2019
            arch: arm64
          - os: macos-latest
            arch: amd64
    runs-on: ${{ matrix.os }}
    env:
      GOARCH: ${{ matrix.arch }}
      CGO_ENABLED: '1'
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache: false
      - run: |
          case ${{ matrix.arch }} in
            amd64) echo ARCH=x86_64 ;;
            arm64) echo ARCH=arm64 ;;
          esac >>$GITHUB_ENV
        shell: bash
      - run: |
          mkdir -p llm/build/linux/$ARCH/stub/bin
          touch llm/build/linux/$ARCH/stub/bin/ollama_llama_server
        if: ${{ startsWith(matrix.os, 'ubuntu-') }}
      - run: |
          mkdir -p llm/build/darwin/$ARCH/stub/bin
          touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
        if: ${{ startsWith(matrix.os, 'macos-') }}
      - uses: golangci/golangci-lint-action@v6
        with:
          args: --timeout 8m0s -v ${{ startsWith(matrix.os, 'windows-') && '' || '--disable gofmt --disable goimports' }}
  test:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-2019]
        arch: [amd64]
        exclude:
          - os: ubuntu-latest
            arch: arm64
          - os: windows-2019
            arch: arm64
    runs-on: ${{ matrix.os }}
    env:
      GOARCH: ${{ matrix.arch }}
      CGO_ENABLED: '1'
      OLLAMA_CPU_TARGET: 'static'
      OLLAMA_SKIP_CPU_GENERATE: '1'
      OLLAMA_SKIP_METAL_GENERATE: '1'
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
          cache: true
      - run: |
          case ${{ matrix.arch }} in
            amd64) echo ARCH=x86_64 ;;
            arm64) echo ARCH=arm64 ;;
          esac >>$GITHUB_ENV
        shell: bash
      - run: |
          mkdir -p llm/build/linux/$ARCH/stub/bin
          touch llm/build/linux/$ARCH/stub/bin/ollama_llama_server
        if: ${{ startsWith(matrix.os, 'ubuntu-') }}
      - run: |
          mkdir -p llm/build/darwin/$ARCH/stub/bin
          touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
        if: ${{ startsWith(matrix.os, 'macos-') }}
        shell: bash
      - run: go generate ./...
      - run: go build
      - run: go test -v ./...
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.os }}-binaries
          path: ollama
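The `changes` job above gates the expensive generate jobs on whether anything under the llm/ paths actually changed in the pull request. For illustration only, here is a rough Go sketch of the same check; it is a simplified stand-in that uses plain prefix matching rather than the Python `Path.match` globbing the workflow shells out to, and the file list is invented for the example.

```go
package main

import (
	"fmt"
	"strings"
)

// changedMatches reports whether any changed file falls under one of the
// watched path patterns. A trailing "**" is treated as "anything under this
// directory"; this is a simplification of the workflow's glob check.
func changedMatches(changedFiles, patterns []string) bool {
	for _, f := range changedFiles {
		for _, p := range patterns {
			prefix := strings.TrimSuffix(p, "**")
			if f == p || strings.HasPrefix(f, prefix) {
				return true
			}
		}
	}
	return false
}

func main() {
	// Invented example input; in the workflow this comes from `git diff-tree --name-only`.
	changed := []string{"llm/generate/gen_linux.sh", "docs/faq.md"}
	watched := []string{"llm/llama.cpp", "llm/patches/**", "llm/ext_server/**", "llm/generate/**"}

	fmt.Println(changedMatches(changed, watched)) // prints "true", so the generate jobs would run
}
```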
.gitignore (vendored): 4 changes
@@ -9,3 +9,7 @@ ggml-metal.metal
 .cache
 *.exe
 .idea
+test_data
+*.crt
+llm/build
+__debug_bin*
.gitmodules (vendored): 10 changes
@@ -1,10 +1,4 @@
-[submodule "llm/llama.cpp/ggml"]
-	path = llm/llama.cpp/ggml
+[submodule "llama.cpp"]
+	path = llm/llama.cpp
 	url = https://github.com/ggerganov/llama.cpp.git
-	ignore = dirty
-	shallow = true
-[submodule "llm/llama.cpp/gguf"]
-	path = llm/llama.cpp/gguf
-	url = https://github.com/ggerganov/llama.cpp.git
-	ignore = dirty
 	shallow = true
.golangci.yaml (new file): 34 additions
@@ -0,0 +1,34 @@
run:
  timeout: 5m
linters:
  enable:
    - asasalint
    - bidichk
    - bodyclose
    - containedctx
    - contextcheck
    - exportloopref
    - gocheckcompilerdirectives
    # conditionally enable this on linux/macos
    # - gofmt
    # - goimports
    - intrange
    - misspell
    - nilerr
    - nolintlint
    - nosprintfhostport
    - testifylint
    - unconvert
    - unused
    - wastedassign
    - whitespace
    - usestdlibvars
severity:
  default-severity: error
  rules:
    - linters:
        - gofmt
        - goimports
        - intrange
        - usestdlibvars
      severity: info
Dockerfile: 145 changes
@@ -1,29 +1,144 @@
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
|
ARG GOLANG_VERSION=1.22.1
|
||||||
|
ARG CMAKE_VERSION=3.22.1
|
||||||
|
# this CUDA_VERSION corresponds with the one specified in docs/gpu.md
|
||||||
|
ARG CUDA_VERSION=11.3.1
|
||||||
|
ARG ROCM_VERSION=6.0.2
|
||||||
|
|
||||||
ARG TARGETARCH
|
# Copy the minimal context we need to run the generate scripts
|
||||||
ARG GOFLAGS="'-ldflags=-w -s'"
|
FROM scratch AS llm-code
|
||||||
|
COPY .git .git
|
||||||
|
COPY .gitmodules .gitmodules
|
||||||
|
COPY llm llm
|
||||||
|
|
||||||
WORKDIR /go/src/github.com/jmorganca/ollama
|
FROM --platform=linux/amd64 nvidia/cuda:$CUDA_VERSION-devel-centos7 AS cuda-build-amd64
|
||||||
RUN apt-get update && apt-get install -y git build-essential cmake
|
ARG CMAKE_VERSION
|
||||||
ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
|
COPY ./scripts/rh_linux_deps.sh /
|
||||||
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
|
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
|
||||||
|
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||||
|
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
|
||||||
|
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
|
||||||
|
ARG CGO_CFLAGS
|
||||||
|
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
|
||||||
|
|
||||||
|
FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION-devel-rockylinux8 AS cuda-build-arm64
|
||||||
|
ARG CMAKE_VERSION
|
||||||
|
COPY ./scripts/rh_linux_deps.sh /
|
||||||
|
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
|
||||||
|
ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
|
||||||
|
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
|
||||||
|
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
|
||||||
|
ARG CGO_CFLAGS
|
||||||
|
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
|
||||||
|
|
||||||
|
FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete AS rocm-build-amd64
|
||||||
|
ARG CMAKE_VERSION
|
||||||
|
COPY ./scripts/rh_linux_deps.sh /
|
||||||
|
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
|
||||||
|
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||||
|
ENV LIBRARY_PATH /opt/amdgpu/lib64
|
||||||
|
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
|
||||||
|
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
|
||||||
|
ARG CGO_CFLAGS
|
||||||
|
ARG AMDGPU_TARGETS
|
||||||
|
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
|
||||||
|
RUN mkdir /tmp/scratch && \
|
||||||
|
for dep in $(zcat /go/src/github.com/ollama/ollama/llm/build/linux/x86_64/rocm*/bin/deps.txt.gz) ; do \
|
||||||
|
cp ${dep} /tmp/scratch/ || exit 1 ; \
|
||||||
|
done && \
|
||||||
|
(cd /opt/rocm/lib && tar cf - rocblas/library) | (cd /tmp/scratch/ && tar xf - ) && \
|
||||||
|
mkdir -p /go/src/github.com/ollama/ollama/dist/deps/ && \
|
||||||
|
(cd /tmp/scratch/ && tar czvf /go/src/github.com/ollama/ollama/dist/deps/ollama-linux-amd64-rocm.tgz . )
|
||||||
|
|
||||||
|
|
||||||
|
FROM --platform=linux/amd64 centos:7 AS cpu-builder-amd64
|
||||||
|
ARG CMAKE_VERSION
|
||||||
|
ARG GOLANG_VERSION
|
||||||
|
COPY ./scripts/rh_linux_deps.sh /
|
||||||
|
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
|
||||||
|
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||||
|
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
|
||||||
|
ARG OLLAMA_CUSTOM_CPU_DEFS
|
||||||
|
ARG CGO_CFLAGS
|
||||||
|
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
|
||||||
|
|
||||||
|
FROM --platform=linux/amd64 cpu-builder-amd64 AS static-build-amd64
|
||||||
|
RUN OLLAMA_CPU_TARGET="static" sh gen_linux.sh
|
||||||
|
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu-build-amd64
|
||||||
|
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
|
||||||
|
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx-build-amd64
|
||||||
|
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx" sh gen_linux.sh
|
||||||
|
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx2-build-amd64
|
||||||
|
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx2" sh gen_linux.sh
|
||||||
|
|
||||||
|
FROM --platform=linux/arm64 centos:7 AS cpu-builder-arm64
|
||||||
|
ARG CMAKE_VERSION
|
||||||
|
ARG GOLANG_VERSION
|
||||||
|
COPY ./scripts/rh_linux_deps.sh /
|
||||||
|
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
|
||||||
|
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||||
|
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
|
||||||
|
ARG OLLAMA_CUSTOM_CPU_DEFS
|
||||||
|
ARG CGO_CFLAGS
|
||||||
|
WORKDIR /go/src/github.com/ollama/ollama/llm/generate
|
||||||
|
|
||||||
|
FROM --platform=linux/arm64 cpu-builder-arm64 AS static-build-arm64
|
||||||
|
RUN OLLAMA_CPU_TARGET="static" sh gen_linux.sh
|
||||||
|
FROM --platform=linux/arm64 cpu-builder-arm64 AS cpu-build-arm64
|
||||||
|
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
|
||||||
|
|
||||||
|
|
||||||
|
# Intermediate stage used for ./scripts/build_linux.sh
|
||||||
|
FROM --platform=linux/amd64 cpu-build-amd64 AS build-amd64
|
||||||
|
ENV CGO_ENABLED 1
|
||||||
|
WORKDIR /go/src/github.com/ollama/ollama
|
||||||
COPY . .
|
COPY . .
|
||||||
ENV GOARCH=$TARGETARCH
|
COPY --from=static-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
|
||||||
ENV GOFLAGS=$GOFLAGS
|
COPY --from=cpu_avx-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
|
||||||
RUN /usr/local/go/bin/go generate ./... \
|
COPY --from=cpu_avx2-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
|
||||||
&& /usr/local/go/bin/go build .
|
COPY --from=cuda-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
|
||||||
|
COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
|
||||||
|
COPY --from=rocm-build-amd64 /go/src/github.com/ollama/ollama/dist/deps/ ./dist/deps/
|
||||||
|
ARG GOFLAGS
|
||||||
|
ARG CGO_CFLAGS
|
||||||
|
RUN go build -trimpath .
|
||||||
|
|
||||||
FROM ubuntu:22.04
|
# Intermediate stage used for ./scripts/build_linux.sh
|
||||||
|
FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
|
||||||
|
ENV CGO_ENABLED 1
|
||||||
|
ARG GOLANG_VERSION
|
||||||
|
WORKDIR /go/src/github.com/ollama/ollama
|
||||||
|
COPY . .
|
||||||
|
COPY --from=static-build-arm64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
|
||||||
|
COPY --from=cuda-build-arm64 /go/src/github.com/ollama/ollama/llm/build/linux/ llm/build/linux/
|
||||||
|
ARG GOFLAGS
|
||||||
|
ARG CGO_CFLAGS
|
||||||
|
RUN go build -trimpath .
|
||||||
|
|
||||||
|
# Runtime stages
|
||||||
|
FROM --platform=linux/amd64 ubuntu:22.04 as runtime-amd64
|
||||||
RUN apt-get update && apt-get install -y ca-certificates
|
RUN apt-get update && apt-get install -y ca-certificates
|
||||||
COPY --from=0 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
|
COPY --from=build-amd64 /go/src/github.com/ollama/ollama/ollama /bin/ollama
|
||||||
|
FROM --platform=linux/arm64 ubuntu:22.04 as runtime-arm64
|
||||||
|
RUN apt-get update && apt-get install -y ca-certificates
|
||||||
|
COPY --from=build-arm64 /go/src/github.com/ollama/ollama/ollama /bin/ollama
|
||||||
|
|
||||||
|
# Radeon images are much larger so we keep it distinct from the CPU/CUDA image
|
||||||
|
FROM --platform=linux/amd64 rocm/dev-centos-7:${ROCM_VERSION}-complete as runtime-rocm
|
||||||
|
RUN update-pciids
|
||||||
|
COPY --from=build-amd64 /go/src/github.com/ollama/ollama/ollama /bin/ollama
|
||||||
EXPOSE 11434
|
EXPOSE 11434
|
||||||
ENV OLLAMA_HOST 0.0.0.0
|
ENV OLLAMA_HOST 0.0.0.0
|
||||||
|
|
||||||
# set some environment variable for better NVIDIA compatibility
|
ENTRYPOINT ["/bin/ollama"]
|
||||||
ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
CMD ["serve"]
|
||||||
|
|
||||||
|
FROM runtime-$TARGETARCH
|
||||||
|
EXPOSE 11434
|
||||||
|
ENV OLLAMA_HOST 0.0.0.0
|
||||||
|
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||||
ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||||
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
|
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
|
||||||
|
ENV NVIDIA_VISIBLE_DEVICES=all
|
||||||
|
|
||||||
ENTRYPOINT ["/bin/ollama"]
|
ENTRYPOINT ["/bin/ollama"]
|
||||||
CMD ["serve"]
|
CMD ["serve"]
|
||||||
|
|||||||
@@ -1,31 +0,0 @@
|
|||||||
# centos7 amd64 dependencies
|
|
||||||
FROM --platform=linux/amd64 nvidia/cuda:11.3.1-devel-centos7 AS base-amd64
|
|
||||||
RUN yum install -y https://repo.ius.io/ius-release-el7.rpm centos-release-scl && \
|
|
||||||
yum update -y && \
|
|
||||||
yum install -y devtoolset-10-gcc devtoolset-10-gcc-c++ git236 wget
|
|
||||||
RUN wget "https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-x86_64.sh" -O cmake-installer.sh && chmod +x cmake-installer.sh && ./cmake-installer.sh --skip-license --prefix=/usr/local
|
|
||||||
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
|
||||||
|
|
||||||
# centos8 arm64 dependencies
|
|
||||||
FROM --platform=linux/arm64 nvidia/cuda-arm64:11.3.1-devel-centos8 AS base-arm64
|
|
||||||
RUN sed -i -e 's/mirrorlist/#mirrorlist/g' -e 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
|
|
||||||
RUN yum install -y git cmake
|
|
||||||
|
|
||||||
FROM base-${TARGETARCH}
|
|
||||||
ARG TARGETARCH
|
|
||||||
ARG GOFLAGS="'-ldflags -w -s'"
|
|
||||||
|
|
||||||
# install go
|
|
||||||
ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
|
|
||||||
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
|
|
||||||
|
|
||||||
# build the final binary
|
|
||||||
WORKDIR /go/src/github.com/jmorganca/ollama
|
|
||||||
COPY . .
|
|
||||||
|
|
||||||
ENV GOOS=linux
|
|
||||||
ENV GOARCH=$TARGETARCH
|
|
||||||
ENV GOFLAGS=$GOFLAGS
|
|
||||||
|
|
||||||
RUN /usr/local/go/bin/go generate ./... && \
|
|
||||||
/usr/local/go/bin/go build .
|
|
||||||
README.md: 216 changes
@@ -1,66 +1,72 @@
<div align="center">
|
<div align="center">
|
||||||
<picture>
|
<img alt="ollama" height="200px" src="https://github.com/ollama/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
|
||||||
<source media="(prefers-color-scheme: dark)" height="200px" srcset="https://github.com/jmorganca/ollama/assets/3325447/56ea1849-1284-4645-8970-956de6e51c3c">
|
|
||||||
<img alt="logo" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
|
|
||||||
</picture>
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
# Ollama
|
# Ollama
|
||||||
|
|
||||||
[](https://discord.gg/ollama)
|
[](https://discord.gg/ollama)
|
||||||
|
|
||||||
Get up and running with large language models locally.
|
Get up and running with large language models.
|
||||||
|
|
||||||
### macOS
|
### macOS
|
||||||
|
|
||||||
[Download](https://ollama.ai/download/Ollama-darwin.zip)
|
[Download](https://ollama.com/download/Ollama-darwin.zip)
|
||||||
|
|
||||||
### Windows
|
### Windows preview
|
||||||
|
|
||||||
Coming soon!
|
[Download](https://ollama.com/download/OllamaSetup.exe)
|
||||||
|
|
||||||
### Linux & WSL2
|
### Linux
|
||||||
|
|
||||||
```
|
```
|
||||||
curl https://ollama.ai/install.sh | sh
|
curl -fsSL https://ollama.com/install.sh | sh
|
||||||
```
|
```
|
||||||
|
|
||||||
[Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
|
[Manual install instructions](https://github.com/ollama/ollama/blob/main/docs/linux.md)
|
||||||
|
|
||||||
### Docker
|
### Docker
|
||||||
|
|
||||||
The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `ollama/ollama` is available on Docker Hub.
|
The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `ollama/ollama` is available on Docker Hub.
|
||||||
|
|
||||||
|
### Libraries
|
||||||
|
|
||||||
|
- [ollama-python](https://github.com/ollama/ollama-python)
|
||||||
|
- [ollama-js](https://github.com/ollama/ollama-js)
|
||||||
|
|
||||||
## Quickstart
|
## Quickstart
|
||||||
|
|
||||||
To run and chat with [Llama 2](https://ollama.ai/library/llama2):
|
To run and chat with [Llama 3](https://ollama.com/library/llama3):
|
||||||
|
|
||||||
```
|
```
|
||||||
ollama run llama2
|
ollama run llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
## Model library
|
## Model library
|
||||||
|
|
||||||
Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library 'ollama model library')
|
Ollama supports a list of models available on [ollama.com/library](https://ollama.com/library 'ollama model library')
|
||||||
|
|
||||||
Here are some example open-source models that can be downloaded:
|
Here are some example models that can be downloaded:
|
||||||
|
|
||||||
| Model | Parameters | Size | Download |
|
| Model | Parameters | Size | Download |
|
||||||
| ------------------ | ---------- | ----- | ------------------------------ |
|
| ------------------ | ---------- | ----- | ------------------------------ |
|
||||||
|
| Llama 3 | 8B | 4.7GB | `ollama run llama3` |
|
||||||
|
| Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
|
||||||
|
| Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` |
|
||||||
|
| Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` |
|
||||||
|
| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
|
||||||
|
| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
|
||||||
|
| Mistral | 7B | 4.1GB | `ollama run mistral` |
|
||||||
|
| Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
|
||||||
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
|
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
|
||||||
| Starling | 7B | 4.1GB | `ollama run starling-lm` |
|
| Starling | 7B | 4.1GB | `ollama run starling-lm` |
|
||||||
| Mistral | 7B | 4.1GB | `ollama run mistral` |
|
|
||||||
| Llama 2 | 7B | 3.8GB | `ollama run llama2` |
|
|
||||||
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
|
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
|
||||||
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
|
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
|
||||||
| Llama 2 13B | 13B | 7.3GB | `ollama run llama2:13b` |
|
| LLaVA | 7B | 4.5GB | `ollama run llava` |
|
||||||
| Llama 2 70B | 70B | 39GB | `ollama run llama2:70b` |
|
| Solar | 10.7B | 6.1GB | `ollama run solar` |
|
||||||
| Orca Mini | 3B | 1.9GB | `ollama run orca-mini` |
|
|
||||||
| Vicuna | 7B | 3.8GB | `ollama run vicuna` |
|
|
||||||
|
|
||||||
> Note: You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.
|
> Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
|
||||||
|
|
||||||
## Customize your own model
|
## Customize a model
|
||||||
|
|
||||||
### Import from GGUF
|
### Import from GGUF
|
||||||
|
|
||||||
@@ -90,21 +96,21 @@ See the [guide](docs/import.md) on importing models for more information.
|
|||||||
|
|
||||||
### Customize a prompt
|
### Customize a prompt
|
||||||
|
|
||||||
Models from the Ollama library can be customized with a prompt. For example, to customize the `llama2` model:
|
Models from the Ollama library can be customized with a prompt. For example, to customize the `llama3` model:
|
||||||
|
|
||||||
```
|
```
|
||||||
ollama pull llama2
|
ollama pull llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
Create a `Modelfile`:
|
Create a `Modelfile`:
|
||||||
|
|
||||||
```
|
```
|
||||||
FROM llama2
|
FROM llama3
|
||||||
|
|
||||||
# set the temperature to 1 [higher is more creative, lower is more coherent]
|
# set the temperature to 1 [higher is more creative, lower is more coherent]
|
||||||
PARAMETER temperature 1
|
PARAMETER temperature 1
|
||||||
|
|
||||||
# set the system prompt
|
# set the system message
|
||||||
SYSTEM """
|
SYSTEM """
|
||||||
You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
|
You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
|
||||||
"""
|
"""
|
||||||
@@ -127,10 +133,14 @@ For more examples, see the [examples](examples) directory. For more information
|
|||||||
|
|
||||||
`ollama create` is used to create a model from a Modelfile.
|
`ollama create` is used to create a model from a Modelfile.
|
||||||
|
|
||||||
|
```
|
||||||
|
ollama create mymodel -f ./Modelfile
|
||||||
|
```
|
||||||
|
|
||||||
### Pull a model
|
### Pull a model
|
||||||
|
|
||||||
```
|
```
|
||||||
ollama pull llama2
|
ollama pull llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
> This command can also be used to update a local model. Only the diff will be pulled.
|
> This command can also be used to update a local model. Only the diff will be pulled.
|
||||||
@@ -138,13 +148,13 @@ ollama pull llama2
|
|||||||
### Remove a model
|
### Remove a model
|
||||||
|
|
||||||
```
|
```
|
||||||
ollama rm llama2
|
ollama rm llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
### Copy a model
|
### Copy a model
|
||||||
|
|
||||||
```
|
```
|
||||||
ollama cp llama2 my-llama2
|
ollama cp llama3 my-model
|
||||||
```
|
```
|
||||||
|
|
||||||
### Multiline input
|
### Multiline input
|
||||||
@@ -158,10 +168,17 @@ For multiline input, you can wrap text with `"""`:
|
|||||||
I'm a basic program that prints the famous "Hello, world!" message to the console.
|
I'm a basic program that prints the famous "Hello, world!" message to the console.
|
||||||
```
|
```
|
||||||
|
|
||||||
### Pass in prompt as arguments
|
### Multimodal models
|
||||||
|
|
||||||
```
|
```
|
||||||
$ ollama run llama2 "Summarize this file: $(cat README.md)"
|
>>> What's in this image? /Users/jmorgan/Desktop/smile.png
|
||||||
|
The image features a yellow smiley face, which is likely the central focus of the picture.
|
||||||
|
```
|
||||||
|
|
||||||
|
### Pass the prompt as an argument
|
||||||
|
|
||||||
|
```
|
||||||
|
$ ollama run llama3 "Summarize this file: $(cat README.md)"
|
||||||
Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
|
Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -177,18 +194,9 @@ ollama list
|
|||||||
|
|
||||||
## Building
|
## Building
|
||||||
|
|
||||||
Install `cmake` and `go`:
|
See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
|
||||||
|
|
||||||
```
|
### Running local builds
|
||||||
brew install cmake go
|
|
||||||
```
|
|
||||||
|
|
||||||
Then generate dependencies and build:
|
|
||||||
|
|
||||||
```
|
|
||||||
go generate ./...
|
|
||||||
go build .
|
|
||||||
```
|
|
||||||
|
|
||||||
Next, start the server:
|
Next, start the server:
|
||||||
|
|
||||||
@@ -199,41 +207,85 @@ Next, start the server:
|
|||||||
Finally, in a separate shell, run a model:
|
Finally, in a separate shell, run a model:
|
||||||
|
|
||||||
```
|
```
|
||||||
./ollama run llama2
|
./ollama run llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
## REST API
|
## REST API
|
||||||
|
|
||||||
Ollama has a REST API for running and managing models.
|
Ollama has a REST API for running and managing models.
|
||||||
For example, to generate text from a model:
|
|
||||||
|
### Generate a response
|
||||||
|
|
||||||
```
|
```
|
||||||
curl http://localhost:11434/api/generate -d '{
|
curl http://localhost:11434/api/generate -d '{
|
||||||
"model": "llama2",
|
"model": "llama3",
|
||||||
"prompt":"Why is the sky blue?"
|
"prompt":"Why is the sky blue?"
|
||||||
}'
|
}'
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Chat with a model
|
||||||
|
|
||||||
|
```
|
||||||
|
curl http://localhost:11434/api/chat -d '{
|
||||||
|
"model": "llama3",
|
||||||
|
"messages": [
|
||||||
|
{ "role": "user", "content": "why is the sky blue?" }
|
||||||
|
]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
See the [API documentation](./docs/api.md) for all endpoints.
|
See the [API documentation](./docs/api.md) for all endpoints.
|
||||||
|
|
||||||
## Community Integrations
|
## Community Integrations
|
||||||
|
|
||||||
### Mobile
|
|
||||||
|
|
||||||
- [Mobile Artificial Intelligence Distribution](https://github.com/MaidFoundation/Maid) (Maid)
|
|
||||||
|
|
||||||
### Web & Desktop
|
### Web & Desktop
|
||||||
|
|
||||||
|
- [Open WebUI](https://github.com/open-webui/open-webui)
|
||||||
|
- [Enchanted (macOS native)](https://github.com/AugustDev/enchanted)
|
||||||
|
- [Hollama](https://github.com/fmaclen/hollama)
|
||||||
|
- [Lollms-Webui](https://github.com/ParisNeo/lollms-webui)
|
||||||
|
- [LibreChat](https://github.com/danny-avila/LibreChat)
|
||||||
|
- [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt)
|
||||||
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
|
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
|
||||||
|
- [Saddle](https://github.com/jikkuatwork/saddle)
|
||||||
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
|
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
|
||||||
|
- [Chatbot UI v2](https://github.com/mckaywrigley/chatbot-ui)
|
||||||
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
|
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
|
||||||
- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
|
- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
|
||||||
- [Web UI](https://github.com/ollama-webui/ollama-webui)
|
|
||||||
- [Ollamac](https://github.com/kevinhermawan/Ollamac)
|
- [Ollamac](https://github.com/kevinhermawan/Ollamac)
|
||||||
- [big-AGI](https://github.com/enricoros/big-agi/blob/main/docs/config-ollama.md)
|
- [big-AGI](https://github.com/enricoros/big-AGI/blob/main/docs/config-local-ollama.md)
|
||||||
- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
|
- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
|
||||||
- [Amica](https://github.com/semperai/amica)
|
- [Amica](https://github.com/semperai/amica)
|
||||||
- [chatd](https://github.com/BruceMacD/chatd)
|
- [chatd](https://github.com/BruceMacD/chatd)
|
||||||
|
- [Ollama-SwiftUI](https://github.com/kghandour/Ollama-SwiftUI)
|
||||||
|
- [Dify.AI](https://github.com/langgenius/dify)
|
||||||
|
- [MindMac](https://mindmac.app)
|
||||||
|
- [NextJS Web Interface for Ollama](https://github.com/jakobhoeg/nextjs-ollama-llm-ui)
|
||||||
|
- [Msty](https://msty.app)
|
||||||
|
- [Chatbox](https://github.com/Bin-Huang/Chatbox)
|
||||||
|
- [WinForm Ollama Copilot](https://github.com/tgraupmann/WinForm_Ollama_Copilot)
|
||||||
|
- [NextChat](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web) with [Get Started Doc](https://docs.nextchat.dev/models/ollama)
|
||||||
|
- [Alpaca WebUI](https://github.com/mmo80/alpaca-webui)
|
||||||
|
- [OllamaGUI](https://github.com/enoch1118/ollamaGUI)
|
||||||
|
- [OpenAOE](https://github.com/InternLM/OpenAOE)
|
||||||
|
- [Odin Runes](https://github.com/leonid20000/OdinRunes)
|
||||||
|
- [LLM-X](https://github.com/mrdjohnson/llm-x) (Progressive Web App)
|
||||||
|
- [AnythingLLM (Docker + MacOs/Windows/Linux native app)](https://github.com/Mintplex-Labs/anything-llm)
|
||||||
|
- [Ollama Basic Chat: Uses HyperDiv Reactive UI](https://github.com/rapidarchitect/ollama_basic_chat)
|
||||||
|
- [Ollama-chats RPG](https://github.com/drazdra/ollama-chats)
|
||||||
|
- [QA-Pilot](https://github.com/reid41/QA-Pilot) (Chat with Code Repository)
|
||||||
|
- [ChatOllama](https://github.com/sugarforever/chat-ollama) (Open Source Chatbot based on Ollama with Knowledge Bases)
|
||||||
|
- [CRAG Ollama Chat](https://github.com/Nagi-ovo/CRAG-Ollama-Chat) (Simple Web Search with Corrective RAG)
|
||||||
|
- [RAGFlow](https://github.com/infiniflow/ragflow) (Open-source Retrieval-Augmented Generation engine based on deep document understanding)
|
||||||
|
- [StreamDeploy](https://github.com/StreamDeploy-DevRel/streamdeploy-llm-app-scaffold) (LLM Application Scaffold)
|
||||||
|
- [chat](https://github.com/swuecho/chat) (chat web app for teams)
|
||||||
|
- [Lobe Chat](https://github.com/lobehub/lobe-chat) with [Integrating Doc](https://lobehub.com/docs/self-hosting/examples/ollama)
|
||||||
|
- [Ollama RAG Chatbot](https://github.com/datvodinh/rag-chatbot.git) (Local Chat with multiple PDFs using Ollama and RAG)
|
||||||
|
- [BrainSoup](https://www.nurgo-software.com/products/brainsoup) (Flexible native client with RAG & multi-agent automation)
|
||||||
|
- [macai](https://github.com/Renset/macai) (macOS client for Ollama, ChatGPT, and other compatible API back-ends)
|
||||||
|
- [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
|
||||||
|
- [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
|
||||||
|
- [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama)
|
||||||
|
|
||||||
### Terminal
|
### Terminal
|
||||||
|
|
||||||
@@ -242,31 +294,67 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
- [Emacs client](https://github.com/zweifisch/ollama)
|
- [Emacs client](https://github.com/zweifisch/ollama)
|
||||||
- [gen.nvim](https://github.com/David-Kunz/gen.nvim)
|
- [gen.nvim](https://github.com/David-Kunz/gen.nvim)
|
||||||
- [ollama.nvim](https://github.com/nomnivore/ollama.nvim)
|
- [ollama.nvim](https://github.com/nomnivore/ollama.nvim)
|
||||||
|
- [ollero.nvim](https://github.com/marco-souza/ollero.nvim)
|
||||||
|
- [ollama-chat.nvim](https://github.com/gerazov/ollama-chat.nvim)
|
||||||
- [ogpt.nvim](https://github.com/huynle/ogpt.nvim)
|
- [ogpt.nvim](https://github.com/huynle/ogpt.nvim)
|
||||||
- [gptel Emacs client](https://github.com/karthink/gptel)
|
- [gptel Emacs client](https://github.com/karthink/gptel)
|
||||||
- [Oatmeal](https://github.com/dustinblackman/oatmeal)
|
- [Oatmeal](https://github.com/dustinblackman/oatmeal)
|
||||||
|
- [cmdh](https://github.com/pgibler/cmdh)
|
||||||
|
- [ooo](https://github.com/npahlfer/ooo)
|
||||||
|
- [shell-pilot](https://github.com/reid41/shell-pilot)
|
||||||
|
- [tenere](https://github.com/pythops/tenere)
|
||||||
|
- [llm-ollama](https://github.com/taketwo/llm-ollama) for [Datasette's LLM CLI](https://llm.datasette.io/en/stable/).
|
||||||
|
- [typechat-cli](https://github.com/anaisbetts/typechat-cli)
|
||||||
|
- [ShellOracle](https://github.com/djcopley/ShellOracle)
|
||||||
|
- [tlm](https://github.com/yusufcanb/tlm)
|
||||||
|
- [podman-ollama](https://github.com/ericcurtin/podman-ollama)
|
||||||
|
- [gollama](https://github.com/sammcj/gollama)
|
||||||
|
|
||||||
|
### Database
|
||||||
|
|
||||||
|
- [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md) (Connects Ollama models with nearly 200 data platforms and apps)
|
||||||
|
- [chromem-go](https://github.com/philippgille/chromem-go/blob/v0.5.0/embed_ollama.go) with [example](https://github.com/philippgille/chromem-go/tree/v0.5.0/examples/rag-wikipedia-ollama)
|
||||||
|
|
||||||
### Package managers
|
### Package managers
|
||||||
|
|
||||||
- [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
|
- [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
|
||||||
|
- [Helm Chart](https://artifacthub.io/packages/helm/ollama-helm/ollama)
|
||||||
|
- [Guix channel](https://codeberg.org/tusharhero/ollama-guix)
|
||||||
|
|
||||||
### Libraries
|
### Libraries
|
||||||
|
|
||||||
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
|
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
|
||||||
- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
|
- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
|
||||||
|
- [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java)
|
||||||
|
- [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs)
|
||||||
- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
|
- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
|
||||||
- [LiteLLM](https://github.com/BerriAI/litellm)
|
- [LiteLLM](https://github.com/BerriAI/litellm)
|
||||||
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
|
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
|
||||||
|
- [Ollama for Ruby](https://github.com/gbaptista/ollama-ai)
|
||||||
- [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
|
- [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
|
||||||
|
- [Ollama-hpp for C++](https://github.com/jmont-dev/ollama-hpp)
|
||||||
- [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
|
- [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
|
||||||
- [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
|
- [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
|
||||||
- [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)
|
- [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)
|
||||||
- [Ollama for Dart](https://github.com/breitburg/dart-ollama)
|
- [Ollama for Dart](https://github.com/breitburg/dart-ollama)
|
||||||
- [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel)
|
- [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel)
|
||||||
|
- [LangChainDart](https://github.com/davidmigloz/langchain_dart)
|
||||||
|
- [Semantic Kernel - Python](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai/ollama)
|
||||||
|
- [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md)
|
||||||
|
- [Elixir LangChain](https://github.com/brainlid/langchain)
|
||||||
|
- [Ollama for R - rollama](https://github.com/JBGruber/rollama)
|
||||||
|
- [Ollama for R - ollama-r](https://github.com/hauselin/ollama-r)
|
||||||
|
- [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex)
|
||||||
|
- [Ollama Connector for SAP ABAP](https://github.com/b-tocs/abap_btocs_ollama)
|
||||||
|
- [Testcontainers](https://testcontainers.com/modules/ollama/)
|
||||||
|
- [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama)
|
||||||
|
- [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) with an [example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama)
|
||||||
|
- [LlamaScript](https://github.com/Project-Llama/llamascript)
|
||||||
|
|
||||||
### Mobile
|
### Mobile
|
||||||
|
|
||||||
- [Maid](https://github.com/danemadsen/Maid) (Mobile Artificial Intelligence Distribution)
|
- [Enchanted](https://github.com/AugustDev/enchanted)
|
||||||
|
- [Maid](https://github.com/Mobile-Artificial-Intelligence/maid)
|
||||||
|
|
||||||
### Extensions & Plugins
|
### Extensions & Plugins
|
||||||
|
|
||||||
@@ -275,9 +363,29 @@ See the [API documentation](./docs/api.md) for all endpoints.
|
|||||||
- [Continue](https://github.com/continuedev/continue)
|
- [Continue](https://github.com/continuedev/continue)
|
||||||
- [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)
|
- [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)
|
||||||
- [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq)
|
- [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq)
|
||||||
|
- [NotesOllama](https://github.com/andersrex/notesollama) (Apple Notes Ollama plugin)
|
||||||
- [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)
|
- [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)
|
||||||
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)
|
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)
|
||||||
|
- [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram)
|
||||||
- [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)
|
- [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)
|
||||||
- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
|
- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
|
||||||
- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
|
|
||||||
- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
|
- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
|
||||||
|
- [Cliobot](https://github.com/herval/cliobot) (Telegram bot with Ollama support)
|
||||||
|
- [Copilot for Obsidian plugin](https://github.com/logancyang/obsidian-copilot)
|
||||||
|
- [Obsidian Local GPT plugin](https://github.com/pfrankov/obsidian-local-gpt)
|
||||||
|
- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama)
|
||||||
|
- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
|
||||||
|
- [Ollama Copilot](https://github.com/bernardo-bruning/ollama-copilot) (Proxy that allows you to use ollama as a copilot like Github copilot)
|
||||||
|
- [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama)
|
||||||
|
- [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)
|
||||||
|
- [Page Assist](https://github.com/n4ze3m/page-assist) (Chrome Extension)
|
||||||
|
- [AI Telegram Bot](https://github.com/tusharhero/aitelegrambot) (Telegram bot using Ollama in backend)
|
||||||
|
- [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
|
||||||
|
- [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord Bot w/ Tuning Documentation)
|
||||||
|
- [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in python. Uses Ollama to create personalities.
|
||||||
|
- [Headless Ollama](https://github.com/nischalj10/headless-ollama) (Scripts to automatically install ollama client & models on any OS for apps that depends on ollama server)
|
||||||
|
|
||||||
|
### Supported backends
|
||||||
|
|
||||||
|
- [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.
|
||||||
|
|
||||||
|
|||||||
api/client.go: 187 changes
@@ -1,3 +1,16 @@
|
// Package api implements the client-side API for code wishing to interact
|
||||||
|
// with the ollama service. The methods of the [Client] type correspond to
|
||||||
|
// the ollama REST API as described in [the API documentation].
|
||||||
|
// The ollama command-line client itself uses this package to interact with
|
||||||
|
// the backend service.
|
||||||
|
//
|
||||||
|
// # Examples
|
||||||
|
//
|
||||||
|
// Several examples of using this package are available [in the GitHub
|
||||||
|
// repository].
|
||||||
|
//
|
||||||
|
// [the API documentation]: https://github.com/ollama/ollama/blob/main/docs/api.md
|
||||||
|
// [in the GitHub repository]: https://github.com/ollama/ollama/tree/main/examples
|
||||||
package api
|
package api
|
||||||
|
|
||||||
import (
|
import (
|
||||||
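For reference, a minimal sketch of driving this package from a caller, assuming a local ollama server reachable via the default OLLAMA_HOST and using an example model name; error handling is kept short.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	// Build a client from OLLAMA_HOST (falls back to the default host and port).
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := &api.GenerateRequest{
		Model:  "llama3", // example model; any locally pulled model works
		Prompt: "Why is the sky blue?",
	}

	// The callback runs once per streamed response chunk.
	err = client.Generate(context.Background(), req, func(resp api.GenerateResponse) error {
		fmt.Print(resp.Response)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```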
@@ -5,23 +18,23 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/jmorganca/ollama/format"
|
"github.com/ollama/ollama/envconfig"
|
||||||
"github.com/jmorganca/ollama/version"
|
"github.com/ollama/ollama/format"
|
||||||
|
"github.com/ollama/ollama/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Client encapsulates client state for interacting with the ollama
|
||||||
|
// service. Use [ClientFromEnvironment] to create new Clients.
|
||||||
type Client struct {
|
type Client struct {
|
||||||
base *url.URL
|
base *url.URL
|
||||||
http http.Client
|
http *http.Client
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkError(resp *http.Response, body []byte) error {
|
func checkError(resp *http.Response, body []byte) error {
|
||||||
@@ -40,56 +53,32 @@ func checkError(resp *http.Response, body []byte) error {
|
|||||||
return apiError
|
return apiError
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ClientFromEnvironment creates a new [Client] using configuration from the
|
||||||
|
// environment variable OLLAMA_HOST, which points to the network host and
|
||||||
|
// port on which the ollama service is listening. The format of this variable
|
||||||
|
// is:
|
||||||
|
//
|
||||||
|
// <scheme>://<host>:<port>
|
||||||
|
//
|
||||||
|
// If the variable is not specified, a default ollama host and port will be
|
||||||
|
// used.
|
||||||
func ClientFromEnvironment() (*Client, error) {
|
func ClientFromEnvironment() (*Client, error) {
|
||||||
defaultPort := "11434"
|
ollamaHost := envconfig.Host
|
||||||
|
|
||||||
scheme, hostport, ok := strings.Cut(os.Getenv("OLLAMA_HOST"), "://")
|
return &Client{
|
||||||
switch {
|
|
||||||
case !ok:
|
|
||||||
scheme, hostport = "http", os.Getenv("OLLAMA_HOST")
|
|
||||||
case scheme == "http":
|
|
||||||
defaultPort = "80"
|
|
||||||
case scheme == "https":
|
|
||||||
defaultPort = "443"
|
|
||||||
}
|
|
||||||
|
|
||||||
// trim trailing slashes
|
|
||||||
hostport = strings.TrimRight(hostport, "/")
|
|
||||||
|
|
||||||
host, port, err := net.SplitHostPort(hostport)
|
|
||||||
if err != nil {
|
|
||||||
host, port = "127.0.0.1", defaultPort
|
|
||||||
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
|
|
||||||
host = ip.String()
|
|
||||||
} else if hostport != "" {
|
|
||||||
host = hostport
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
client := Client{
|
|
||||||
base: &url.URL{
|
base: &url.URL{
|
||||||
Scheme: scheme,
|
Scheme: ollamaHost.Scheme,
|
||||||
Host: net.JoinHostPort(host, port),
|
Host: net.JoinHostPort(ollamaHost.Host, ollamaHost.Port),
|
||||||
},
|
},
|
||||||
}
|
http: http.DefaultClient,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
mockRequest, err := http.NewRequest(http.MethodHead, client.base.String(), nil)
|
func NewClient(base *url.URL, http *http.Client) *Client {
|
||||||
if err != nil {
|
return &Client{
|
||||||
return nil, err
|
base: base,
|
||||||
|
http: http,
|
||||||
}
|
}
|
||||||
|
|
||||||
proxyURL, err := http.ProxyFromEnvironment(mockRequest)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
client.http = http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Proxy: http.ProxyURL(proxyURL),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
return &client, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
|
func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
|
||||||
@@ -208,8 +197,14 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GenerateResponseFunc is a function that [Client.Generate] invokes every time
|
||||||
|
// a response is received from the service. If this function returns an error,
|
||||||
|
// [Client.Generate] will stop generating and return this error.
|
||||||
type GenerateResponseFunc func(GenerateResponse) error
|
type GenerateResponseFunc func(GenerateResponse) error
|
||||||
|
|
||||||
|
// Generate generates a response for a given prompt. The req parameter should
|
||||||
|
// be populated with prompt details. fn is called for each response (there may
|
||||||
|
// be multiple responses, e.g. in case streaming is enabled).
|
||||||
func (c *Client) Generate(ctx context.Context, req *GenerateRequest, fn GenerateResponseFunc) error {
|
func (c *Client) Generate(ctx context.Context, req *GenerateRequest, fn GenerateResponseFunc) error {
|
||||||
return c.stream(ctx, http.MethodPost, "/api/generate", req, func(bts []byte) error {
|
return c.stream(ctx, http.MethodPost, "/api/generate", req, func(bts []byte) error {
|
||||||
var resp GenerateResponse
|
var resp GenerateResponse
|
||||||
@@ -221,8 +216,34 @@ func (c *Client) Generate(ctx context.Context, req *GenerateRequest, fn Generate
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ChatResponseFunc is a function that [Client.Chat] invokes every time
|
||||||
|
// a response is received from the service. If this function returns an error,
|
||||||
|
// [Client.Chat] will stop generating and return this error.
|
||||||
|
type ChatResponseFunc func(ChatResponse) error
|
||||||
|
|
||||||
|
// Chat generates the next message in a chat. [ChatRequest] may contain a
|
||||||
|
// sequence of messages which can be used to maintain chat history with a model.
|
||||||
|
// fn is called for each response (there may be multiple responses, e.g. in case
|
||||||
|
// streaming is enabled).
|
||||||
|
func (c *Client) Chat(ctx context.Context, req *ChatRequest, fn ChatResponseFunc) error {
|
||||||
|
return c.stream(ctx, http.MethodPost, "/api/chat", req, func(bts []byte) error {
|
||||||
|
var resp ChatResponse
|
||||||
|
if err := json.Unmarshal(bts, &resp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return fn(resp)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// PullProgressFunc is a function that [Client.Pull] invokes every time there
|
||||||
|
// is progress with a "pull" request sent to the service. If this function
|
||||||
|
// returns an error, [Client.Pull] will stop the process and return this error.
|
||||||
type PullProgressFunc func(ProgressResponse) error
|
type PullProgressFunc func(ProgressResponse) error
|
||||||
|
|
||||||
|
// Pull downloads a model from the ollama library. fn is called each time
|
||||||
|
// progress is made on the request and can be used to display a progress bar,
|
||||||
|
// etc.
|
||||||
func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc) error {
|
func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc) error {
|
||||||
return c.stream(ctx, http.MethodPost, "/api/pull", req, func(bts []byte) error {
|
return c.stream(ctx, http.MethodPost, "/api/pull", req, func(bts []byte) error {
|
||||||
var resp ProgressResponse
|
var resp ProgressResponse
|
||||||
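Similarly, a short sketch of the Chat method added in the hunk above, again assuming a running server and an example model name; with streaming enabled (the default) the callback fires once per response chunk.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := &api.ChatRequest{
		Model: "llama3", // example model name
		Messages: []api.Message{
			{Role: "user", Content: "why is the sky blue?"},
		},
	}

	// Each streamed chunk carries the next piece of the assistant message.
	err = client.Chat(context.Background(), req, func(resp api.ChatResponse) error {
		fmt.Print(resp.Message.Content)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```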
@@ -234,8 +255,14 @@ func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PushProgressFunc is a function that [Client.Push] invokes when progress is
|
||||||
|
// made.
|
||||||
|
// It's similar to other progress function types like [PullProgressFunc].
|
||||||
type PushProgressFunc func(ProgressResponse) error
|
type PushProgressFunc func(ProgressResponse) error
|
||||||
|
|
||||||
|
// Push uploads a model to the model library; requires registering for ollama.ai
|
||||||
|
// and adding a public key first. fn is called each time progress is made on
|
||||||
|
// the request and can be used to display a progress bar, etc.
|
||||||
func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc) error {
|
func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc) error {
|
||||||
return c.stream(ctx, http.MethodPost, "/api/push", req, func(bts []byte) error {
|
return c.stream(ctx, http.MethodPost, "/api/push", req, func(bts []byte) error {
|
||||||
var resp ProgressResponse
|
var resp ProgressResponse
|
||||||
@@ -247,8 +274,15 @@ func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CreateProgressFunc is a function that [Client.Create] invokes when progress
|
||||||
|
// is made.
|
||||||
|
// It's similar to other progress function types like [PullProgressFunc].
|
||||||
type CreateProgressFunc func(ProgressResponse) error
|
type CreateProgressFunc func(ProgressResponse) error
|
||||||
|
|
||||||
|
// Create creates a model from a [Modelfile]. fn is a progress function that
|
||||||
|
// behaves similarly to other methods (see [Client.Pull]).
|
||||||
|
//
|
||||||
|
// [Modelfile]: https://github.com/ollama/ollama/blob/main/docs/modelfile.md
|
||||||
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
|
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
|
||||||
return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
|
return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
|
||||||
var resp ProgressResponse
|
var resp ProgressResponse
|
||||||
@@ -260,6 +294,7 @@ func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgre
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// List lists models that are available locally.
|
||||||
func (c *Client) List(ctx context.Context) (*ListResponse, error) {
|
func (c *Client) List(ctx context.Context) (*ListResponse, error) {
|
||||||
var lr ListResponse
|
var lr ListResponse
|
||||||
if err := c.do(ctx, http.MethodGet, "/api/tags", nil, &lr); err != nil {
|
if err := c.do(ctx, http.MethodGet, "/api/tags", nil, &lr); err != nil {
|
||||||
@@ -268,6 +303,17 @@ func (c *Client) List(ctx context.Context) (*ListResponse, error) {
|
|||||||
return &lr, nil
|
return &lr, nil
|
||||||
}
|
}
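For orientation, here is a minimal sketch (an editorial aside, not part of the diff) of how a caller might drive the streaming pull API shown above. It assumes the `github.com/ollama/ollama/api` import path used elsewhere in this compare view; the model name is a placeholder.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Client.Pull streams ProgressResponse values; the callback runs once per update.
	req := &api.PullRequest{Model: "llama3"} // placeholder model name
	err = client.Pull(context.Background(), req, func(p api.ProgressResponse) error {
		fmt.Printf("%s: %d/%d\n", p.Status, p.Completed, p.Total)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```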
 
+// List running models.
+func (c *Client) ListRunning(ctx context.Context) (*ProcessResponse, error) {
+	var lr ProcessResponse
+	if err := c.do(ctx, http.MethodGet, "/api/ps", nil, &lr); err != nil {
+		return nil, err
+	}
+	return &lr, nil
+}
+
+// Copy copies a model - creating a model with another name from an existing
+// model.
 func (c *Client) Copy(ctx context.Context, req *CopyRequest) error {
 	if err := c.do(ctx, http.MethodPost, "/api/copy", req, nil); err != nil {
 		return err
@@ -275,6 +321,7 @@ func (c *Client) Copy(ctx context.Context, req *CopyRequest) error {
 	return nil
 }
 
+// Delete deletes a model and its data.
 func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
 	if err := c.do(ctx, http.MethodDelete, "/api/delete", req, nil); err != nil {
 		return err
@@ -282,6 +329,7 @@ func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
 	return nil
 }
 
+// Show obtains model information, including details, modelfile, license etc.
 func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, error) {
 	var resp ShowResponse
 	if err := c.do(ctx, http.MethodPost, "/api/show", req, &resp); err != nil {
@@ -290,6 +338,8 @@ func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, err
 	return &resp, nil
 }
 
+// Heartbeat checks if the server has started and is responsive; if yes, it
+// returns nil, otherwise an error.
 func (c *Client) Heartbeat(ctx context.Context) error {
 	if err := c.do(ctx, http.MethodHead, "/", nil, nil); err != nil {
 		return err
@@ -297,17 +347,30 @@ func (c *Client) Heartbeat(ctx context.Context) error {
 	return nil
 }
 
-func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
-	if err := c.do(ctx, http.MethodHead, fmt.Sprintf("/api/blobs/%s", digest), nil, nil); err != nil {
-		var statusError StatusError
-		if !errors.As(err, &statusError) || statusError.StatusCode != http.StatusNotFound {
-			return err
-		}
-
-		if err := c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil); err != nil {
-			return err
-		}
-	}
-
-	return nil
+// Embeddings generates embeddings from a model.
+func (c *Client) Embeddings(ctx context.Context, req *EmbeddingRequest) (*EmbeddingResponse, error) {
+	var resp EmbeddingResponse
+	if err := c.do(ctx, http.MethodPost, "/api/embeddings", req, &resp); err != nil {
+		return nil, err
+	}
+	return &resp, nil
+}
+
+// CreateBlob creates a blob from a file on the server. digest is the
+// expected SHA256 digest of the file, and r represents the file.
+func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
+	return c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil)
+}
+
+// Version returns the Ollama server version as a string.
+func (c *Client) Version(ctx context.Context) (string, error) {
+	var version struct {
+		Version string `json:"version"`
+	}
+
+	if err := c.do(ctx, http.MethodGet, "/api/version", nil, &version); err != nil {
+		return "", err
+	}
+
+	return version.Version, nil
 }
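As a usage sketch (an aside, not from the diff): the new Embeddings and Version helpers can be exercised like this. The model name and prompt are placeholders, and the surrounding imports (`context`, `fmt`, and the api package) are assumed.

```go
// printEmbeddingInfo is illustrative only; it is not part of the repository.
func printEmbeddingInfo(ctx context.Context, client *api.Client) error {
	resp, err := client.Embeddings(ctx, &api.EmbeddingRequest{
		Model:  "all-minilm", // placeholder embedding-capable model
		Prompt: "The quick brown fox",
	})
	if err != nil {
		return err
	}

	v, err := client.Version(ctx)
	if err != nil {
		return err
	}

	fmt.Printf("server %s returned a %d-dimensional embedding\n", v, len(resp.Embedding))
	return nil
}
```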
|
||||||
|
|||||||
284 api/client.py
@@ -1,284 +0,0 @@
|
|||||||
import os
|
|
||||||
import json
|
|
||||||
import requests
|
|
||||||
import os
|
|
||||||
import hashlib
|
|
||||||
import json
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
|
|
||||||
|
|
||||||
# Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses.
|
|
||||||
# The final response object will include statistics and additional data from the request. Use the callback function to override
|
|
||||||
# the default handler.
|
|
||||||
def generate(model_name, prompt, system=None, template=None, format="", context=None, options=None, callback=None):
|
|
||||||
try:
|
|
||||||
url = f"{BASE_URL}/api/generate"
|
|
||||||
payload = {
|
|
||||||
"model": model_name,
|
|
||||||
"prompt": prompt,
|
|
||||||
"system": system,
|
|
||||||
"template": template,
|
|
||||||
"context": context,
|
|
||||||
"options": options,
|
|
||||||
"format": format,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Remove keys with None values
|
|
||||||
payload = {k: v for k, v in payload.items() if v is not None}
|
|
||||||
|
|
||||||
with requests.post(url, json=payload, stream=True) as response:
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
# Creating a variable to hold the context history of the final chunk
|
|
||||||
final_context = None
|
|
||||||
|
|
||||||
# Variable to hold concatenated response strings if no callback is provided
|
|
||||||
full_response = ""
|
|
||||||
|
|
||||||
# Iterating over the response line by line and displaying the details
|
|
||||||
for line in response.iter_lines():
|
|
||||||
if line:
|
|
||||||
# Parsing each line (JSON chunk) and extracting the details
|
|
||||||
chunk = json.loads(line)
|
|
||||||
|
|
||||||
# If a callback function is provided, call it with the chunk
|
|
||||||
if callback:
|
|
||||||
callback(chunk)
|
|
||||||
else:
|
|
||||||
# If this is not the last chunk, add the "response" field value to full_response and print it
|
|
||||||
if not chunk.get("done"):
|
|
||||||
response_piece = chunk.get("response", "")
|
|
||||||
full_response += response_piece
|
|
||||||
print(response_piece, end="", flush=True)
|
|
||||||
|
|
||||||
# Check if it's the last chunk (done is true)
|
|
||||||
if chunk.get("done"):
|
|
||||||
final_context = chunk.get("context")
|
|
||||||
|
|
||||||
# Return the full response and the final context
|
|
||||||
return full_response, final_context
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
return None, None
|
|
||||||
|
|
||||||
|
|
||||||
# Create a blob file on the server if it doesn't exist.
|
|
||||||
def create_blob(digest, file_path):
|
|
||||||
url = f"{BASE_URL}/api/blobs/{digest}"
|
|
||||||
|
|
||||||
# Check if the blob exists
|
|
||||||
response = requests.head(url)
|
|
||||||
if response.status_code != 404:
|
|
||||||
return # Blob already exists, no need to upload
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
# Upload the blob
|
|
||||||
with open(file_path, 'rb') as file_data:
|
|
||||||
requests.post(url, data=file_data)
|
|
||||||
|
|
||||||
|
|
||||||
# Create a model from a Modelfile. Use the callback function to override the default handler.
|
|
||||||
def create(model_name, filename, callback=None):
|
|
||||||
try:
|
|
||||||
file_path = Path(filename).expanduser().resolve()
|
|
||||||
processed_lines = []
|
|
||||||
|
|
||||||
# Read and process the modelfile
|
|
||||||
with open(file_path, 'r') as f:
|
|
||||||
for line in f:
|
|
||||||
# Skip empty or whitespace-only lines
|
|
||||||
if not line.strip():
|
|
||||||
continue
|
|
||||||
|
|
||||||
command, args = line.split(maxsplit=1)
|
|
||||||
|
|
||||||
if command.upper() in ["FROM", "ADAPTER"]:
|
|
||||||
path = Path(args.strip()).expanduser()
|
|
||||||
|
|
||||||
# Check if path is relative and resolve it
|
|
||||||
if not path.is_absolute():
|
|
||||||
path = (file_path.parent / path)
|
|
||||||
|
|
||||||
# Skip if file does not exist for "model", this is handled by the server
|
|
||||||
if not path.exists():
|
|
||||||
processed_lines.append(line)
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Calculate SHA-256 hash
|
|
||||||
with open(path, 'rb') as bin_file:
|
|
||||||
hash = hashlib.sha256()
|
|
||||||
hash.update(bin_file.read())
|
|
||||||
blob = f"sha256:{hash.hexdigest()}"
|
|
||||||
|
|
||||||
# Add the file to the remote server
|
|
||||||
create_blob(blob, path)
|
|
||||||
|
|
||||||
# Replace path with digest in the line
|
|
||||||
line = f"{command} @{blob}\n"
|
|
||||||
|
|
||||||
processed_lines.append(line)
|
|
||||||
|
|
||||||
# Combine processed lines back into a single string
|
|
||||||
modelfile_content = '\n'.join(processed_lines)
|
|
||||||
|
|
||||||
url = f"{BASE_URL}/api/create"
|
|
||||||
payload = {"name": model_name, "modelfile": modelfile_content}
|
|
||||||
|
|
||||||
# Making a POST request with the stream parameter set to True to handle streaming responses
|
|
||||||
with requests.post(url, json=payload, stream=True) as response:
|
|
||||||
response.raise_for_status()
|
|
||||||
# Iterating over the response line by line and displaying the status
|
|
||||||
for line in response.iter_lines():
|
|
||||||
if line:
|
|
||||||
chunk = json.loads(line)
|
|
||||||
if callback:
|
|
||||||
callback(chunk)
|
|
||||||
else:
|
|
||||||
print(f"Status: {chunk.get('status')}")
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
|
|
||||||
|
|
||||||
# Pull a model from a the model registry. Cancelled pulls are resumed from where they left off, and multiple
|
|
||||||
# calls to will share the same download progress. Use the callback function to override the default handler.
|
|
||||||
def pull(model_name, insecure=False, callback=None):
|
|
||||||
try:
|
|
||||||
url = f"{BASE_URL}/api/pull"
|
|
||||||
payload = {
|
|
||||||
"name": model_name,
|
|
||||||
"insecure": insecure
|
|
||||||
}
|
|
||||||
|
|
||||||
# Making a POST request with the stream parameter set to True to handle streaming responses
|
|
||||||
with requests.post(url, json=payload, stream=True) as response:
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
# Iterating over the response line by line and displaying the details
|
|
||||||
for line in response.iter_lines():
|
|
||||||
if line:
|
|
||||||
# Parsing each line (JSON chunk) and extracting the details
|
|
||||||
chunk = json.loads(line)
|
|
||||||
|
|
||||||
# If a callback function is provided, call it with the chunk
|
|
||||||
if callback:
|
|
||||||
callback(chunk)
|
|
||||||
else:
|
|
||||||
# Print the status message directly to the console
|
|
||||||
print(chunk.get('status', ''), end='', flush=True)
|
|
||||||
|
|
||||||
# If there's layer data, you might also want to print that (adjust as necessary)
|
|
||||||
if 'digest' in chunk:
|
|
||||||
print(f" - Digest: {chunk['digest']}", end='', flush=True)
|
|
||||||
print(f" - Total: {chunk['total']}", end='', flush=True)
|
|
||||||
print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
|
|
||||||
else:
|
|
||||||
print()
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
|
|
||||||
# Push a model to the model registry. Use the callback function to override the default handler.
|
|
||||||
def push(model_name, insecure=False, callback=None):
|
|
||||||
try:
|
|
||||||
url = f"{BASE_URL}/api/push"
|
|
||||||
payload = {
|
|
||||||
"name": model_name,
|
|
||||||
"insecure": insecure
|
|
||||||
}
|
|
||||||
|
|
||||||
# Making a POST request with the stream parameter set to True to handle streaming responses
|
|
||||||
with requests.post(url, json=payload, stream=True) as response:
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
# Iterating over the response line by line and displaying the details
|
|
||||||
for line in response.iter_lines():
|
|
||||||
if line:
|
|
||||||
# Parsing each line (JSON chunk) and extracting the details
|
|
||||||
chunk = json.loads(line)
|
|
||||||
|
|
||||||
# If a callback function is provided, call it with the chunk
|
|
||||||
if callback:
|
|
||||||
callback(chunk)
|
|
||||||
else:
|
|
||||||
# Print the status message directly to the console
|
|
||||||
print(chunk.get('status', ''), end='', flush=True)
|
|
||||||
|
|
||||||
# If there's layer data, you might also want to print that (adjust as necessary)
|
|
||||||
if 'digest' in chunk:
|
|
||||||
print(f" - Digest: {chunk['digest']}", end='', flush=True)
|
|
||||||
print(f" - Total: {chunk['total']}", end='', flush=True)
|
|
||||||
print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
|
|
||||||
else:
|
|
||||||
print()
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
|
|
||||||
# List models that are available locally.
|
|
||||||
def list():
|
|
||||||
try:
|
|
||||||
response = requests.get(f"{BASE_URL}/api/tags")
|
|
||||||
response.raise_for_status()
|
|
||||||
data = response.json()
|
|
||||||
models = data.get('models', [])
|
|
||||||
return models
|
|
||||||
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
# Copy a model. Creates a model with another name from an existing model.
|
|
||||||
def copy(source, destination):
|
|
||||||
try:
|
|
||||||
# Create the JSON payload
|
|
||||||
payload = {
|
|
||||||
"source": source,
|
|
||||||
"destination": destination
|
|
||||||
}
|
|
||||||
|
|
||||||
response = requests.post(f"{BASE_URL}/api/copy", json=payload)
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
# If the request was successful, return a message indicating that the copy was successful
|
|
||||||
return "Copy successful"
|
|
||||||
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
# Delete a model and its data.
|
|
||||||
def delete(model_name):
|
|
||||||
try:
|
|
||||||
url = f"{BASE_URL}/api/delete"
|
|
||||||
payload = {"name": model_name}
|
|
||||||
response = requests.delete(url, json=payload)
|
|
||||||
response.raise_for_status()
|
|
||||||
return "Delete successful"
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
# Show info about a model.
|
|
||||||
def show(model_name):
|
|
||||||
try:
|
|
||||||
url = f"{BASE_URL}/api/show"
|
|
||||||
payload = {"name": model_name}
|
|
||||||
response = requests.post(url, json=payload)
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
# Parse the JSON response and return it
|
|
||||||
data = response.json()
|
|
||||||
return data
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
return None
|
|
||||||
|
|
||||||
def heartbeat():
|
|
||||||
try:
|
|
||||||
url = f"{BASE_URL}/"
|
|
||||||
response = requests.head(url)
|
|
||||||
response.raise_for_status()
|
|
||||||
return "Ollama is running"
|
|
||||||
except requests.exceptions.RequestException as e:
|
|
||||||
print(f"An error occurred: {e}")
|
|
||||||
return "Ollama is not running"
|
|
||||||
api/client_test.go
@@ -1,6 +1,10 @@
 package api
 
-import "testing"
+import (
+	"testing"
+
+	"github.com/ollama/ollama/envconfig"
+)
 
 func TestClientFromEnvironment(t *testing.T) {
 	type testCase struct {
@@ -29,6 +33,7 @@ func TestClientFromEnvironment(t *testing.T) {
 	for k, v := range testCases {
 		t.Run(k, func(t *testing.T) {
 			t.Setenv("OLLAMA_HOST", v.value)
+			envconfig.LoadConfig()
 
 			client, err := ClientFromEnvironment()
 			if err != v.err {
|||||||
301 api/types.go
@@ -3,6 +3,7 @@ package api
 import (
 	"encoding/json"
 	"fmt"
+	"log/slog"
 	"math"
 	"os"
 	"reflect"
@@ -11,6 +12,7 @@ import (
 	"time"
 )
 
+// StatusError is an error with an HTTP status code.
 type StatusError struct {
 	StatusCode int
 	Status     string
@@ -31,20 +33,107 @@ func (e StatusError) Error() string {
 	}
 }
 
+// ImageData represents the raw binary data of an image file.
+type ImageData []byte
+
+// GenerateRequest describes a request sent by [Client.Generate]. While you
+// have to specify the Model and Prompt fields, all the other fields have
+// reasonable defaults for basic uses.
 type GenerateRequest struct {
+	// Model is the model name; it should be a name familiar to Ollama from
+	// the library at https://ollama.com/library
 	Model string `json:"model"`
+
+	// Prompt is the textual prompt to send to the model.
 	Prompt string `json:"prompt"`
+
+	// System overrides the model's default system message/prompt.
 	System string `json:"system"`
+
+	// Template overrides the model's default prompt template.
 	Template string `json:"template"`
+
+	// Context is the context parameter returned from a previous call to
+	// [Client.Generate]. It can be used to keep a short conversational memory.
 	Context []int `json:"context,omitempty"`
+
+	// Stream specifies whether the response is streaming; it is true by default.
 	Stream *bool `json:"stream,omitempty"`
+
+	// Raw set to true means that no formatting will be applied to the prompt.
 	Raw bool `json:"raw,omitempty"`
+
+	// Format specifies the format to return a response in.
 	Format string `json:"format"`
+
+	// KeepAlive controls how long the model will stay loaded in memory following
+	// this request.
+	KeepAlive *Duration `json:"keep_alive,omitempty"`
+
+	// Images is an optional list of base64-encoded images accompanying this
+	// request, for multimodal models.
+	Images []ImageData `json:"images,omitempty"`
+
+	// Options lists model-specific options. For example, temperature can be
+	// set through this field, if the model supports it.
 	Options map[string]interface{} `json:"options"`
 }
 
-// Options specfied in GenerateRequest, if you add a new option here add it to the API docs also
+// ChatRequest describes a request sent by [Client.Chat].
+type ChatRequest struct {
+	// Model is the model name, as in [GenerateRequest].
+	Model string `json:"model"`
+
+	// Messages is the messages of the chat - can be used to keep a chat memory.
+	Messages []Message `json:"messages"`
+
+	// Stream enables streaming of the returned response; it is true by default.
+	Stream *bool `json:"stream,omitempty"`
+
+	// Format is the format to return the response in (e.g. "json").
+	Format string `json:"format"`
+
+	// KeepAlive controls how long the model will stay loaded into memory
+	// following the request.
+	KeepAlive *Duration `json:"keep_alive,omitempty"`
+
+	// Options lists model-specific options.
+	Options map[string]interface{} `json:"options"`
+}
+
+// Message is a single message in a chat sequence. The message contains the
+// role ("system", "user", or "assistant"), the content and an optional list
+// of images.
+type Message struct {
+	Role    string      `json:"role"`
+	Content string      `json:"content"`
+	Images  []ImageData `json:"images,omitempty"`
+}
+
+// ChatResponse is the response returned by [Client.Chat]. Its fields are
+// similar to [GenerateResponse].
+type ChatResponse struct {
+	Model      string    `json:"model"`
+	CreatedAt  time.Time `json:"created_at"`
+	Message    Message   `json:"message"`
+	DoneReason string    `json:"done_reason,omitempty"`
+
+	Done bool `json:"done"`
+
+	Metrics
+}
+
+type Metrics struct {
+	TotalDuration      time.Duration `json:"total_duration,omitempty"`
+	LoadDuration       time.Duration `json:"load_duration,omitempty"`
+	PromptEvalCount    int           `json:"prompt_eval_count,omitempty"`
+	PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"`
+	EvalCount          int           `json:"eval_count,omitempty"`
+	EvalDuration       time.Duration `json:"eval_duration,omitempty"`
+}
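An illustrative aside (not part of the diff): the chat types above are consumed by Client.Chat, whose streaming callback receives one ChatResponse per chunk. The sketch below assumes the `github.com/ollama/ollama/api` import and uses a placeholder model name.

```go
// chatOnce is a hypothetical helper, not code from this compare view.
func chatOnce(ctx context.Context, client *api.Client) error {
	req := &api.ChatRequest{
		Model: "llama3", // placeholder model name
		Messages: []api.Message{
			{Role: "system", Content: "You are a terse assistant."},
			{Role: "user", Content: "Why is the sky blue?"},
		},
	}
	// Each streamed ChatResponse carries a Message fragment; the final chunk
	// has Done set and the embedded Metrics populated.
	return client.Chat(ctx, req, func(resp api.ChatResponse) error {
		fmt.Print(resp.Message.Content)
		if resp.Done {
			fmt.Printf("\n(eval count: %d tokens)\n", resp.EvalCount)
		}
		return nil
	})
}
```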
 
+// Options specified in [GenerateRequest], if you add a new option here add it
+// to the API docs also.
 type Options struct {
 	Runner
@@ -73,7 +162,6 @@ type Runner struct {
 	UseNUMA  bool `json:"numa,omitempty"`
 	NumCtx   int  `json:"num_ctx,omitempty"`
 	NumBatch int  `json:"num_batch,omitempty"`
-	NumGQA   int  `json:"num_gqa,omitempty"`
 	NumGPU   int  `json:"num_gpu,omitempty"`
 	MainGPU  int  `json:"main_gpu,omitempty"`
 	LowVRAM  bool `json:"low_vram,omitempty"`
@@ -82,59 +170,96 @@ type Runner struct {
 	VocabOnly bool `json:"vocab_only,omitempty"`
 	UseMMap   bool `json:"use_mmap,omitempty"`
 	UseMLock  bool `json:"use_mlock,omitempty"`
-	EmbeddingOnly      bool    `json:"embedding_only,omitempty"`
-	RopeFrequencyBase  float32 `json:"rope_frequency_base,omitempty"`
-	RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
 	NumThread int `json:"num_thread,omitempty"`
 }
 
+// EmbeddingRequest is the request passed to [Client.Embeddings].
 type EmbeddingRequest struct {
+	// Model is the model name.
 	Model string `json:"model"`
+
+	// Prompt is the textual prompt to embed.
 	Prompt string `json:"prompt"`
+
+	// KeepAlive controls how long the model will stay loaded in memory following
+	// this request.
+	KeepAlive *Duration `json:"keep_alive,omitempty"`
+
+	// Options lists model-specific options.
 	Options map[string]interface{} `json:"options"`
 }
 
+// EmbeddingResponse is the response from [Client.Embeddings].
 type EmbeddingResponse struct {
 	Embedding []float64 `json:"embedding"`
 }
 
+// CreateRequest is the request passed to [Client.Create].
 type CreateRequest struct {
-	Name      string `json:"name"`
+	Model     string `json:"model"`
 	Path      string `json:"path"`
 	Modelfile string `json:"modelfile"`
 	Stream    *bool  `json:"stream,omitempty"`
+	Quantize  string `json:"quantize,omitempty"`
+
+	// Name is deprecated, see Model
+	Name string `json:"name"`
+
+	// Quantization is deprecated, see Quantize
+	Quantization string `json:"quantization,omitempty"`
 }
 
+// DeleteRequest is the request passed to [Client.Delete].
 type DeleteRequest struct {
+	Model string `json:"model"`
+
+	// Name is deprecated, see Model
 	Name string `json:"name"`
 }
 
+// ShowRequest is the request passed to [Client.Show].
 type ShowRequest struct {
+	Model    string `json:"model"`
+	System   string `json:"system"`
+	Template string `json:"template"`
+
+	Options map[string]interface{} `json:"options"`
+
+	// Name is deprecated, see Model
 	Name string `json:"name"`
 }
 
+// ShowResponse is the response returned from [Client.Show].
 type ShowResponse struct {
 	License    string `json:"license,omitempty"`
 	Modelfile  string `json:"modelfile,omitempty"`
 	Parameters string `json:"parameters,omitempty"`
 	Template   string `json:"template,omitempty"`
 	System     string `json:"system,omitempty"`
+	Details    ModelDetails `json:"details,omitempty"`
+	Messages   []Message    `json:"messages,omitempty"`
 }
 
+// CopyRequest is the request passed to [Client.Copy].
 type CopyRequest struct {
 	Source      string `json:"source"`
 	Destination string `json:"destination"`
 }
 
+// PullRequest is the request passed to [Client.Pull].
 type PullRequest struct {
-	Name     string `json:"name"`
+	Model    string `json:"model"`
 	Insecure bool   `json:"insecure,omitempty"`
 	Username string `json:"username"`
 	Password string `json:"password"`
 	Stream   *bool  `json:"stream,omitempty"`
+
+	// Name is deprecated, see Model
+	Name string `json:"name"`
 }
 
+// ProgressResponse is the response passed to progress functions like
+// [PullProgressFunc] and [PushProgressFunc].
 type ProgressResponse struct {
 	Status string `json:"status"`
 	Digest string `json:"digest,omitempty"`
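A brief aside (not from the diff): requests now carry Model, with Name retained only as a deprecated alias, and CreateRequest gains Quantize. A hypothetical sketch of creating a model from an inline Modelfile; the model names and the quantization level are placeholders and assume the server accepts them.

```go
// createQuantized is illustrative only, not code from this repository.
func createQuantized(ctx context.Context, client *api.Client) error {
	req := &api.CreateRequest{
		Model:     "mario",                              // placeholder target name
		Modelfile: "FROM llama3\nSYSTEM You are Mario.", // placeholder Modelfile contents
		Quantize:  "q4_0",                               // assumption: a quantization level the server supports
	}
	return client.Create(ctx, req, func(p api.ProgressResponse) error {
		fmt.Println(p.Status)
		return nil
	})
}
```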
@@ -142,74 +267,114 @@ type ProgressResponse struct {
 	Completed int64 `json:"completed,omitempty"`
 }
 
+// PushRequest is the request passed to [Client.Push].
 type PushRequest struct {
-	Name     string `json:"name"`
+	Model    string `json:"model"`
 	Insecure bool   `json:"insecure,omitempty"`
 	Username string `json:"username"`
 	Password string `json:"password"`
 	Stream   *bool  `json:"stream,omitempty"`
+
+	// Name is deprecated, see Model
+	Name string `json:"name"`
 }
 
-type ListResponse struct {
-	Models []ModelResponse `json:"models"`
-}
-
-type ModelResponse struct {
-	Name       string    `json:"name"`
-	ModifiedAt time.Time `json:"modified_at"`
-	Size       int64     `json:"size"`
-	Digest     string    `json:"digest"`
+// ListResponse is the response from [Client.List].
+type ListResponse struct {
+	Models []ListModelResponse `json:"models"`
+}
+
+// ProcessResponse is the response from [Client.Process].
+type ProcessResponse struct {
+	Models []ProcessModelResponse `json:"models"`
+}
+
+// ListModelResponse is a single model description in [ListResponse].
+type ListModelResponse struct {
+	Name       string       `json:"name"`
+	Model      string       `json:"model"`
+	ModifiedAt time.Time    `json:"modified_at"`
+	Size       int64        `json:"size"`
+	Digest     string       `json:"digest"`
+	Details    ModelDetails `json:"details,omitempty"`
+}
+
+// ProcessModelResponse is a single model description in [ProcessResponse].
+type ProcessModelResponse struct {
+	Name      string       `json:"name"`
+	Model     string       `json:"model"`
+	Size      int64        `json:"size"`
+	Digest    string       `json:"digest"`
+	Details   ModelDetails `json:"details,omitempty"`
+	ExpiresAt time.Time    `json:"expires_at"`
+	SizeVRAM  int64        `json:"size_vram"`
 }
 
 type TokenResponse struct {
 	Token string `json:"token"`
 }
 
+// GenerateResponse is the response passed into [GenerateResponseFunc].
 type GenerateResponse struct {
+	// Model is the model name that generated the response.
 	Model string `json:"model"`
+
+	// CreatedAt is the timestamp of the response.
 	CreatedAt time.Time `json:"created_at"`
+
+	// Response is the textual response itself.
 	Response string `json:"response"`
+
+	// Done specifies if the response is complete.
 	Done bool `json:"done"`
+
+	// DoneReason is the reason the model stopped generating text.
+	DoneReason string `json:"done_reason,omitempty"`
+
+	// Context is an encoding of the conversation used in this response; this
+	// can be sent in the next request to keep a conversational memory.
 	Context []int `json:"context,omitempty"`
 
-	TotalDuration      time.Duration `json:"total_duration,omitempty"`
-	LoadDuration       time.Duration `json:"load_duration,omitempty"`
-	PromptEvalCount    int           `json:"prompt_eval_count,omitempty"`
-	PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"`
-	EvalCount          int           `json:"eval_count,omitempty"`
-	EvalDuration       time.Duration `json:"eval_duration,omitempty"`
+	Metrics
 }
 
-func (r *GenerateResponse) Summary() {
-	if r.TotalDuration > 0 {
-		fmt.Fprintf(os.Stderr, "total duration: %v\n", r.TotalDuration)
-	}
-
-	if r.LoadDuration > 0 {
-		fmt.Fprintf(os.Stderr, "load duration: %v\n", r.LoadDuration)
-	}
-
-	if r.PromptEvalCount > 0 {
-		fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
-	}
-
-	if r.PromptEvalDuration > 0 {
-		fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", r.PromptEvalDuration)
-		fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(r.PromptEvalCount)/r.PromptEvalDuration.Seconds())
-	}
-
-	if r.EvalCount > 0 {
-		fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", r.EvalCount)
-	}
-
-	if r.EvalDuration > 0 {
-		fmt.Fprintf(os.Stderr, "eval duration: %s\n", r.EvalDuration)
-		fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(r.EvalCount)/r.EvalDuration.Seconds())
-	}
-}
-
-var ErrInvalidOpts = fmt.Errorf("invalid options")
+// ModelDetails provides details about a model.
+type ModelDetails struct {
+	ParentModel       string   `json:"parent_model"`
+	Format            string   `json:"format"`
+	Family            string   `json:"family"`
+	Families          []string `json:"families"`
+	ParameterSize     string   `json:"parameter_size"`
+	QuantizationLevel string   `json:"quantization_level"`
+}
+
+func (m *Metrics) Summary() {
+	if m.TotalDuration > 0 {
+		fmt.Fprintf(os.Stderr, "total duration: %v\n", m.TotalDuration)
+	}
+
+	if m.LoadDuration > 0 {
+		fmt.Fprintf(os.Stderr, "load duration: %v\n", m.LoadDuration)
+	}
+
+	if m.PromptEvalCount > 0 {
+		fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", m.PromptEvalCount)
+	}
+
+	if m.PromptEvalDuration > 0 {
+		fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", m.PromptEvalDuration)
+		fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(m.PromptEvalCount)/m.PromptEvalDuration.Seconds())
+	}
+
+	if m.EvalCount > 0 {
+		fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", m.EvalCount)
+	}
+
+	if m.EvalDuration > 0 {
+		fmt.Fprintf(os.Stderr, "eval duration: %s\n", m.EvalDuration)
+		fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(m.EvalCount)/m.EvalDuration.Seconds())
+	}
+}
 
 func (opts *Options) FromMap(m map[string]interface{}) error {
 	valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
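For orientation (an aside, not part of the diff): after this refactor the timing fields live on Metrics, which GenerateResponse and ChatResponse embed, so the Summary helper is available on either response. A sketch, with a placeholder model name and assuming the api import:

```go
// generateWithSummary is a hypothetical helper, not code from this compare view.
func generateWithSummary(ctx context.Context, client *api.Client) error {
	req := &api.GenerateRequest{
		Model:  "llama3", // placeholder model name
		Prompt: "Name three prime numbers.",
	}
	return client.Generate(ctx, req, func(resp api.GenerateResponse) error {
		fmt.Print(resp.Response)
		if resp.Done {
			// Metrics is embedded, so the timing summary is available directly.
			resp.Summary()
		}
		return nil
	})
}
```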
@@ -224,9 +389,13 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
 		}
 	}
 
-	invalidOpts := []string{}
 	for key, val := range m {
-		if opt, ok := jsonOpts[key]; ok {
+		opt, ok := jsonOpts[key]
+		if !ok {
+			slog.Warn("invalid option provided", "option", opt.Name)
+			continue
+		}
+
 		field := valueOpts.FieldByName(opt.Name)
 		if field.IsValid() && field.CanSet() {
 			if val == nil {
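Aside (not from the diff): the practical effect of this change is that unknown option keys no longer make FromMap fail; they are logged through slog and skipped. A small sketch under that assumption; the option key "definitely_not" is deliberately made up.

```go
// applyUserOptions is illustrative only.
func applyUserOptions() (api.Options, error) {
	opts := api.DefaultOptions()
	// With the new code the unknown key is warned about and ignored; the old
	// code would have returned ErrInvalidOpts instead.
	err := opts.FromMap(map[string]interface{}{
		"temperature":    0.2,  // known option, applied to opts.Temperature
		"definitely_not": true, // hypothetical unknown key
	})
	return opts, err
}
```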
@@ -283,22 +452,20 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
 				return fmt.Errorf("unknown type loading config params: %v", field.Kind())
 			}
 		}
-		} else {
-			invalidOpts = append(invalidOpts, key)
-		}
 	}
 
-	if len(invalidOpts) > 0 {
-		return fmt.Errorf("%w: %v", ErrInvalidOpts, strings.Join(invalidOpts, ", "))
-	}
 	return nil
 }
 
+// DefaultOptions is the default set of options for [GenerateRequest]; these
+// values are used unless the user specifies other values explicitly.
 func DefaultOptions() Options {
 	return Options{
 		// options set on request to runner
 		NumPredict: -1,
-		NumKeep:    0,
+
+		// set a minimal num_keep to avoid issues on context shifts
+		NumKeep: 4,
 		Temperature: 0.8,
 		TopK:        40,
 		TopP:        0.9,
@@ -317,18 +484,14 @@ func DefaultOptions() Options {
 		Runner: Runner{
 			// options set when the model is loaded
 			NumCtx: 2048,
-			RopeFrequencyBase:  10000.0,
-			RopeFrequencyScale: 1.0,
 			NumBatch: 512,
 			NumGPU:   -1, // -1 here indicates that NumGPU should be set dynamically
-			NumGQA:    1,
 			NumThread: 0, // let the runtime decide
 			LowVRAM:   false,
 			F16KV:     true,
 			UseMLock:  false,
 			UseMMap:   true,
 			UseNUMA:   false,
-			EmbeddingOnly: true,
 		},
 	}
 }
@@ -337,6 +500,13 @@ type Duration struct {
 	time.Duration
 }
 
+func (d Duration) MarshalJSON() ([]byte, error) {
+	if d.Duration < 0 {
+		return []byte("-1"), nil
+	}
+	return []byte("\"" + d.Duration.String() + "\""), nil
+}
+
 func (d *Duration) UnmarshalJSON(b []byte) (err error) {
 	var v any
 	if err := json.Unmarshal(b, &v); err != nil {
@@ -348,15 +518,20 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) {
 	switch t := v.(type) {
 	case float64:
 		if t < 0 {
-			t = math.MaxFloat64
+			d.Duration = time.Duration(math.MaxInt64)
+		} else {
+			d.Duration = time.Duration(int(t) * int(time.Second))
 		}
-
-		d.Duration = time.Duration(t)
 	case string:
 		d.Duration, err = time.ParseDuration(t)
 		if err != nil {
 			return err
 		}
+		if d.Duration < 0 {
+			d.Duration = time.Duration(math.MaxInt64)
+		}
+	default:
+		return fmt.Errorf("Unsupported type: '%s'", reflect.TypeOf(v))
 	}
 
 	return nil
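Aside (not from the diff), the net behavior of the Duration (de)serialization above: keep_alive accepts either a number of seconds or a Go duration string, negative values mean "keep the model loaded indefinitely", and marshalling emits a duration string. A small sketch assuming the api, encoding/json, fmt, and time imports:

```go
// durationRoundTrip demonstrates the behavior implied by the code above.
func durationRoundTrip() {
	var d api.Duration

	_ = json.Unmarshal([]byte(`"10m"`), &d) // d.Duration == 10 * time.Minute
	_ = json.Unmarshal([]byte(`300`), &d)   // d.Duration == 300 * time.Second
	_ = json.Unmarshal([]byte(`-1`), &d)    // d.Duration == time.Duration(math.MaxInt64)

	b, _ := json.Marshal(api.Duration{Duration: 5 * time.Minute})
	fmt.Println(string(b)) // "5m0s"
}
```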
|||||||
107 api/types_test.go (new file)
@@ -0,0 +1,107 @@
|
|||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"math"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestKeepAliveParsingFromJSON(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
req string
|
||||||
|
exp *Duration
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Positive Integer",
|
||||||
|
req: `{ "keep_alive": 42 }`,
|
||||||
|
exp: &Duration{42 * time.Second},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Positive Float",
|
||||||
|
req: `{ "keep_alive": 42.5 }`,
|
||||||
|
exp: &Duration{42 * time.Second},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Positive Integer String",
|
||||||
|
req: `{ "keep_alive": "42m" }`,
|
||||||
|
exp: &Duration{42 * time.Minute},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Negative Integer",
|
||||||
|
req: `{ "keep_alive": -1 }`,
|
||||||
|
exp: &Duration{math.MaxInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Negative Float",
|
||||||
|
req: `{ "keep_alive": -3.14 }`,
|
||||||
|
exp: &Duration{math.MaxInt64},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Negative Integer String",
|
||||||
|
req: `{ "keep_alive": "-1m" }`,
|
||||||
|
exp: &Duration{math.MaxInt64},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
var dec ChatRequest
|
||||||
|
err := json.Unmarshal([]byte(test.req), &dec)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, test.exp, dec.KeepAlive)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDurationMarshalUnmarshal(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input time.Duration
|
||||||
|
expected time.Duration
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
"negative duration",
|
||||||
|
time.Duration(-1),
|
||||||
|
time.Duration(math.MaxInt64),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"positive duration",
|
||||||
|
42 * time.Second,
|
||||||
|
42 * time.Second,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"another positive duration",
|
||||||
|
42 * time.Minute,
|
||||||
|
42 * time.Minute,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"zero duration",
|
||||||
|
time.Duration(0),
|
||||||
|
time.Duration(0),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"max duration",
|
||||||
|
time.Duration(math.MaxInt64),
|
||||||
|
time.Duration(math.MaxInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
b, err := json.Marshal(Duration{test.input})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var d Duration
|
||||||
|
err = json.Unmarshal(b, &d)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, test.expected, d.Duration, "input %v, marshalled %v, got %v", test.input, string(b), d.Duration)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
93 app/.gitignore (vendored)
@@ -1,92 +1 @@
|
|||||||
# Logs
|
ollama.syso
|
||||||
logs
|
|
||||||
*.log
|
|
||||||
npm-debug.log*
|
|
||||||
yarn-debug.log*
|
|
||||||
yarn-error.log*
|
|
||||||
lerna-debug.log*
|
|
||||||
|
|
||||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
|
||||||
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
|
|
||||||
|
|
||||||
# Runtime data
|
|
||||||
pids
|
|
||||||
*.pid
|
|
||||||
*.seed
|
|
||||||
*.pid.lock
|
|
||||||
.DS_Store
|
|
||||||
|
|
||||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
|
||||||
lib-cov
|
|
||||||
|
|
||||||
# Coverage directory used by tools like istanbul
|
|
||||||
coverage
|
|
||||||
*.lcov
|
|
||||||
|
|
||||||
# nyc test coverage
|
|
||||||
.nyc_output
|
|
||||||
|
|
||||||
# node-waf configuration
|
|
||||||
.lock-wscript
|
|
||||||
|
|
||||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
|
||||||
build/Release
|
|
||||||
|
|
||||||
# Dependency directories
|
|
||||||
node_modules/
|
|
||||||
jspm_packages/
|
|
||||||
|
|
||||||
# TypeScript v1 declaration files
|
|
||||||
typings/
|
|
||||||
|
|
||||||
# TypeScript cache
|
|
||||||
*.tsbuildinfo
|
|
||||||
|
|
||||||
# Optional npm cache directory
|
|
||||||
.npm
|
|
||||||
|
|
||||||
# Optional eslint cache
|
|
||||||
.eslintcache
|
|
||||||
|
|
||||||
# Optional REPL history
|
|
||||||
.node_repl_history
|
|
||||||
|
|
||||||
# Output of 'npm pack'
|
|
||||||
*.tgz
|
|
||||||
|
|
||||||
# Yarn Integrity file
|
|
||||||
.yarn-integrity
|
|
||||||
|
|
||||||
# dotenv environment variables file
|
|
||||||
.env
|
|
||||||
.env.test
|
|
||||||
|
|
||||||
# parcel-bundler cache (https://parceljs.org/)
|
|
||||||
.cache
|
|
||||||
|
|
||||||
# next.js build output
|
|
||||||
.next
|
|
||||||
|
|
||||||
# nuxt.js build output
|
|
||||||
.nuxt
|
|
||||||
|
|
||||||
# vuepress build output
|
|
||||||
.vuepress/dist
|
|
||||||
|
|
||||||
# Serverless directories
|
|
||||||
.serverless/
|
|
||||||
|
|
||||||
# FuseBox cache
|
|
||||||
.fusebox/
|
|
||||||
|
|
||||||
# DynamoDB Local files
|
|
||||||
.dynamodb/
|
|
||||||
|
|
||||||
# Webpack
|
|
||||||
.webpack/
|
|
||||||
|
|
||||||
# Vite
|
|
||||||
.vite/
|
|
||||||
|
|
||||||
# Electron-Forge
|
|
||||||
out/
|
|
||||||
|
|||||||
app/README.md
@@ -1,21 +1,22 @@
-# Desktop
+# Ollama App
 
-This app builds upon Ollama to provide a desktop experience for running models.
+## Linux
 
-## Developing
+TODO
 
-First, build the `ollama` binary:
+## MacOS
+
+TODO
+
+## Windows
+
+If you want to build the installer, you'll need to install
+- https://jrsoftware.org/isinfo.php
+
+In the top directory of this repo, run the following powershell script
+to build the ollama CLI, ollama app, and ollama installer.
 
 ```
-cd ..
-go build .
+powershell -ExecutionPolicy Bypass -File .\scripts\build_windows.ps1
 ```
-
-Then run the desktop app with `npm start`:
-
-```
-cd app
-npm install
-npm start
-```
|||||||
BIN app/assets/app.ico (Normal file, binary not shown, 7.3 KiB)

17 app/assets/assets.go (Normal file)
@@ -0,0 +1,17 @@
+package assets
+
+import (
+	"embed"
+	"io/fs"
+)
+
+//go:embed *.ico
+var icons embed.FS
+
+func ListIcons() ([]string, error) {
+	return fs.Glob(icons, "*")
+}
+
+func GetIcon(filename string) ([]byte, error) {
+	return icons.ReadFile(filename)
+}

BIN app/assets/setup.bmp (Normal file, binary not shown, 76 KiB)
BIN app/assets/tray.ico (Normal file, binary not shown, 89 KiB)
BIN app/assets/tray_upgrade.ico (Normal file, binary not shown, 91 KiB)
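An aside (not from the diff): the embedded icon helpers above can be consumed from the app along these lines; the chosen filename matches one of the icons added in this change, and the import path for the assets package (github.com/ollama/ollama/app/assets) is assumed.

```go
// loadTrayIcon is illustrative only.
func loadTrayIcon() ([]byte, error) {
	names, err := assets.ListIcons()
	if err != nil {
		return nil, err
	}
	fmt.Println("embedded icons:", names)

	return assets.GetIcon("tray.ico") // one of the .ico files added above
}
```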
9 app/lifecycle/getstarted_nonwindows.go (Normal file)
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build !windows
|
||||||
|
|
||||||
|
package lifecycle
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
func GetStarted() error {
|
||||||
|
return fmt.Errorf("GetStarted not implemented")
|
||||||
|
}
|
||||||
44 app/lifecycle/getstarted_windows.go (Normal file)
@@ -0,0 +1,44 @@
|
|||||||
|
package lifecycle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func GetStarted() error {
|
||||||
|
const CREATE_NEW_CONSOLE = 0x00000010
|
||||||
|
var err error
|
||||||
|
bannerScript := filepath.Join(AppDir, "ollama_welcome.ps1")
|
||||||
|
args := []string{
|
||||||
|
// TODO once we're signed, the execution policy bypass should be removed
|
||||||
|
"powershell", "-noexit", "-ExecutionPolicy", "Bypass", "-nologo", "-file", bannerScript,
|
||||||
|
}
|
||||||
|
args[0], err = exec.LookPath(args[0])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure the script actually exists
|
||||||
|
_, err = os.Stat(bannerScript)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("getting started banner script error %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info(fmt.Sprintf("opening getting started terminal with %v", args))
|
||||||
|
attrs := &os.ProcAttr{
|
||||||
|
Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
|
||||||
|
Sys: &syscall.SysProcAttr{CreationFlags: CREATE_NEW_CONSOLE, HideWindow: false},
|
||||||
|
}
|
||||||
|
proc, err := os.StartProcess(args[0], args, attrs)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to start getting started shell %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Debug(fmt.Sprintf("getting started terminal PID: %d", proc.Pid))
|
||||||
|
return proc.Release()
|
||||||
|
}
|
||||||
92 app/lifecycle/lifecycle.go (Normal file)
@@ -0,0 +1,92 @@
|
|||||||
|
package lifecycle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/app/store"
|
||||||
|
"github.com/ollama/ollama/app/tray"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Run() {
|
||||||
|
InitLogging()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
var done chan int
|
||||||
|
|
||||||
|
t, err := tray.NewTray()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to start: %s", err)
|
||||||
|
}
|
||||||
|
callbacks := t.GetCallbacks()
|
||||||
|
|
||||||
|
signals := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
slog.Debug("starting callback loop")
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-callbacks.Quit:
|
||||||
|
slog.Debug("quit called")
|
||||||
|
t.Quit()
|
||||||
|
case <-signals:
|
||||||
|
slog.Debug("shutting down due to signal")
|
||||||
|
t.Quit()
|
||||||
|
case <-callbacks.Update:
|
||||||
|
err := DoUpgrade(cancel, done)
|
||||||
|
if err != nil {
|
||||||
|
slog.Warn(fmt.Sprintf("upgrade attempt failed: %s", err))
|
||||||
|
}
|
||||||
|
case <-callbacks.ShowLogs:
|
||||||
|
ShowLogs()
|
||||||
|
case <-callbacks.DoFirstUse:
|
||||||
|
err := GetStarted()
|
||||||
|
if err != nil {
|
||||||
|
slog.Warn(fmt.Sprintf("Failed to launch getting started shell: %s", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Are we first use?
|
||||||
|
if !store.GetFirstTimeRun() {
|
||||||
|
slog.Debug("First time run")
|
||||||
|
err = t.DisplayFirstUseNotification()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug(fmt.Sprintf("XXX failed to display first use notification %v", err))
|
||||||
|
}
|
||||||
|
store.SetFirstTimeRun(true)
|
||||||
|
} else {
|
||||||
|
slog.Debug("Not first time, skipping first run notification")
|
||||||
|
}
|
||||||
|
|
||||||
|
if IsServerRunning(ctx) {
|
||||||
|
slog.Info("Detected another instance of ollama running, exiting")
|
||||||
|
os.Exit(1)
|
||||||
|
} else {
|
||||||
|
done, err = SpawnServer(ctx, CLIName)
|
||||||
|
if err != nil {
|
||||||
|
// TODO - should we retry in a backoff loop?
|
||||||
|
// TODO - should we pop up a warning and maybe add a menu item to view application logs?
|
||||||
|
slog.Error(fmt.Sprintf("Failed to spawn ollama server %s", err))
|
||||||
|
done = make(chan int, 1)
|
||||||
|
done <- 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
StartBackgroundUpdaterChecker(ctx, t.UpdateAvailable)
|
||||||
|
|
||||||
|
t.Run()
|
||||||
|
cancel()
|
||||||
|
slog.Info("Waiting for ollama server to shutdown...")
|
||||||
|
if done != nil {
|
||||||
|
<-done
|
||||||
|
}
|
||||||
|
slog.Info("Ollama app exiting")
|
||||||
|
}
|
||||||
48 app/lifecycle/logging.go (Normal file)
@@ -0,0 +1,48 @@
|
|||||||
|
package lifecycle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
|
)
|
||||||
|
|
||||||
|
func InitLogging() {
|
||||||
|
level := slog.LevelInfo
|
||||||
|
|
||||||
|
if envconfig.Debug {
|
||||||
|
level = slog.LevelDebug
|
||||||
|
}
|
||||||
|
|
||||||
|
var logFile *os.File
|
||||||
|
var err error
|
||||||
|
// Detect if we're a GUI app on windows, and if not, send logs to console
|
||||||
|
if os.Stderr.Fd() != 0 {
|
||||||
|
// Console app detected
|
||||||
|
logFile = os.Stderr
|
||||||
|
// TODO - write one-line to the app.log file saying we're running in console mode to help avoid confusion
|
||||||
|
} else {
|
||||||
|
logFile, err = os.OpenFile(AppLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error(fmt.Sprintf("failed to create server log %v", err))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
handler := slog.NewTextHandler(logFile, &slog.HandlerOptions{
|
||||||
|
Level: level,
|
||||||
|
AddSource: true,
|
||||||
|
ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr {
|
||||||
|
if attr.Key == slog.SourceKey {
|
||||||
|
source := attr.Value.Any().(*slog.Source)
|
||||||
|
source.File = filepath.Base(source.File)
|
||||||
|
}
|
||||||
|
return attr
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
slog.SetDefault(slog.New(handler))
|
||||||
|
|
||||||
|
slog.Info("ollama app started")
|
||||||
|
}
|
||||||
9 app/lifecycle/logging_nonwindows.go (Normal file)
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build !windows
|
||||||
|
|
||||||
|
package lifecycle
|
||||||
|
|
||||||
|
import "log/slog"
|
||||||
|
|
||||||
|
func ShowLogs() {
|
||||||
|
slog.Warn("ShowLogs not yet implemented")
|
||||||
|
}
|
||||||
19 app/lifecycle/logging_windows.go (Normal file)
@@ -0,0 +1,19 @@
|
|||||||
|
package lifecycle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os/exec"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ShowLogs() {
|
||||||
|
cmd_path := "c:\\Windows\\system32\\cmd.exe"
|
||||||
|
slog.Debug(fmt.Sprintf("viewing logs with start %s", AppDataDir))
|
||||||
|
cmd := exec.Command(cmd_path, "/c", "start", AppDataDir)
|
||||||
|
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: false, CreationFlags: 0x08000000}
|
||||||
|
err := cmd.Start()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error(fmt.Sprintf("Failed to open log dir: %s", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
78 app/lifecycle/paths.go (Normal file)
@@ -0,0 +1,78 @@
|
|||||||
|
package lifecycle
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
AppName = "ollama app"
|
||||||
|
CLIName = "ollama"
|
||||||
|
AppDir = "/opt/Ollama"
|
||||||
|
AppDataDir = "/opt/Ollama"
|
||||||
|
// TODO - should there be a distinct log dir?
|
||||||
|
UpdateStageDir = "/tmp"
|
||||||
|
AppLogFile = "/tmp/ollama_app.log"
|
||||||
|
ServerLogFile = "/tmp/ollama.log"
|
||||||
|
UpgradeLogFile = "/tmp/ollama_update.log"
|
||||||
|
Installer = "OllamaSetup.exe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
AppName += ".exe"
|
||||||
|
CLIName += ".exe"
|
||||||
|
// Logs, configs, downloads go to LOCALAPPDATA
|
||||||
|
localAppData := os.Getenv("LOCALAPPDATA")
|
||||||
|
AppDataDir = filepath.Join(localAppData, "Ollama")
|
||||||
|
UpdateStageDir = filepath.Join(AppDataDir, "updates")
|
||||||
|
AppLogFile = filepath.Join(AppDataDir, "app.log")
|
||||||
|
ServerLogFile = filepath.Join(AppDataDir, "server.log")
|
||||||
|
UpgradeLogFile = filepath.Join(AppDataDir, "upgrade.log")
|
||||||
|
|
||||||
|
// Executables are stored in APPDATA
|
||||||
|
AppDir = filepath.Join(localAppData, "Programs", "Ollama")
|
||||||
|
|
||||||
|
// Make sure we have PATH set correctly for any spawned children
|
||||||
|
paths := strings.Split(os.Getenv("PATH"), ";")
|
||||||
|
// Start with whatever we find in the PATH/LD_LIBRARY_PATH
|
||||||
|
found := false
|
||||||
|
for _, path := range paths {
|
||||||
|
d, err := filepath.Abs(path)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if strings.EqualFold(AppDir, d) {
|
||||||
|
found = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
paths = append(paths, AppDir)
|
||||||
|
|
||||||
|
pathVal := strings.Join(paths, ";")
|
||||||
|
slog.Debug("setting PATH=" + pathVal)
|
||||||
|
err := os.Setenv("PATH", pathVal)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error(fmt.Sprintf("failed to update PATH: %s", err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure our logging dir exists
|
||||||
|
_, err := os.Stat(AppDataDir)
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
if err := os.MkdirAll(AppDataDir, 0o755); err != nil {
|
||||||
|
slog.Error(fmt.Sprintf("create ollama dir %s: %v", AppDataDir, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if runtime.GOOS == "darwin" {
|
||||||
|
// TODO
|
||||||
|
AppName += ".app"
|
||||||
|
// } else if runtime.GOOS == "linux" {
|
||||||
|
// TODO
|
||||||
|
}
|
||||||
|
}
|
||||||

app/lifecycle/server.go (new file, 180 lines)
@@ -0,0 +1,180 @@
package lifecycle

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"os/exec"
	"path/filepath"
	"time"

	"github.com/ollama/ollama/api"
)

func getCLIFullPath(command string) string {
	var cmdPath string
	appExe, err := os.Executable()
	if err == nil {
		cmdPath = filepath.Join(filepath.Dir(appExe), command)
		_, err := os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
	}
	cmdPath, err = exec.LookPath(command)
	if err == nil {
		_, err := os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
	}
	pwd, err := os.Getwd()
	if err == nil {
		cmdPath = filepath.Join(pwd, command)
		_, err = os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
	}

	return command
}

func start(ctx context.Context, command string) (*exec.Cmd, error) {
	cmd := getCmd(ctx, getCLIFullPath(command))
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, fmt.Errorf("failed to spawn server stdout pipe: %w", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, fmt.Errorf("failed to spawn server stderr pipe: %w", err)
	}

	// TODO - rotation
	logFile, err := os.OpenFile(ServerLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
	if err != nil {
		return nil, fmt.Errorf("failed to create server log: %w", err)
	}

	logDir := filepath.Dir(ServerLogFile)
	_, err = os.Stat(logDir)
	if err != nil {
		if !errors.Is(err, os.ErrNotExist) {
			return nil, fmt.Errorf("stat ollama server log dir %s: %v", logDir, err)
		}

		if err := os.MkdirAll(logDir, 0o755); err != nil {
			return nil, fmt.Errorf("create ollama server log dir %s: %v", logDir, err)
		}
	}

	go func() {
		defer logFile.Close()
		io.Copy(logFile, stdout) //nolint:errcheck
	}()
	go func() {
		defer logFile.Close()
		io.Copy(logFile, stderr) //nolint:errcheck
	}()

	// Re-wire context done behavior to attempt a graceful shutdown of the server
	cmd.Cancel = func() error {
		if cmd.Process != nil {
			err := terminate(cmd)
			if err != nil {
				slog.Warn("error trying to gracefully terminate server", "err", err)
				return cmd.Process.Kill()
			}

			tick := time.NewTicker(10 * time.Millisecond)
			defer tick.Stop()

			for {
				select {
				case <-tick.C:
					exited, err := isProcessExited(cmd.Process.Pid)
					if err != nil {
						return err
					}

					if exited {
						return nil
					}
				case <-time.After(5 * time.Second):
					slog.Warn("graceful server shutdown timeout, killing", "pid", cmd.Process.Pid)
					return cmd.Process.Kill()
				}
			}
		}
		return nil
	}

	// run the command and wait for it to finish
	if err := cmd.Start(); err != nil {
		return nil, fmt.Errorf("failed to start server %w", err)
	}
	if cmd.Process != nil {
		slog.Info(fmt.Sprintf("started ollama server with pid %d", cmd.Process.Pid))
	}
	slog.Info(fmt.Sprintf("ollama server logs %s", ServerLogFile))

	return cmd, nil
}

func SpawnServer(ctx context.Context, command string) (chan int, error) {
	done := make(chan int)

	go func() {
		// Keep the server running unless we're shutting down the app
		crashCount := 0
		for {
			slog.Info("starting server...")
			cmd, err := start(ctx, command)
			if err != nil {
				crashCount++
				slog.Error(fmt.Sprintf("failed to start server %s", err))
				time.Sleep(500 * time.Millisecond * time.Duration(crashCount))
				continue
			}

			cmd.Wait() //nolint:errcheck
			var code int
			if cmd.ProcessState != nil {
				code = cmd.ProcessState.ExitCode()
			}

			select {
			case <-ctx.Done():
				slog.Info(fmt.Sprintf("server shutdown with exit code %d", code))
				done <- code
				return
			default:
				crashCount++
				slog.Warn(fmt.Sprintf("server crash %d - exit code %d - respawning", crashCount, code))
				time.Sleep(500 * time.Millisecond * time.Duration(crashCount))
				break
			}
		}
	}()

	return done, nil
}

func IsServerRunning(ctx context.Context) bool {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		slog.Info("unable to connect to server")
		return false
	}
	err = client.Heartbeat(ctx)
	if err != nil {
		slog.Debug(fmt.Sprintf("heartbeat from server: %s", err))
		slog.Info("unable to connect to server")
		return false
	}
	return true
}

app/lifecycle/server_unix.go (new file, 38 lines)
@@ -0,0 +1,38 @@
//go:build !windows

package lifecycle

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

func getCmd(ctx context.Context, cmd string) *exec.Cmd {
	return exec.CommandContext(ctx, cmd, "serve")
}

func terminate(cmd *exec.Cmd) error {
	return cmd.Process.Signal(os.Interrupt)
}

func isProcessExited(pid int) (bool, error) {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return false, fmt.Errorf("failed to find process: %v", err)
	}

	err = proc.Signal(syscall.Signal(0))
	if err != nil {
		if errors.Is(err, os.ErrProcessDone) || errors.Is(err, syscall.ESRCH) {
			return true, nil
		}

		return false, fmt.Errorf("error signaling process: %v", err)
	}

	return false, nil
}

app/lifecycle/server_windows.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package lifecycle

import (
	"context"
	"fmt"
	"os/exec"
	"syscall"

	"golang.org/x/sys/windows"
)

func getCmd(ctx context.Context, exePath string) *exec.Cmd {
	cmd := exec.CommandContext(ctx, exePath, "serve")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		HideWindow:    true,
		CreationFlags: windows.CREATE_NEW_PROCESS_GROUP,
	}

	return cmd
}

func terminate(cmd *exec.Cmd) error {
	dll, err := windows.LoadDLL("kernel32.dll")
	if err != nil {
		return err
	}
	//nolint:errcheck
	defer dll.Release()

	pid := cmd.Process.Pid

	f, err := dll.FindProc("AttachConsole")
	if err != nil {
		return err
	}

	r1, _, err := f.Call(uintptr(pid))
	if r1 == 0 && err != syscall.ERROR_ACCESS_DENIED {
		return err
	}

	f, err = dll.FindProc("SetConsoleCtrlHandler")
	if err != nil {
		return err
	}

	r1, _, err = f.Call(0, 1)
	if r1 == 0 {
		return err
	}

	f, err = dll.FindProc("GenerateConsoleCtrlEvent")
	if err != nil {
		return err
	}

	r1, _, err = f.Call(windows.CTRL_BREAK_EVENT, uintptr(pid))
	if r1 == 0 {
		return err
	}

	r1, _, err = f.Call(windows.CTRL_C_EVENT, uintptr(pid))
	if r1 == 0 {
		return err
	}

	return nil
}

const STILL_ACTIVE = 259

func isProcessExited(pid int) (bool, error) {
	hProcess, err := windows.OpenProcess(windows.PROCESS_QUERY_INFORMATION, false, uint32(pid))
	if err != nil {
		return false, fmt.Errorf("failed to open process: %v", err)
	}
	//nolint:errcheck
	defer windows.CloseHandle(hProcess)

	var exitCode uint32
	err = windows.GetExitCodeProcess(hProcess, &exitCode)
	if err != nil {
		return false, fmt.Errorf("failed to get exit code: %v", err)
	}

	if exitCode == STILL_ACTIVE {
		return false, nil
	}

	return true, nil
}

app/lifecycle/updater.go (new file, 228 lines)
@@ -0,0 +1,228 @@
package lifecycle

import (
	"context"
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"mime"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/ollama/ollama/auth"
	"github.com/ollama/ollama/version"
)

var (
	UpdateCheckURLBase  = "https://ollama.com/api/update"
	UpdateDownloaded    = false
	UpdateCheckInterval = 60 * 60 * time.Second
)

// TODO - maybe move up to the API package?
type UpdateResponse struct {
	UpdateURL     string `json:"url"`
	UpdateVersion string `json:"version"`
}

func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
	var updateResp UpdateResponse

	requestURL, err := url.Parse(UpdateCheckURLBase)
	if err != nil {
		return false, updateResp
	}

	query := requestURL.Query()
	query.Add("os", runtime.GOOS)
	query.Add("arch", runtime.GOARCH)
	query.Add("version", version.Version)
	query.Add("ts", fmt.Sprintf("%d", time.Now().Unix()))

	nonce, err := auth.NewNonce(rand.Reader, 16)
	if err != nil {
		return false, updateResp
	}

	query.Add("nonce", nonce)
	requestURL.RawQuery = query.Encode()

	data := []byte(fmt.Sprintf("%s,%s", http.MethodGet, requestURL.RequestURI()))
	signature, err := auth.Sign(ctx, data)
	if err != nil {
		return false, updateResp
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestURL.String(), nil)
	if err != nil {
		slog.Warn(fmt.Sprintf("failed to check for update: %s", err))
		return false, updateResp
	}
	req.Header.Set("Authorization", signature)
	req.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))

	slog.Debug("checking for available update", "requestURL", requestURL)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		slog.Warn(fmt.Sprintf("failed to check for update: %s", err))
		return false, updateResp
	}
	defer resp.Body.Close()

	if resp.StatusCode == http.StatusNoContent {
		slog.Debug("check update response 204 (current version is up to date)")
		return false, updateResp
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		slog.Warn(fmt.Sprintf("failed to read body response: %s", err))
	}

	if resp.StatusCode != http.StatusOK {
		slog.Info(fmt.Sprintf("check update error %d - %.96s", resp.StatusCode, string(body)))
		return false, updateResp
	}
	err = json.Unmarshal(body, &updateResp)
	if err != nil {
		slog.Warn(fmt.Sprintf("malformed response checking for update: %s", err))
		return false, updateResp
	}
	// Extract the version string from the URL in the github release artifact path
	updateResp.UpdateVersion = path.Base(path.Dir(updateResp.UpdateURL))

	slog.Info("New update available at " + updateResp.UpdateURL)
	return true, updateResp
}

func DownloadNewRelease(ctx context.Context, updateResp UpdateResponse) error {
	// Do a head first to check etag info
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, updateResp.UpdateURL, nil)
	if err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("error checking update: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status attempting to download update %d", resp.StatusCode)
	}
	resp.Body.Close()
	etag := strings.Trim(resp.Header.Get("etag"), "\"")
	if etag == "" {
		slog.Debug("no etag detected, falling back to filename based dedup")
		etag = "_"
	}
	filename := Installer
	_, params, err := mime.ParseMediaType(resp.Header.Get("content-disposition"))
	if err == nil {
		filename = params["filename"]
	}

	stageFilename := filepath.Join(UpdateStageDir, etag, filename)

	// Check to see if we already have it downloaded
	_, err = os.Stat(stageFilename)
	if err == nil {
		slog.Info("update already downloaded")
		return nil
	}

	cleanupOldDownloads()

	req.Method = http.MethodGet
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("error checking update: %w", err)
	}
	defer resp.Body.Close()
	etag = strings.Trim(resp.Header.Get("etag"), "\"")
	if etag == "" {
		slog.Debug("no etag detected, falling back to filename based dedup") // TODO probably can get rid of this redundant log
		etag = "_"
	}

	stageFilename = filepath.Join(UpdateStageDir, etag, filename)

	_, err = os.Stat(filepath.Dir(stageFilename))
	if errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(filepath.Dir(stageFilename), 0o755); err != nil {
			return fmt.Errorf("create ollama dir %s: %v", filepath.Dir(stageFilename), err)
		}
	}

	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read body response: %w", err)
	}
	fp, err := os.OpenFile(stageFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
	if err != nil {
		return fmt.Errorf("write payload %s: %w", stageFilename, err)
	}
	defer fp.Close()
	if n, err := fp.Write(payload); err != nil || n != len(payload) {
		return fmt.Errorf("write payload %s: %d vs %d -- %w", stageFilename, n, len(payload), err)
	}
	slog.Info("new update downloaded " + stageFilename)

	UpdateDownloaded = true
	return nil
}

func cleanupOldDownloads() {
	files, err := os.ReadDir(UpdateStageDir)
	if err != nil && errors.Is(err, os.ErrNotExist) {
		// Expected behavior on first run
		return
	} else if err != nil {
		slog.Warn(fmt.Sprintf("failed to list stage dir: %s", err))
		return
	}
	for _, file := range files {
		fullname := filepath.Join(UpdateStageDir, file.Name())
		slog.Debug("cleaning up old download: " + fullname)
		err = os.RemoveAll(fullname)
		if err != nil {
			slog.Warn(fmt.Sprintf("failed to cleanup stale update download %s", err))
		}
	}
}

func StartBackgroundUpdaterChecker(ctx context.Context, cb func(string) error) {
	go func() {
		// Don't blast an update message immediately after startup
		// time.Sleep(30 * time.Second)
		time.Sleep(3 * time.Second)

		for {
			available, resp := IsNewReleaseAvailable(ctx)
			if available {
				err := DownloadNewRelease(ctx, resp)
				if err != nil {
					slog.Error(fmt.Sprintf("failed to download new release: %s", err))
				}
				err = cb(resp.UpdateVersion)
				if err != nil {
					slog.Warn(fmt.Sprintf("failed to register update available with tray: %s", err))
				}
			}
			select {
			case <-ctx.Done():
				slog.Debug("stopping background update checker")
				return
			default:
				time.Sleep(UpdateCheckInterval)
			}
		}
	}()
}

app/lifecycle/updater_nonwindows.go (new file, 12 lines)
@@ -0,0 +1,12 @@
//go:build !windows

package lifecycle

import (
	"context"
	"fmt"
)

func DoUpgrade(cancel context.CancelFunc, done chan int) error {
	return fmt.Errorf("DoUpgrade not yet implemented")
}

app/lifecycle/updater_windows.go (new file, 77 lines)
@@ -0,0 +1,77 @@
package lifecycle

import (
	"context"
	"fmt"
	"log/slog"
	"os"
	"os/exec"
	"path/filepath"
)

func DoUpgrade(cancel context.CancelFunc, done chan int) error {
	files, err := filepath.Glob(filepath.Join(UpdateStageDir, "*", "*.exe")) // TODO generalize for multiplatform
	if err != nil {
		return fmt.Errorf("failed to lookup downloads: %s", err)
	}
	if len(files) == 0 {
		return fmt.Errorf("no update downloads found")
	} else if len(files) > 1 {
		// Shouldn't happen
		slog.Warn(fmt.Sprintf("multiple downloads found, using first one %v", files))
	}
	installerExe := files[0]

	slog.Info("starting upgrade with " + installerExe)
	slog.Info("upgrade log file " + UpgradeLogFile)

	// When running in debug mode, we'll be "verbose" and let the installer pop up and prompt
	installArgs := []string{
		"/CLOSEAPPLICATIONS",                    // Quit the tray app if it's still running
		"/LOG=" + filepath.Base(UpgradeLogFile), // Only relative seems reliable, so set pwd
		"/FORCECLOSEAPPLICATIONS",               // Force close the tray app - might be needed
	}
	// make the upgrade as quiet as possible (no GUI, no prompts)
	installArgs = append(installArgs,
		"/SP", // Skip the "This will install... Do you wish to continue" prompt
		"/SUPPRESSMSGBOXES",
		"/SILENT",
		"/VERYSILENT",
	)

	// Safeguard in case we have requests in flight that need to drain...
	slog.Info("Waiting for server to shutdown")
	cancel()
	if done != nil {
		<-done
	} else {
		// Shouldn't happen
		slog.Warn("done chan was nil, not actually waiting")
	}

	slog.Debug(fmt.Sprintf("starting installer: %s %v", installerExe, installArgs))
	os.Chdir(filepath.Dir(UpgradeLogFile)) //nolint:errcheck
	cmd := exec.Command(installerExe, installArgs...)

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("unable to start ollama app %w", err)
	}

	if cmd.Process != nil {
		err = cmd.Process.Release()
		if err != nil {
			slog.Error(fmt.Sprintf("failed to release server process: %s", err))
		}
	} else {
		// TODO - some details about why it didn't start, or is this a pedantic error case?
		return fmt.Errorf("installer process did not start")
	}

	// TODO should we linger for a moment and check to make sure it's actually running by checking the pid?

	slog.Info("Installer started in background, exiting")

	os.Exit(0)
	// Not reached
	return nil
}

app/main.go (new file, 12 lines)
@@ -0,0 +1,12 @@
package main

// Compile with the following to get rid of the cmd pop up on windows
// go build -ldflags="-H windowsgui" .

import (
	"github.com/ollama/ollama/app/lifecycle"
)

func main() {
	lifecycle.Run()
}

app/ollama.iss (new file, 156 lines)
@@ -0,0 +1,156 @@
; Inno Setup Installer for Ollama
;
; To build the installer use the build script invoked from the top of the source tree
;
; powershell -ExecutionPolicy Bypass -File .\scripts\build_windows.ps


#define MyAppName "Ollama"
#if GetEnv("PKG_VERSION") != ""
  #define MyAppVersion GetEnv("PKG_VERSION")
#else
  #define MyAppVersion "0.0.0"
#endif
#define MyAppPublisher "Ollama"
#define MyAppURL "https://ollama.com/"
#define MyAppExeName "ollama app.exe"
#define MyIcon ".\assets\app.ico"

[Setup]
; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{44E83376-CE68-45EB-8FC1-393500EB558C}
AppName={#MyAppName}
AppVersion={#MyAppVersion}
VersionInfoVersion={#MyAppVersion}
;AppVerName={#MyAppName} {#MyAppVersion}
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
ArchitecturesAllowed=x64 arm64
ArchitecturesInstallIn64BitMode=x64 arm64
DefaultDirName={localappdata}\Programs\{#MyAppName}
DefaultGroupName={#MyAppName}
DisableProgramGroupPage=yes
PrivilegesRequired=lowest
OutputBaseFilename="OllamaSetup"
SetupIconFile={#MyIcon}
UninstallDisplayIcon={uninstallexe}
Compression=lzma2
SolidCompression=no
WizardStyle=modern
ChangesEnvironment=yes
OutputDir=..\dist\

; Disable logging once everything's battle tested
; Filename will be %TEMP%\Setup Log*.txt
SetupLogging=yes
CloseApplications=yes
RestartApplications=no

; https://jrsoftware.org/ishelp/index.php?topic=setup_wizardimagefile
WizardSmallImageFile=.\assets\setup.bmp

; TODO verify actual min windows version...
; OG Win 10
MinVersion=10.0.10240

; First release that supports WinRT UI Composition for win32 apps
; MinVersion=10.0.17134
; First release with XAML Islands - possible UI path forward
; MinVersion=10.0.18362

; quiet...
DisableDirPage=yes
DisableFinishedPage=yes
DisableReadyMemo=yes
DisableReadyPage=yes
DisableStartupPrompt=yes
DisableWelcomePage=yes

; TODO - percentage can't be set less than 100, so how to make it shorter?
; WizardSizePercent=100,80

#if GetEnv("KEY_CONTAINER")
SignTool=MySignTool
SignedUninstaller=yes
#endif

SetupMutex=OllamaSetupMutex

[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"

[LangOptions]
DialogFontSize=12

[Files]
Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
Source: "..\dist\windows-{#ARCH}\*.dll"; DestDir: "{app}"; Flags: ignoreversion 64bit
Source: "..\dist\windows-{#ARCH}\ollama_runners\*"; DestDir: "{app}\ollama_runners"; Flags: ignoreversion 64bit recursesubdirs
Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion
#if DirExists("..\dist\windows-amd64\rocm")
Source: "..\dist\windows-amd64\rocm\*"; DestDir: "{app}\rocm\"; Flags: ignoreversion recursesubdirs
#endif


[Icons]
Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
Name: "{userstartup}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
Name: "{userprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"

[Run]
Filename: "{cmd}"; Parameters: "/C set PATH={app};%PATH% & ""{app}\{#MyAppExeName}"""; Flags: postinstall nowait runhidden

[UninstallRun]
; Filename: "{cmd}"; Parameters: "/C ""taskkill /im ''{#MyAppExeName}'' /f /t"; Flags: runhidden
; Filename: "{cmd}"; Parameters: "/C ""taskkill /im ollama.exe /f /t"; Flags: runhidden
Filename: "taskkill"; Parameters: "/im ""{#MyAppExeName}"" /f /t"; Flags: runhidden
Filename: "taskkill"; Parameters: "/im ""ollama.exe"" /f /t"; Flags: runhidden
; HACK! need to give the server and app enough time to exit
; TODO - convert this to a Pascal code script so it waits until they're no longer running, then completes
Filename: "{cmd}"; Parameters: "/c timeout 5"; Flags: runhidden

[UninstallDelete]
Type: filesandordirs; Name: "{%TEMP}\ollama*"
Type: filesandordirs; Name: "{%LOCALAPPDATA}\Ollama"
Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama"
Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\models"
Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\history"
; NOTE: if the user has a custom OLLAMA_MODELS it will be preserved

[Messages]
WizardReady=Ollama Windows Preview
ReadyLabel1=%nLet's get you up and running with your own large language models.
SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or finish the other installer, then click OK to continue with this install, or Cancel to exit.


;FinishedHeadingLabel=Run your first model
;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n    ollama run llama3
;ClickFinish=%n

[Registry]
Root: HKCU; Subkey: "Environment"; \
    ValueType: expandsz; ValueName: "Path"; ValueData: "{olddata};{app}"; \
    Check: NeedsAddPath('{app}')

[Code]

function NeedsAddPath(Param: string): boolean;
var
  OrigPath: string;
begin
  if not RegQueryStringValue(HKEY_CURRENT_USER,
    'Environment',
    'Path', OrigPath)
  then begin
    Result := True;
    exit;
  end;
  { look for the path with leading and trailing semicolon }
  { Pos() returns 0 if not found }
  Result := Pos(';' + ExpandConstant(Param) + ';', ';' + OrigPath + ';') = 0;
end;

app/ollama.rc (new file, 29 lines)
@@ -0,0 +1,29 @@
#include <winver.h>

VS_VERSION_INFO VERSIONINFO
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
FILEOS 0x40004L
FILETYPE 0x1L
FILESUBTYPE 0x0L
BEGIN
    BLOCK "StringFileInfo"
    BEGIN
        BLOCK "040904b0"
        BEGIN
            VALUE "FileDescription", "Ollama"
            VALUE "InternalName", "Ollama"
            VALUE "OriginalFilename", "ollama app.exe"
            VALUE "ProductName", "Ollama"
        END
    END

    BLOCK "VarFileInfo"
    BEGIN
        VALUE "Translation", 0x409, 1200
    END
END

app/ollama_welcome.ps1 (new file, 8 lines)
@@ -0,0 +1,8 @@
# TODO - consider ANSI colors and maybe ASCII art...
write-host ""
write-host "Welcome to Ollama!"
write-host ""
write-host "Run your first model:"
write-host ""
write-host "`tollama run llama3"
write-host ""

app/store/store.go (new file, 97 lines)
@@ -0,0 +1,97 @@
package store

import (
	"encoding/json"
	"errors"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"sync"

	"github.com/google/uuid"
)

type Store struct {
	ID           string `json:"id"`
	FirstTimeRun bool   `json:"first-time-run"`
}

var (
	lock  sync.Mutex
	store Store
)

func GetID() string {
	lock.Lock()
	defer lock.Unlock()
	if store.ID == "" {
		initStore()
	}
	return store.ID
}

func GetFirstTimeRun() bool {
	lock.Lock()
	defer lock.Unlock()
	if store.ID == "" {
		initStore()
	}
	return store.FirstTimeRun
}

func SetFirstTimeRun(val bool) {
	lock.Lock()
	defer lock.Unlock()
	if store.FirstTimeRun == val {
		return
	}
	store.FirstTimeRun = val
	writeStore(getStorePath())
}

// lock must be held
func initStore() {
	storeFile, err := os.Open(getStorePath())
	if err == nil {
		defer storeFile.Close()
		err = json.NewDecoder(storeFile).Decode(&store)
		if err == nil {
			slog.Debug(fmt.Sprintf("loaded existing store %s - ID: %s", getStorePath(), store.ID))
			return
		}
	} else if !errors.Is(err, os.ErrNotExist) {
		slog.Debug(fmt.Sprintf("unexpected error searching for store: %s", err))
	}
	slog.Debug("initializing new store")
	store.ID = uuid.New().String()
	writeStore(getStorePath())
}

func writeStore(storeFilename string) {
	ollamaDir := filepath.Dir(storeFilename)
	_, err := os.Stat(ollamaDir)
	if errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(ollamaDir, 0o755); err != nil {
			slog.Error(fmt.Sprintf("create ollama dir %s: %v", ollamaDir, err))
			return
		}
	}
	payload, err := json.Marshal(store)
	if err != nil {
		slog.Error(fmt.Sprintf("failed to marshal store: %s", err))
		return
	}
	fp, err := os.OpenFile(storeFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
	if err != nil {
		slog.Error(fmt.Sprintf("write store payload %s: %v", storeFilename, err))
		return
	}
	defer fp.Close()
	if n, err := fp.Write(payload); err != nil || n != len(payload) {
		slog.Error(fmt.Sprintf("write store payload %s: %d vs %d -- %v", storeFilename, n, len(payload), err))
		return
	}
	slog.Debug("Store contents: " + string(payload))
	slog.Info(fmt.Sprintf("wrote store: %s", storeFilename))
}

app/store/store_darwin.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package store

import (
	"os"
	"path/filepath"
)

func getStorePath() string {
	// TODO - system wide location?

	home := os.Getenv("HOME")
	return filepath.Join(home, "Library", "Application Support", "Ollama", "config.json")
}

app/store/store_linux.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package store

import (
	"os"
	"path/filepath"
)

func getStorePath() string {
	if os.Geteuid() == 0 {
		// TODO where should we store this on linux for system-wide operation?
		return "/etc/ollama/config.json"
	}

	home := os.Getenv("HOME")
	return filepath.Join(home, ".ollama", "config.json")
}

app/store/store_windows.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package store

import (
	"os"
	"path/filepath"
)

func getStorePath() string {
	localAppData := os.Getenv("LOCALAPPDATA")
	return filepath.Join(localAppData, "Ollama", "config.json")
}

app/tray/commontray/types.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package commontray

var (
	Title   = "Ollama"
	ToolTip = "Ollama"

	UpdateIconName = "tray_upgrade"
	IconName       = "tray"
)

type Callbacks struct {
	Quit       chan struct{}
	Update     chan struct{}
	DoFirstUse chan struct{}
	ShowLogs   chan struct{}
}

type OllamaTray interface {
	GetCallbacks() Callbacks
	Run()
	UpdateAvailable(ver string) error
	DisplayFirstUseNotification() error
	Quit()
}

app/tray/tray.go (new file, 28 lines)
@@ -0,0 +1,28 @@
package tray

import (
	"fmt"
	"runtime"

	"github.com/ollama/ollama/app/assets"
	"github.com/ollama/ollama/app/tray/commontray"
)

func NewTray() (commontray.OllamaTray, error) {
	extension := ".png"
	if runtime.GOOS == "windows" {
		extension = ".ico"
	}
	iconName := commontray.UpdateIconName + extension
	updateIcon, err := assets.GetIcon(iconName)
	if err != nil {
		return nil, fmt.Errorf("failed to load icon %s: %w", iconName, err)
	}
	iconName = commontray.IconName + extension
	icon, err := assets.GetIcon(iconName)
	if err != nil {
		return nil, fmt.Errorf("failed to load icon %s: %w", iconName, err)
	}

	return InitPlatformTray(icon, updateIcon)
}

app/tray/tray_nonwindows.go (new file, 13 lines)
@@ -0,0 +1,13 @@
//go:build !windows

package tray

import (
	"fmt"

	"github.com/ollama/ollama/app/tray/commontray"
)

func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {
	return nil, fmt.Errorf("NOT IMPLEMENTED YET")
}

app/tray/tray_windows.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package tray

import (
	"github.com/ollama/ollama/app/tray/commontray"
	"github.com/ollama/ollama/app/tray/wintray"
)

func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {
	return wintray.InitTray(icon, updateIcon)
}

app/tray/wintray/eventloop.go (new file, 183 lines)
@@ -0,0 +1,183 @@
//go:build windows

package wintray

import (
	"fmt"
	"log/slog"
	"sync"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	quitOnce sync.Once
)

func (t *winTray) Run() {
	nativeLoop()
}

func nativeLoop() {
	// Main message pump.
	slog.Debug("starting event handling loop")
	m := &struct {
		WindowHandle windows.Handle
		Message      uint32
		Wparam       uintptr
		Lparam       uintptr
		Time         uint32
		Pt           point
		LPrivate     uint32
	}{}
	for {
		ret, _, err := pGetMessage.Call(uintptr(unsafe.Pointer(m)), 0, 0, 0)

		// If the function retrieves a message other than WM_QUIT, the return value is nonzero.
		// If the function retrieves the WM_QUIT message, the return value is zero.
		// If there is an error, the return value is -1
		// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644936(v=vs.85).aspx
		switch int32(ret) {
		case -1:
			slog.Error(fmt.Sprintf("get message failure: %v", err))
			return
		case 0:
			return
		default:
			pTranslateMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
			pDispatchMessage.Call(uintptr(unsafe.Pointer(m)))  //nolint:errcheck
		}
	}
}

// WindowProc callback function that processes messages sent to a window.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms633573(v=vs.85).aspx
func (t *winTray) wndProc(hWnd windows.Handle, message uint32, wParam, lParam uintptr) (lResult uintptr) {
	const (
		WM_RBUTTONUP   = 0x0205
		WM_LBUTTONUP   = 0x0202
		WM_COMMAND     = 0x0111
		WM_ENDSESSION  = 0x0016
		WM_CLOSE       = 0x0010
		WM_DESTROY     = 0x0002
		WM_MOUSEMOVE   = 0x0200
		WM_LBUTTONDOWN = 0x0201
	)
	switch message {
	case WM_COMMAND:
		menuItemId := int32(wParam)
		// https://docs.microsoft.com/en-us/windows/win32/menurc/wm-command#menus
		switch menuItemId {
		case quitMenuID:
			select {
			case t.callbacks.Quit <- struct{}{}:
			// should not happen but in case not listening
			default:
				slog.Error("no listener on Quit")
			}
		case updateMenuID:
			select {
			case t.callbacks.Update <- struct{}{}:
			// should not happen but in case not listening
			default:
				slog.Error("no listener on Update")
			}
		case diagLogsMenuID:
			select {
			case t.callbacks.ShowLogs <- struct{}{}:
			// should not happen but in case not listening
			default:
				slog.Error("no listener on ShowLogs")
			}
		default:
			slog.Debug(fmt.Sprintf("Unexpected menu item id: %d", menuItemId))
		}
	case WM_CLOSE:
		boolRet, _, err := pDestroyWindow.Call(uintptr(t.window))
		if boolRet == 0 {
			slog.Error(fmt.Sprintf("failed to destroy window: %s", err))
		}
		err = t.wcex.unregister()
		if err != nil {
			slog.Error(fmt.Sprintf("failed to unregister window %s", err))
		}
	case WM_DESTROY:
		// same as WM_ENDSESSION, but throws 0 exit code after all
		defer pPostQuitMessage.Call(uintptr(int32(0))) //nolint:errcheck
		fallthrough
	case WM_ENDSESSION:
		t.muNID.Lock()
		if t.nid != nil {
			err := t.nid.delete()
			if err != nil {
				slog.Error(fmt.Sprintf("failed to delete nid: %s", err))
			}
		}
		t.muNID.Unlock()
	case t.wmSystrayMessage:
		switch lParam {
		case WM_MOUSEMOVE, WM_LBUTTONDOWN:
			// Ignore these...
		case WM_RBUTTONUP, WM_LBUTTONUP:
			err := t.showMenu()
			if err != nil {
				slog.Error(fmt.Sprintf("failed to show menu: %s", err))
			}
		case 0x405: // TODO - how is this magic value derived for the notification left click
			if t.pendingUpdate {
				select {
				case t.callbacks.Update <- struct{}{}:
				// should not happen but in case not listening
				default:
					slog.Error("no listener on Update")
				}
			} else {
				select {
				case t.callbacks.DoFirstUse <- struct{}{}:
				// should not happen but in case not listening
				default:
					slog.Error("no listener on DoFirstUse")
				}
			}
		case 0x404: // Middle click or close notification
			// slog.Debug("doing nothing on close of first time notification")
		default:
			// 0x402 also seems common - what is it?
			slog.Debug(fmt.Sprintf("unmanaged app message, lParm: 0x%x", lParam))
		}
	case t.wmTaskbarCreated: // on explorer.exe restarts
		t.muNID.Lock()
		err := t.nid.add()
		if err != nil {
			slog.Error(fmt.Sprintf("failed to refresh the taskbar on explorer restart: %s", err))
		}
		t.muNID.Unlock()
	default:
		// Calls the default window procedure to provide default processing for any window messages that an application does not process.
		// https://msdn.microsoft.com/en-us/library/windows/desktop/ms633572(v=vs.85).aspx
		lResult, _, _ = pDefWindowProc.Call(
			uintptr(hWnd),
			uintptr(message),
			wParam,
			lParam,
		)
	}
	return
}

func (t *winTray) Quit() {
	quitOnce.Do(quit)
}

func quit() {
	boolRet, _, err := pPostMessage.Call(
		uintptr(wt.window),
		WM_CLOSE,
		0,
		0,
	)
	if boolRet == 0 {
		slog.Error(fmt.Sprintf("failed to post close message on shutdown %s", err))
	}
}

app/tray/wintray/menus.go (new file, 71 lines)
@@ -0,0 +1,71 @@
//go:build windows

package wintray

import (
	"fmt"
	"log/slog"
	"unsafe"

	"golang.org/x/sys/windows"
)

const (
	updatAvailableMenuID = 1
	updateMenuID         = updatAvailableMenuID + 1
	separatorMenuID      = updateMenuID + 1
	diagLogsMenuID       = separatorMenuID + 1
	diagSeparatorMenuID  = diagLogsMenuID + 1
	quitMenuID           = diagSeparatorMenuID + 1
)

func (t *winTray) initMenus() error {
	if err := t.addOrUpdateMenuItem(diagLogsMenuID, 0, diagLogsMenuTitle, false); err != nil {
		return fmt.Errorf("unable to create menu entries %w\n", err)
	}
	if err := t.addSeparatorMenuItem(diagSeparatorMenuID, 0); err != nil {
		return fmt.Errorf("unable to create menu entries %w", err)
	}
	if err := t.addOrUpdateMenuItem(quitMenuID, 0, quitMenuTitle, false); err != nil {
		return fmt.Errorf("unable to create menu entries %w\n", err)
	}
	return nil
}

func (t *winTray) UpdateAvailable(ver string) error {
	if !t.updateNotified {
		slog.Debug("updating menu and sending notification for new update")
		if err := t.addOrUpdateMenuItem(updatAvailableMenuID, 0, updateAvailableMenuTitle, true); err != nil {
			return fmt.Errorf("unable to create menu entries %w", err)
		}
		if err := t.addOrUpdateMenuItem(updateMenuID, 0, updateMenutTitle, false); err != nil {
			return fmt.Errorf("unable to create menu entries %w", err)
		}
		if err := t.addSeparatorMenuItem(separatorMenuID, 0); err != nil {
			return fmt.Errorf("unable to create menu entries %w", err)
		}
		iconFilePath, err := iconBytesToFilePath(wt.updateIcon)
		if err != nil {
			return fmt.Errorf("unable to write icon data to temp file: %w", err)
		}
		if err := wt.setIcon(iconFilePath); err != nil {
			return fmt.Errorf("unable to set icon: %w", err)
		}
		t.updateNotified = true

		t.pendingUpdate = true
		// Now pop up the notification
		t.muNID.Lock()
		defer t.muNID.Unlock()
		copy(t.nid.InfoTitle[:], windows.StringToUTF16(updateTitle))
		copy(t.nid.Info[:], windows.StringToUTF16(fmt.Sprintf(updateMessage, ver)))
		t.nid.Flags |= NIF_INFO
		t.nid.Timeout = 10
		t.nid.Size = uint32(unsafe.Sizeof(*wt.nid))
		err = t.nid.modify()
		if err != nil {
			return err
		}
	}
	return nil
}

app/tray/wintray/messages.go (new file, 15 lines)
@@ -0,0 +1,15 @@
//go:build windows

package wintray

const (
	firstTimeTitle   = "Ollama is running"
	firstTimeMessage = "Click here to get started"
	updateTitle      = "Update available"
	updateMessage    = "Ollama version %s is ready to install"

	quitMenuTitle            = "Quit Ollama"
	updateAvailableMenuTitle = "An update is available"
	updateMenutTitle         = "Restart to update"
	diagLogsMenuTitle        = "View logs"
)

app/tray/wintray/notifyicon.go (new file, 66 lines)
@@ -0,0 +1,66 @@
//go:build windows

package wintray

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// Contains information that the system needs to display notifications in the notification area.
// Used by Shell_NotifyIcon.
// https://msdn.microsoft.com/en-us/library/windows/desktop/bb773352(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159
type notifyIconData struct {
	Size                       uint32
	Wnd                        windows.Handle
	ID, Flags, CallbackMessage uint32
	Icon                       windows.Handle
	Tip                        [128]uint16
	State, StateMask           uint32
	Info                       [256]uint16
	// Timeout, Version uint32
	Timeout uint32

	InfoTitle   [64]uint16
	InfoFlags   uint32
	GuidItem    windows.GUID
	BalloonIcon windows.Handle
}

func (nid *notifyIconData) add() error {
	const NIM_ADD = 0x00000000
	res, _, err := pShellNotifyIcon.Call(
		uintptr(NIM_ADD),
		uintptr(unsafe.Pointer(nid)),
	)
	if res == 0 {
		return err
	}
	return nil
}

func (nid *notifyIconData) modify() error {
	const NIM_MODIFY = 0x00000001
	res, _, err := pShellNotifyIcon.Call(
		uintptr(NIM_MODIFY),
		uintptr(unsafe.Pointer(nid)),
	)
	if res == 0 {
		return err
	}
	return nil
}

func (nid *notifyIconData) delete() error {
	const NIM_DELETE = 0x00000002
	res, _, err := pShellNotifyIcon.Call(
		uintptr(NIM_DELETE),
		uintptr(unsafe.Pointer(nid)),
	)
	if res == 0 {
		return err
	}
	return nil
}

app/tray/wintray/tray.go (new file, 481 lines; listing truncated here)
@@ -0,0 +1,481 @@
//go:build windows

package wintray

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"unsafe"

	"github.com/ollama/ollama/app/tray/commontray"
	"golang.org/x/sys/windows"
)

// Helpful sources: https://github.com/golang/exp/blob/master/shiny/driver/internal/win32

// Contains information about loaded resources
type winTray struct {
	instance,
	icon,
	cursor,
	window windows.Handle

	loadedImages   map[string]windows.Handle
	muLoadedImages sync.RWMutex

	// menus keeps track of the submenus keyed by the menu item ID, plus 0
	// which corresponds to the main popup menu.
	menus    map[uint32]windows.Handle
	muMenus  sync.RWMutex
	menuOf   map[uint32]windows.Handle
	muMenuOf sync.RWMutex
	// menuItemIcons maintains the bitmap of each menu item (if applies). It's
	// needed to show the icon correctly when showing a previously hidden menu
	// item again.
	// menuItemIcons map[uint32]windows.Handle
	// muMenuItemIcons sync.RWMutex
	visibleItems   map[uint32][]uint32
	muVisibleItems sync.RWMutex

	nid   *notifyIconData
	muNID sync.RWMutex
	wcex  *wndClassEx

	wmSystrayMessage,
	wmTaskbarCreated uint32

	pendingUpdate  bool
	updateNotified bool // Only pop up the notification once - TODO consider daily nag?
	// Callbacks
	callbacks  commontray.Callbacks
	normalIcon []byte
	updateIcon []byte
}

var wt winTray

func (t *winTray) GetCallbacks() commontray.Callbacks {
	return t.callbacks
}

func InitTray(icon, updateIcon []byte) (*winTray, error) {
	wt.callbacks.Quit = make(chan struct{})
	wt.callbacks.Update = make(chan struct{})
	wt.callbacks.ShowLogs = make(chan struct{})
	wt.callbacks.DoFirstUse = make(chan struct{})
	wt.normalIcon = icon
	wt.updateIcon = updateIcon
	if err := wt.initInstance(); err != nil {
		return nil, fmt.Errorf("Unable to init instance: %w\n", err)
	}

	if err := wt.createMenu(); err != nil {
		return nil, fmt.Errorf("Unable to create menu: %w\n", err)
	}

	iconFilePath, err := iconBytesToFilePath(wt.normalIcon)
	if err != nil {
		return nil, fmt.Errorf("Unable to write icon data to temp file: %w", err)
	}
	if err := wt.setIcon(iconFilePath); err != nil {
		return nil, fmt.Errorf("Unable to set icon: %w", err)
	}

	return &wt, wt.initMenus()
}

func (t *winTray) initInstance() error {
	const (
		className  = "OllamaClass"
		windowName = ""
	)

	t.wmSystrayMessage = WM_USER + 1
	t.visibleItems = make(map[uint32][]uint32)
	t.menus = make(map[uint32]windows.Handle)
	t.menuOf = make(map[uint32]windows.Handle)

	t.loadedImages = make(map[string]windows.Handle)

	taskbarEventNamePtr, _ := windows.UTF16PtrFromString("TaskbarCreated")
	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644947
	res, _, err := pRegisterWindowMessage.Call(
		uintptr(unsafe.Pointer(taskbarEventNamePtr)),
	)
	if res == 0 { // success 0xc000-0xfff
		return fmt.Errorf("failed to register window: %w", err)
	}
	t.wmTaskbarCreated = uint32(res)

	instanceHandle, _, err := pGetModuleHandle.Call(0)
	if instanceHandle == 0 {
		return err
	}
	t.instance = windows.Handle(instanceHandle)

	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms648072(v=vs.85).aspx
	iconHandle, _, err := pLoadIcon.Call(0, uintptr(IDI_APPLICATION))
	if iconHandle == 0 {
		return err
	}
	t.icon = windows.Handle(iconHandle)

	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms648391(v=vs.85).aspx
	cursorHandle, _, err := pLoadCursor.Call(0, uintptr(IDC_ARROW))
	if cursorHandle == 0 {
		return err
	}
	t.cursor = windows.Handle(cursorHandle)

	classNamePtr, err := windows.UTF16PtrFromString(className)
	if err != nil {
		return err
	}

	windowNamePtr, err := windows.UTF16PtrFromString(windowName)
	if err != nil {
		return err
	}

	t.wcex = &wndClassEx{
		Style:      CS_HREDRAW | CS_VREDRAW,
		WndProc:    windows.NewCallback(t.wndProc),
		Instance:   t.instance,
		Icon:       t.icon,
		Cursor:     t.cursor,
		Background: windows.Handle(6), // (COLOR_WINDOW + 1)
		ClassName:  classNamePtr,
		IconSm:     t.icon,
	}
	if err := t.wcex.register(); err != nil {
		return err
	}

	windowHandle, _, err := pCreateWindowEx.Call(
		uintptr(0),
		uintptr(unsafe.Pointer(classNamePtr)),
		uintptr(unsafe.Pointer(windowNamePtr)),
		uintptr(WS_OVERLAPPEDWINDOW),
		uintptr(CW_USEDEFAULT),
		uintptr(CW_USEDEFAULT),
		uintptr(CW_USEDEFAULT),
		uintptr(CW_USEDEFAULT),
		uintptr(0),
		uintptr(0),
		uintptr(t.instance),
		uintptr(0),
	)
	if windowHandle == 0 {
		return err
	}
	t.window = windows.Handle(windowHandle)

	pShowWindow.Call(uintptr(t.window), uintptr(SW_HIDE)) //nolint:errcheck

	boolRet, _, err := pUpdateWindow.Call(uintptr(t.window))
	if boolRet == 0 {
		slog.Error(fmt.Sprintf("failed to update window: %s", err))
	}

	t.muNID.Lock()
	defer t.muNID.Unlock()
	t.nid = &notifyIconData{
		Wnd:             t.window,
		ID:              100,
		Flags:           NIF_MESSAGE,
		CallbackMessage: t.wmSystrayMessage,
	}
	t.nid.Size = uint32(unsafe.Sizeof(*t.nid))

	return t.nid.add()
}

func (t *winTray) createMenu() error {
	menuHandle, _, err := pCreatePopupMenu.Call()
	if menuHandle == 0 {
		return err
	}
	t.menus[0] = windows.Handle(menuHandle)

	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647575(v=vs.85).aspx
	mi := struct {
		Size, Mask, Style, Max uint32
		Background             windows.Handle
		ContextHelpID          uint32
		MenuData               uintptr
	}{
		Mask: MIM_APPLYTOSUBMENUS,
	}
	mi.Size = uint32(unsafe.Sizeof(mi))

	res, _, err := pSetMenuInfo.Call(
		uintptr(t.menus[0]),
		uintptr(unsafe.Pointer(&mi)),
	)
	if res == 0 {
		return err
	}
	return nil
}

// Contains information about a menu item.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647578(v=vs.85).aspx
type menuItemInfo struct {
	Size, Mask, Type, State     uint32
	ID                          uint32
	SubMenu, Checked, Unchecked windows.Handle
	ItemData                    uintptr
||||||
|
TypeData *uint16
|
||||||
|
Cch uint32
|
||||||
|
BMPItem windows.Handle
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title string, disabled bool) error {
|
||||||
|
titlePtr, err := windows.UTF16PtrFromString(title)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mi := menuItemInfo{
|
||||||
|
Mask: MIIM_FTYPE | MIIM_STRING | MIIM_ID | MIIM_STATE,
|
||||||
|
Type: MFT_STRING,
|
||||||
|
ID: menuItemId,
|
||||||
|
TypeData: titlePtr,
|
||||||
|
Cch: uint32(len(title)),
|
||||||
|
}
|
||||||
|
mi.Size = uint32(unsafe.Sizeof(mi))
|
||||||
|
if disabled {
|
||||||
|
mi.State |= MFS_DISABLED
|
||||||
|
}
|
||||||
|
|
||||||
|
var res uintptr
|
||||||
|
t.muMenus.RLock()
|
||||||
|
menu := t.menus[parentId]
|
||||||
|
t.muMenus.RUnlock()
|
||||||
|
if t.getVisibleItemIndex(parentId, menuItemId) != -1 {
|
||||||
|
// We set the menu item info based on the menuID
|
||||||
|
boolRet, _, err := pSetMenuItemInfo.Call(
|
||||||
|
uintptr(menu),
|
||||||
|
uintptr(menuItemId),
|
||||||
|
0,
|
||||||
|
uintptr(unsafe.Pointer(&mi)),
|
||||||
|
)
|
||||||
|
if boolRet == 0 {
|
||||||
|
return fmt.Errorf("failed to set menu item: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if res == 0 {
|
||||||
|
// Menu item does not already exist, create it
|
||||||
|
t.muMenus.RLock()
|
||||||
|
submenu, exists := t.menus[menuItemId]
|
||||||
|
t.muMenus.RUnlock()
|
||||||
|
if exists {
|
||||||
|
mi.Mask |= MIIM_SUBMENU
|
||||||
|
mi.SubMenu = submenu
|
||||||
|
}
|
||||||
|
t.addToVisibleItems(parentId, menuItemId)
|
||||||
|
position := t.getVisibleItemIndex(parentId, menuItemId)
|
||||||
|
res, _, err = pInsertMenuItem.Call(
|
||||||
|
uintptr(menu),
|
||||||
|
uintptr(position),
|
||||||
|
1,
|
||||||
|
uintptr(unsafe.Pointer(&mi)),
|
||||||
|
)
|
||||||
|
if res == 0 {
|
||||||
|
t.delFromVisibleItems(parentId, menuItemId)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t.muMenuOf.Lock()
|
||||||
|
t.menuOf[menuItemId] = menu
|
||||||
|
t.muMenuOf.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *winTray) addSeparatorMenuItem(menuItemId, parentId uint32) error {
|
||||||
|
mi := menuItemInfo{
|
||||||
|
Mask: MIIM_FTYPE | MIIM_ID | MIIM_STATE,
|
||||||
|
Type: MFT_SEPARATOR,
|
||||||
|
ID: menuItemId,
|
||||||
|
}
|
||||||
|
|
||||||
|
mi.Size = uint32(unsafe.Sizeof(mi))
|
||||||
|
|
||||||
|
t.addToVisibleItems(parentId, menuItemId)
|
||||||
|
position := t.getVisibleItemIndex(parentId, menuItemId)
|
||||||
|
t.muMenus.RLock()
|
||||||
|
menu := uintptr(t.menus[parentId])
|
||||||
|
t.muMenus.RUnlock()
|
||||||
|
res, _, err := pInsertMenuItem.Call(
|
||||||
|
menu,
|
||||||
|
uintptr(position),
|
||||||
|
1,
|
||||||
|
uintptr(unsafe.Pointer(&mi)),
|
||||||
|
)
|
||||||
|
if res == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// func (t *winTray) hideMenuItem(menuItemId, parentId uint32) error {
|
||||||
|
// const ERROR_SUCCESS syscall.Errno = 0
|
||||||
|
|
||||||
|
// t.muMenus.RLock()
|
||||||
|
// menu := uintptr(t.menus[parentId])
|
||||||
|
// t.muMenus.RUnlock()
|
||||||
|
// res, _, err := pRemoveMenu.Call(
|
||||||
|
// menu,
|
||||||
|
// uintptr(menuItemId),
|
||||||
|
// MF_BYCOMMAND,
|
||||||
|
// )
|
||||||
|
// if res == 0 && err.(syscall.Errno) != ERROR_SUCCESS {
|
||||||
|
// return err
|
||||||
|
// }
|
||||||
|
// t.delFromVisibleItems(parentId, menuItemId)
|
||||||
|
|
||||||
|
// return nil
|
||||||
|
// }
|
||||||
|
|
||||||
|
func (t *winTray) showMenu() error {
|
||||||
|
p := point{}
|
||||||
|
boolRet, _, err := pGetCursorPos.Call(uintptr(unsafe.Pointer(&p)))
|
||||||
|
if boolRet == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
boolRet, _, err = pSetForegroundWindow.Call(uintptr(t.window))
|
||||||
|
if boolRet == 0 {
|
||||||
|
slog.Warn(fmt.Sprintf("failed to bring menu to foreground: %s", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
boolRet, _, err = pTrackPopupMenu.Call(
|
||||||
|
uintptr(t.menus[0]),
|
||||||
|
TPM_BOTTOMALIGN|TPM_LEFTALIGN,
|
||||||
|
uintptr(p.X),
|
||||||
|
uintptr(p.Y),
|
||||||
|
0,
|
||||||
|
uintptr(t.window),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if boolRet == 0 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *winTray) delFromVisibleItems(parent, val uint32) {
|
||||||
|
t.muVisibleItems.Lock()
|
||||||
|
defer t.muVisibleItems.Unlock()
|
||||||
|
visibleItems := t.visibleItems[parent]
|
||||||
|
for i, itemval := range visibleItems {
|
||||||
|
if val == itemval {
|
||||||
|
t.visibleItems[parent] = append(visibleItems[:i], visibleItems[i+1:]...)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *winTray) addToVisibleItems(parent, val uint32) {
|
||||||
|
t.muVisibleItems.Lock()
|
||||||
|
defer t.muVisibleItems.Unlock()
|
||||||
|
if visibleItems, exists := t.visibleItems[parent]; !exists {
|
||||||
|
t.visibleItems[parent] = []uint32{val}
|
||||||
|
} else {
|
||||||
|
newvisible := append(visibleItems, val)
|
||||||
|
sort.Slice(newvisible, func(i, j int) bool { return newvisible[i] < newvisible[j] })
|
||||||
|
t.visibleItems[parent] = newvisible
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *winTray) getVisibleItemIndex(parent, val uint32) int {
|
||||||
|
t.muVisibleItems.RLock()
|
||||||
|
defer t.muVisibleItems.RUnlock()
|
||||||
|
for i, itemval := range t.visibleItems[parent] {
|
||||||
|
if val == itemval {
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
func iconBytesToFilePath(iconBytes []byte) (string, error) {
|
||||||
|
bh := md5.Sum(iconBytes)
|
||||||
|
dataHash := hex.EncodeToString(bh[:])
|
||||||
|
iconFilePath := filepath.Join(os.TempDir(), "ollama_temp_icon_"+dataHash)
|
||||||
|
|
||||||
|
if _, err := os.Stat(iconFilePath); os.IsNotExist(err) {
|
||||||
|
if err := os.WriteFile(iconFilePath, iconBytes, 0644); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iconFilePath, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loads an image from file and shows it in tray.
|
||||||
|
// Shell_NotifyIcon: https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159(v=vs.85).aspx
|
||||||
|
func (t *winTray) setIcon(src string) error {
|
||||||
|
h, err := t.loadIconFrom(src)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.muNID.Lock()
|
||||||
|
defer t.muNID.Unlock()
|
||||||
|
t.nid.Icon = h
|
||||||
|
t.nid.Flags |= NIF_ICON
|
||||||
|
t.nid.Size = uint32(unsafe.Sizeof(*t.nid))
|
||||||
|
|
||||||
|
return t.nid.modify()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Loads an image from file to be shown in tray or menu item.
|
||||||
|
// LoadImage: https://msdn.microsoft.com/en-us/library/windows/desktop/ms648045(v=vs.85).aspx
|
||||||
|
func (t *winTray) loadIconFrom(src string) (windows.Handle, error) {
|
||||||
|
// Save and reuse handles of loaded images
|
||||||
|
t.muLoadedImages.RLock()
|
||||||
|
h, ok := t.loadedImages[src]
|
||||||
|
t.muLoadedImages.RUnlock()
|
||||||
|
if !ok {
|
||||||
|
srcPtr, err := windows.UTF16PtrFromString(src)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
res, _, err := pLoadImage.Call(
|
||||||
|
0,
|
||||||
|
uintptr(unsafe.Pointer(srcPtr)),
|
||||||
|
IMAGE_ICON,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
|
LR_LOADFROMFILE|LR_DEFAULTSIZE,
|
||||||
|
)
|
||||||
|
if res == 0 {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
h = windows.Handle(res)
|
||||||
|
t.muLoadedImages.Lock()
|
||||||
|
t.loadedImages[src] = h
|
||||||
|
t.muLoadedImages.Unlock()
|
||||||
|
}
|
||||||
|
return h, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *winTray) DisplayFirstUseNotification() error {
|
||||||
|
t.muNID.Lock()
|
||||||
|
defer t.muNID.Unlock()
|
||||||
|
copy(t.nid.InfoTitle[:], windows.StringToUTF16(firstTimeTitle))
|
||||||
|
copy(t.nid.Info[:], windows.StringToUTF16(firstTimeMessage))
|
||||||
|
t.nid.Flags |= NIF_INFO
|
||||||
|
t.nid.Size = uint32(unsafe.Sizeof(*wt.nid))
|
||||||
|
|
||||||
|
return t.nid.modify()
|
||||||
|
}
|
||||||
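InitTray above wires up four callback channels (Quit, Update, ShowLogs, DoFirstUse) that the menu handlers signal on, and GetCallbacks exposes them to the rest of the app. A minimal sketch of how a caller might drain those channels follows; the import path and the handler bodies are assumptions based on the identifiers in this diff, not code from it.

package main

import (
    "log/slog"

    "github.com/ollama/ollama/app/tray/commontray"
)

// handleTrayEvents is a hypothetical consumer of the callback channels
// created by InitTray; it selects on them until Quit fires.
func handleTrayEvents(cb commontray.Callbacks) {
    for {
        select {
        case <-cb.Quit:
            slog.Info("tray quit requested")
            return
        case <-cb.Update:
            slog.Info("update requested from tray")
        case <-cb.ShowLogs:
            slog.Info("show logs requested from tray")
        case <-cb.DoFirstUse:
            slog.Info("first-use walkthrough requested from tray")
        }
    }
}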
app/tray/wintray/w32api.go (new file, 89 lines)
@@ -0,0 +1,89 @@
//go:build windows

package wintray

import (
    "runtime"

    "golang.org/x/sys/windows"
)

var (
    k32 = windows.NewLazySystemDLL("Kernel32.dll")
    u32 = windows.NewLazySystemDLL("User32.dll")
    s32 = windows.NewLazySystemDLL("Shell32.dll")

    pCreatePopupMenu       = u32.NewProc("CreatePopupMenu")
    pCreateWindowEx        = u32.NewProc("CreateWindowExW")
    pDefWindowProc         = u32.NewProc("DefWindowProcW")
    pDestroyWindow         = u32.NewProc("DestroyWindow")
    pDispatchMessage       = u32.NewProc("DispatchMessageW")
    pGetCursorPos          = u32.NewProc("GetCursorPos")
    pGetMessage            = u32.NewProc("GetMessageW")
    pGetModuleHandle       = k32.NewProc("GetModuleHandleW")
    pInsertMenuItem        = u32.NewProc("InsertMenuItemW")
    pLoadCursor            = u32.NewProc("LoadCursorW")
    pLoadIcon              = u32.NewProc("LoadIconW")
    pLoadImage             = u32.NewProc("LoadImageW")
    pPostMessage           = u32.NewProc("PostMessageW")
    pPostQuitMessage       = u32.NewProc("PostQuitMessage")
    pRegisterClass         = u32.NewProc("RegisterClassExW")
    pRegisterWindowMessage = u32.NewProc("RegisterWindowMessageW")
    pSetForegroundWindow   = u32.NewProc("SetForegroundWindow")
    pSetMenuInfo           = u32.NewProc("SetMenuInfo")
    pSetMenuItemInfo       = u32.NewProc("SetMenuItemInfoW")
    pShellNotifyIcon       = s32.NewProc("Shell_NotifyIconW")
    pShowWindow            = u32.NewProc("ShowWindow")
    pTrackPopupMenu        = u32.NewProc("TrackPopupMenu")
    pTranslateMessage      = u32.NewProc("TranslateMessage")
    pUnregisterClass       = u32.NewProc("UnregisterClassW")
    pUpdateWindow          = u32.NewProc("UpdateWindow")
)

const (
    CS_HREDRAW          = 0x0002
    CS_VREDRAW          = 0x0001
    CW_USEDEFAULT       = 0x80000000
    IDC_ARROW           = 32512 // Standard arrow
    IDI_APPLICATION     = 32512
    IMAGE_ICON          = 1          // Loads an icon
    LR_DEFAULTSIZE      = 0x00000040 // Loads default-size icon for windows(SM_CXICON x SM_CYICON) if cx, cy are set to zero
    LR_LOADFROMFILE     = 0x00000010 // Loads the stand-alone image from the file
    MF_BYCOMMAND        = 0x00000000
    MFS_DISABLED        = 0x00000003
    MFT_SEPARATOR       = 0x00000800
    MFT_STRING          = 0x00000000
    MIIM_BITMAP         = 0x00000080
    MIIM_FTYPE          = 0x00000100
    MIIM_ID             = 0x00000002
    MIIM_STATE          = 0x00000001
    MIIM_STRING         = 0x00000040
    MIIM_SUBMENU        = 0x00000004
    MIM_APPLYTOSUBMENUS = 0x80000000
    NIF_ICON            = 0x00000002
    NIF_INFO            = 0x00000010
    NIF_MESSAGE         = 0x00000001
    SW_HIDE             = 0
    TPM_BOTTOMALIGN     = 0x0020
    TPM_LEFTALIGN       = 0x0000
    WM_CLOSE            = 0x0010
    WM_USER             = 0x0400
    WS_CAPTION          = 0x00C00000
    WS_MAXIMIZEBOX      = 0x00010000
    WS_MINIMIZEBOX      = 0x00020000
    WS_OVERLAPPED       = 0x00000000
    WS_OVERLAPPEDWINDOW = WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX
    WS_SYSMENU          = 0x00080000
    WS_THICKFRAME       = 0x00040000
)

// Not sure if this is actually needed on windows
func init() {
    runtime.LockOSThread()
}

// The POINT structure defines the x- and y- coordinates of a point.
// https://msdn.microsoft.com/en-us/library/windows/desktop/dd162805(v=vs.85).aspx
type point struct {
    X, Y int32
}
app/tray/wintray/winclass.go (new file, 45 lines)
@@ -0,0 +1,45 @@
//go:build windows

package wintray

import (
    "unsafe"

    "golang.org/x/sys/windows"
)

// Contains window class information.
// It is used with the RegisterClassEx and GetClassInfoEx functions.
// https://msdn.microsoft.com/en-us/library/ms633577.aspx
type wndClassEx struct {
    Size, Style                        uint32
    WndProc                            uintptr
    ClsExtra, WndExtra                 int32
    Instance, Icon, Cursor, Background windows.Handle
    MenuName, ClassName                *uint16
    IconSm                             windows.Handle
}

// Registers a window class for subsequent use in calls to the CreateWindow or CreateWindowEx function.
// https://msdn.microsoft.com/en-us/library/ms633587.aspx
func (w *wndClassEx) register() error {
    w.Size = uint32(unsafe.Sizeof(*w))
    res, _, err := pRegisterClass.Call(uintptr(unsafe.Pointer(w)))
    if res == 0 {
        return err
    }
    return nil
}

// Unregisters a window class, freeing the memory required for the class.
// https://msdn.microsoft.com/en-us/library/ms644899.aspx
func (w *wndClassEx) unregister() error {
    res, _, err := pUnregisterClass.Call(
        uintptr(unsafe.Pointer(w.ClassName)),
        uintptr(w.Instance),
    )
    if res == 0 {
        return err
    }
    return nil
}
auth/auth.go (new file, 91 lines)
@@ -0,0 +1,91 @@
package auth

import (
    "bytes"
    "context"
    "crypto/rand"
    "encoding/base64"
    "fmt"
    "io"
    "log/slog"
    "os"
    "path/filepath"
    "strings"

    "golang.org/x/crypto/ssh"
)

const defaultPrivateKey = "id_ed25519"

func keyPath() (string, error) {
    home, err := os.UserHomeDir()
    if err != nil {
        return "", err
    }

    return filepath.Join(home, ".ollama", defaultPrivateKey), nil
}

func GetPublicKey() (string, error) {
    keyPath, err := keyPath()
    if err != nil {
        return "", err
    }

    privateKeyFile, err := os.ReadFile(keyPath)
    if err != nil {
        slog.Info(fmt.Sprintf("Failed to load private key: %v", err))
        return "", err
    }

    privateKey, err := ssh.ParsePrivateKey(privateKeyFile)
    if err != nil {
        return "", err
    }

    publicKey := ssh.MarshalAuthorizedKey(privateKey.PublicKey())

    return strings.TrimSpace(string(publicKey)), nil
}

func NewNonce(r io.Reader, length int) (string, error) {
    nonce := make([]byte, length)
    if _, err := io.ReadFull(r, nonce); err != nil {
        return "", err
    }

    return base64.RawURLEncoding.EncodeToString(nonce), nil
}

func Sign(ctx context.Context, bts []byte) (string, error) {
    keyPath, err := keyPath()
    if err != nil {
        return "", err
    }

    privateKeyFile, err := os.ReadFile(keyPath)
    if err != nil {
        slog.Info(fmt.Sprintf("Failed to load private key: %v", err))
        return "", err
    }

    privateKey, err := ssh.ParsePrivateKey(privateKeyFile)
    if err != nil {
        return "", err
    }

    // get the pubkey, but remove the type
    publicKey := ssh.MarshalAuthorizedKey(privateKey.PublicKey())
    parts := bytes.Split(publicKey, []byte(" "))
    if len(parts) < 2 {
        return "", fmt.Errorf("malformed public key")
    }

    signedData, err := privateKey.Sign(rand.Reader, bts)
    if err != nil {
        return "", err
    }

    // signature is <pubkey>:<signature>
    return fmt.Sprintf("%s:%s", bytes.TrimSpace(parts[1]), base64.StdEncoding.EncodeToString(signedData.Blob)), nil
}
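For context on the authorization string produced by Sign above (the base64 public-key body and the base64 signature blob joined by a colon), here is a minimal sketch of how a receiving side could check it. This Verify helper is not part of this diff; its name and the hard-coded ed25519 key-type prefix are illustrative assumptions.

package auth

import (
    "bytes"
    "encoding/base64"
    "fmt"

    "golang.org/x/crypto/ssh"
)

// Verify is a hypothetical counterpart to Sign: it re-parses the public key,
// decodes the signature blob, and checks the signature over the signed bytes.
func Verify(bts []byte, authorization string) error {
    parts := bytes.SplitN([]byte(authorization), []byte(":"), 2)
    if len(parts) != 2 {
        return fmt.Errorf("malformed authorization string")
    }

    // Sign strips the key type, so it must be prepended again before
    // ssh.ParseAuthorizedKey will accept the key (ed25519 assumed here).
    pub, _, _, _, err := ssh.ParseAuthorizedKey(append([]byte("ssh-ed25519 "), parts[0]...))
    if err != nil {
        return err
    }

    blob, err := base64.StdEncoding.DecodeString(string(parts[1]))
    if err != nil {
        return err
    }

    return pub.Verify(bts, &ssh.Signature{Format: pub.Type(), Blob: blob})
}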
cmd/cmd.go (1149 lines changed)
File diff suppressed because it is too large
cmd/interactive.go (new file, 691 lines)
@@ -0,0 +1,691 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"slices"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/api"
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
|
"github.com/ollama/ollama/progress"
|
||||||
|
"github.com/ollama/ollama/readline"
|
||||||
|
"github.com/ollama/ollama/types/errtypes"
|
||||||
|
)
|
||||||
|
|
||||||
|
type MultilineState int
|
||||||
|
|
||||||
|
const (
|
||||||
|
MultilineNone MultilineState = iota
|
||||||
|
MultilinePrompt
|
||||||
|
MultilineSystem
|
||||||
|
MultilineTemplate
|
||||||
|
)
|
||||||
|
|
||||||
|
func loadModel(cmd *cobra.Command, opts *runOptions) error {
|
||||||
|
client, err := api.ClientFromEnvironment()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p := progress.NewProgress(os.Stderr)
|
||||||
|
defer p.StopAndClear()
|
||||||
|
|
||||||
|
spinner := progress.NewSpinner("")
|
||||||
|
p.Add("", spinner)
|
||||||
|
|
||||||
|
showReq := api.ShowRequest{Name: opts.Model}
|
||||||
|
showResp, err := client.Show(cmd.Context(), &showReq)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
opts.MultiModal = slices.Contains(showResp.Details.Families, "clip")
|
||||||
|
opts.ParentModel = showResp.Details.ParentModel
|
||||||
|
|
||||||
|
if len(showResp.Messages) > 0 {
|
||||||
|
opts.Messages = append(opts.Messages, showResp.Messages...)
|
||||||
|
}
|
||||||
|
|
||||||
|
chatReq := &api.ChatRequest{
|
||||||
|
Model: opts.Model,
|
||||||
|
Messages: []api.Message{},
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.KeepAlive != nil {
|
||||||
|
chatReq.KeepAlive = opts.KeepAlive
|
||||||
|
}
|
||||||
|
|
||||||
|
err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
|
||||||
|
p.StopAndClear()
|
||||||
|
if len(opts.Messages) > 0 {
|
||||||
|
for _, msg := range opts.Messages {
|
||||||
|
switch msg.Role {
|
||||||
|
case "user":
|
||||||
|
fmt.Printf(">>> %s\n", msg.Content)
|
||||||
|
case "assistant":
|
||||||
|
state := &displayResponseState{}
|
||||||
|
displayResponse(msg.Content, opts.WordWrap, state)
|
||||||
|
fmt.Println()
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateInteractive(cmd *cobra.Command, opts runOptions) error {
|
||||||
|
opts.Messages = make([]api.Message, 0)
|
||||||
|
|
||||||
|
err := loadModel(cmd, &opts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
usage := func() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Available Commands:")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set Set session variables")
|
||||||
|
fmt.Fprintln(os.Stderr, " /show Show model information")
|
||||||
|
fmt.Fprintln(os.Stderr, " /load <model> Load a session or model")
|
||||||
|
fmt.Fprintln(os.Stderr, " /save <model> Save your current session")
|
||||||
|
fmt.Fprintln(os.Stderr, " /clear Clear session context")
|
||||||
|
fmt.Fprintln(os.Stderr, " /bye Exit")
|
||||||
|
fmt.Fprintln(os.Stderr, " /?, /help Help for a command")
|
||||||
|
fmt.Fprintln(os.Stderr, " /? shortcuts Help for keyboard shortcuts")
|
||||||
|
fmt.Fprintln(os.Stderr, "")
|
||||||
|
fmt.Fprintln(os.Stderr, "Use \"\"\" to begin a multi-line message.")
|
||||||
|
|
||||||
|
if opts.MultiModal {
|
||||||
|
fmt.Fprintf(os.Stderr, "Use %s to include .jpg or .png images.\n", filepath.FromSlash("/path/to/file"))
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprintln(os.Stderr, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
usageSet := func() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Available Commands:")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter ... Set a parameter")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set system <string> Set system message")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set template <string> Set prompt template")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set history Enable history")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set nohistory Disable history")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set wordwrap Enable wordwrap")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set nowordwrap Disable wordwrap")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set format json Enable JSON mode")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set noformat Disable formatting")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set verbose Show LLM stats")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set quiet Disable LLM stats")
|
||||||
|
fmt.Fprintln(os.Stderr, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
usageShortcuts := func() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Available keyboard shortcuts:")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + a Move to the beginning of the line (Home)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + e Move to the end of the line (End)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Alt + b Move back (left) one word")
|
||||||
|
fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor")
|
||||||
|
fmt.Fprintln(os.Stderr, "")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")
|
||||||
|
fmt.Fprintln(os.Stderr, " Ctrl + d Exit ollama (/bye)")
|
||||||
|
fmt.Fprintln(os.Stderr, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
usageShow := func() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Available Commands:")
|
||||||
|
fmt.Fprintln(os.Stderr, " /show info Show details for this model")
|
||||||
|
fmt.Fprintln(os.Stderr, " /show license Show model license")
|
||||||
|
fmt.Fprintln(os.Stderr, " /show modelfile Show Modelfile for this model")
|
||||||
|
fmt.Fprintln(os.Stderr, " /show parameters Show parameters for this model")
|
||||||
|
fmt.Fprintln(os.Stderr, " /show system Show system message")
|
||||||
|
fmt.Fprintln(os.Stderr, " /show template Show prompt template")
|
||||||
|
fmt.Fprintln(os.Stderr, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// only list out the most common parameters
|
||||||
|
usageParameters := func() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Available Parameters:")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter seed <int> Random number seed")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter num_predict <int> Max number of tokens to predict")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter top_k <int> Pick from top k num of tokens")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter top_p <float> Pick token based on sum of probabilities")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter num_ctx <int> Set the context size")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter temperature <float> Set creativity level")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter repeat_penalty <float> How strongly to penalize repetitions")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter repeat_last_n <int> Set how far back to look for repetitions")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter num_gpu <int> The number of layers to send to the GPU")
|
||||||
|
fmt.Fprintln(os.Stderr, " /set parameter stop <string> <string> ... Set the stop parameters")
|
||||||
|
fmt.Fprintln(os.Stderr, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner, err := readline.New(readline.Prompt{
|
||||||
|
Prompt: ">>> ",
|
||||||
|
AltPrompt: "... ",
|
||||||
|
Placeholder: "Send a message (/? for help)",
|
||||||
|
AltPlaceholder: `Use """ to end multi-line input`,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if envconfig.NoHistory {
|
||||||
|
scanner.HistoryDisable()
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Print(readline.StartBracketedPaste)
|
||||||
|
defer fmt.Printf(readline.EndBracketedPaste)
|
||||||
|
|
||||||
|
var sb strings.Builder
|
||||||
|
var multiline MultilineState
|
||||||
|
|
||||||
|
for {
|
||||||
|
line, err := scanner.Readline()
|
||||||
|
switch {
|
||||||
|
case errors.Is(err, io.EOF):
|
||||||
|
fmt.Println()
|
||||||
|
return nil
|
||||||
|
case errors.Is(err, readline.ErrInterrupt):
|
||||||
|
if line == "" {
|
||||||
|
fmt.Println("\nUse Ctrl + d or /bye to exit.")
|
||||||
|
}
|
||||||
|
|
||||||
|
scanner.Prompt.UseAlt = false
|
||||||
|
sb.Reset()
|
||||||
|
|
||||||
|
continue
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case multiline != MultilineNone:
|
||||||
|
// check if there's a multiline terminating string
|
||||||
|
before, ok := strings.CutSuffix(line, `"""`)
|
||||||
|
sb.WriteString(before)
|
||||||
|
if !ok {
|
||||||
|
fmt.Fprintln(&sb)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
switch multiline {
|
||||||
|
case MultilineSystem:
|
||||||
|
opts.System = sb.String()
|
||||||
|
opts.Messages = append(opts.Messages, api.Message{Role: "system", Content: opts.System})
|
||||||
|
fmt.Println("Set system message.")
|
||||||
|
sb.Reset()
|
||||||
|
case MultilineTemplate:
|
||||||
|
opts.Template = sb.String()
|
||||||
|
fmt.Println("Set prompt template.")
|
||||||
|
sb.Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
multiline = MultilineNone
|
||||||
|
scanner.Prompt.UseAlt = false
|
||||||
|
case strings.HasPrefix(line, `"""`):
|
||||||
|
line := strings.TrimPrefix(line, `"""`)
|
||||||
|
line, ok := strings.CutSuffix(line, `"""`)
|
||||||
|
sb.WriteString(line)
|
||||||
|
if !ok {
|
||||||
|
// no multiline terminating string; need more input
|
||||||
|
fmt.Fprintln(&sb)
|
||||||
|
multiline = MultilinePrompt
|
||||||
|
scanner.Prompt.UseAlt = true
|
||||||
|
}
|
||||||
|
case scanner.Pasting:
|
||||||
|
fmt.Fprintln(&sb, line)
|
||||||
|
continue
|
||||||
|
case strings.HasPrefix(line, "/list"):
|
||||||
|
args := strings.Fields(line)
|
||||||
|
if err := ListHandler(cmd, args[1:]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case strings.HasPrefix(line, "/load"):
|
||||||
|
args := strings.Fields(line)
|
||||||
|
if len(args) != 2 {
|
||||||
|
fmt.Println("Usage:\n /load <modelname>")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
opts.Model = args[1]
|
||||||
|
opts.Messages = []api.Message{}
|
||||||
|
fmt.Printf("Loading model '%s'\n", opts.Model)
|
||||||
|
if err := loadModel(cmd, &opts); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
case strings.HasPrefix(line, "/save"):
|
||||||
|
args := strings.Fields(line)
|
||||||
|
if len(args) != 2 {
|
||||||
|
fmt.Println("Usage:\n /save <modelname>")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := api.ClientFromEnvironment()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("error: couldn't connect to ollama server")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
req := &api.CreateRequest{
|
||||||
|
Name: args[1],
|
||||||
|
Modelfile: buildModelfile(opts),
|
||||||
|
}
|
||||||
|
fn := func(resp api.ProgressResponse) error { return nil }
|
||||||
|
err = client.Create(cmd.Context(), req, fn)
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), errtypes.InvalidModelNameErrMsg) {
|
||||||
|
fmt.Printf("error: The model name '%s' is invalid\n", args[1])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fmt.Printf("Created new model '%s'\n", args[1])
|
||||||
|
continue
|
||||||
|
case strings.HasPrefix(line, "/clear"):
|
||||||
|
opts.Messages = []api.Message{}
|
||||||
|
if opts.System != "" {
|
||||||
|
newMessage := api.Message{Role: "system", Content: opts.System}
|
||||||
|
opts.Messages = append(opts.Messages, newMessage)
|
||||||
|
}
|
||||||
|
fmt.Println("Cleared session context")
|
||||||
|
continue
|
||||||
|
case strings.HasPrefix(line, "/set"):
|
||||||
|
args := strings.Fields(line)
|
||||||
|
if len(args) > 1 {
|
||||||
|
switch args[1] {
|
||||||
|
case "history":
|
||||||
|
scanner.HistoryEnable()
|
||||||
|
case "nohistory":
|
||||||
|
scanner.HistoryDisable()
|
||||||
|
case "wordwrap":
|
||||||
|
opts.WordWrap = true
|
||||||
|
fmt.Println("Set 'wordwrap' mode.")
|
||||||
|
case "nowordwrap":
|
||||||
|
opts.WordWrap = false
|
||||||
|
fmt.Println("Set 'nowordwrap' mode.")
|
||||||
|
case "verbose":
|
||||||
|
if err := cmd.Flags().Set("verbose", "true"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fmt.Println("Set 'verbose' mode.")
|
||||||
|
case "quiet":
|
||||||
|
if err := cmd.Flags().Set("verbose", "false"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
fmt.Println("Set 'quiet' mode.")
|
||||||
|
case "format":
|
||||||
|
if len(args) < 3 || args[2] != "json" {
|
||||||
|
fmt.Println("Invalid or missing format. For 'json' mode use '/set format json'")
|
||||||
|
} else {
|
||||||
|
opts.Format = args[2]
|
||||||
|
fmt.Printf("Set format to '%s' mode.\n", args[2])
|
||||||
|
}
|
||||||
|
case "noformat":
|
||||||
|
opts.Format = ""
|
||||||
|
fmt.Println("Disabled format.")
|
||||||
|
case "parameter":
|
||||||
|
if len(args) < 4 {
|
||||||
|
usageParameters()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
params := args[3:]
|
||||||
|
fp, err := api.FormatParams(map[string][]string{args[2]: params})
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Couldn't set parameter: %q\n", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Printf("Set parameter '%s' to '%s'\n", args[2], strings.Join(params, ", "))
|
||||||
|
opts.Options[args[2]] = fp[args[2]]
|
||||||
|
case "system", "template":
|
||||||
|
if len(args) < 3 {
|
||||||
|
usageSet()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if args[1] == "system" {
|
||||||
|
multiline = MultilineSystem
|
||||||
|
} else if args[1] == "template" {
|
||||||
|
multiline = MultilineTemplate
|
||||||
|
}
|
||||||
|
|
||||||
|
line := strings.Join(args[2:], " ")
|
||||||
|
line, ok := strings.CutPrefix(line, `"""`)
|
||||||
|
if !ok {
|
||||||
|
multiline = MultilineNone
|
||||||
|
} else {
|
||||||
|
// only cut suffix if the line is multiline
|
||||||
|
line, ok = strings.CutSuffix(line, `"""`)
|
||||||
|
if ok {
|
||||||
|
multiline = MultilineNone
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(line)
|
||||||
|
if multiline != MultilineNone {
|
||||||
|
scanner.Prompt.UseAlt = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if args[1] == "system" {
|
||||||
|
opts.System = sb.String() // for display in modelfile
|
||||||
|
newMessage := api.Message{Role: "system", Content: sb.String()}
|
||||||
|
// Check if the slice is not empty and the last message is from 'system'
|
||||||
|
if len(opts.Messages) > 0 && opts.Messages[len(opts.Messages)-1].Role == "system" {
|
||||||
|
// Replace the last message
|
||||||
|
opts.Messages[len(opts.Messages)-1] = newMessage
|
||||||
|
} else {
|
||||||
|
opts.Messages = append(opts.Messages, newMessage)
|
||||||
|
}
|
||||||
|
fmt.Println("Set system message.")
|
||||||
|
sb.Reset()
|
||||||
|
} else if args[1] == "template" {
|
||||||
|
opts.Template = sb.String()
|
||||||
|
fmt.Println("Set prompt template.")
|
||||||
|
sb.Reset()
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.Reset()
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
usageSet()
|
||||||
|
}
|
||||||
|
case strings.HasPrefix(line, "/show"):
|
||||||
|
args := strings.Fields(line)
|
||||||
|
if len(args) > 1 {
|
||||||
|
client, err := api.ClientFromEnvironment()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("error: couldn't connect to ollama server")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req := &api.ShowRequest{
|
||||||
|
Name: opts.Model,
|
||||||
|
System: opts.System,
|
||||||
|
Template: opts.Template,
|
||||||
|
Options: opts.Options,
|
||||||
|
}
|
||||||
|
resp, err := client.Show(cmd.Context(), req)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("error: couldn't get model")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch args[1] {
|
||||||
|
case "info":
|
||||||
|
fmt.Println("Model details:")
|
||||||
|
if len(resp.Details.Families) > 0 {
|
||||||
|
fmt.Printf("Family %s\n", strings.Join(resp.Details.Families, ", "))
|
||||||
|
} else if resp.Details.Family != "" {
|
||||||
|
fmt.Printf("Family %s\n", resp.Details.Family)
|
||||||
|
}
|
||||||
|
fmt.Printf("Parameter Size %s\n", resp.Details.ParameterSize)
|
||||||
|
fmt.Printf("Quantization Level %s\n", resp.Details.QuantizationLevel)
|
||||||
|
fmt.Println("")
|
||||||
|
case "license":
|
||||||
|
if resp.License == "" {
|
||||||
|
fmt.Println("No license was specified for this model.")
|
||||||
|
} else {
|
||||||
|
fmt.Println(resp.License)
|
||||||
|
}
|
||||||
|
case "modelfile":
|
||||||
|
fmt.Println(resp.Modelfile)
|
||||||
|
case "parameters":
|
||||||
|
if resp.Parameters == "" {
|
||||||
|
fmt.Println("No parameters were specified for this model.")
|
||||||
|
} else {
|
||||||
|
if len(opts.Options) > 0 {
|
||||||
|
fmt.Println("User defined parameters:")
|
||||||
|
for k, v := range opts.Options {
|
||||||
|
fmt.Printf("%-*s %v\n", 30, k, v)
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
}
|
||||||
|
fmt.Println("Model defined parameters:")
|
||||||
|
fmt.Println(resp.Parameters)
|
||||||
|
}
|
||||||
|
case "system":
|
||||||
|
switch {
|
||||||
|
case opts.System != "":
|
||||||
|
fmt.Println(opts.System + "\n")
|
||||||
|
case resp.System != "":
|
||||||
|
fmt.Println(resp.System + "\n")
|
||||||
|
default:
|
||||||
|
fmt.Println("No system message was specified for this model.")
|
||||||
|
}
|
||||||
|
case "template":
|
||||||
|
switch {
|
||||||
|
case opts.Template != "":
|
||||||
|
fmt.Println(opts.Template + "\n")
|
||||||
|
case resp.Template != "":
|
||||||
|
fmt.Println(resp.Template)
|
||||||
|
default:
|
||||||
|
fmt.Println("No prompt template was specified for this model.")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
fmt.Printf("Unknown command '/show %s'. Type /? for help\n", args[1])
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
usageShow()
|
||||||
|
}
|
||||||
|
case strings.HasPrefix(line, "/help"), strings.HasPrefix(line, "/?"):
|
||||||
|
args := strings.Fields(line)
|
||||||
|
if len(args) > 1 {
|
||||||
|
switch args[1] {
|
||||||
|
case "set", "/set":
|
||||||
|
usageSet()
|
||||||
|
case "show", "/show":
|
||||||
|
usageShow()
|
||||||
|
case "shortcut", "shortcuts":
|
||||||
|
usageShortcuts()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
usage()
|
||||||
|
}
|
||||||
|
case strings.HasPrefix(line, "/exit"), strings.HasPrefix(line, "/bye"):
|
||||||
|
return nil
|
||||||
|
case strings.HasPrefix(line, "/"):
|
||||||
|
args := strings.Fields(line)
|
||||||
|
isFile := false
|
||||||
|
|
||||||
|
if opts.MultiModal {
|
||||||
|
for _, f := range extractFileNames(line) {
|
||||||
|
if strings.HasPrefix(f, args[0]) {
|
||||||
|
isFile = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isFile {
|
||||||
|
fmt.Printf("Unknown command '%s'. Type /? for help\n", args[0])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.WriteString(line)
|
||||||
|
default:
|
||||||
|
sb.WriteString(line)
|
||||||
|
}
|
||||||
|
|
||||||
|
if sb.Len() > 0 && multiline == MultilineNone {
|
||||||
|
newMessage := api.Message{Role: "user", Content: sb.String()}
|
||||||
|
|
||||||
|
if opts.MultiModal {
|
||||||
|
msg, images, err := extractFileData(sb.String())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// clear all previous images for better responses
|
||||||
|
if len(images) > 0 {
|
||||||
|
for i := range opts.Messages {
|
||||||
|
opts.Messages[i].Images = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
newMessage.Content = msg
|
||||||
|
newMessage.Images = images
|
||||||
|
}
|
||||||
|
|
||||||
|
opts.Messages = append(opts.Messages, newMessage)
|
||||||
|
|
||||||
|
assistant, err := chat(cmd, opts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if assistant != nil {
|
||||||
|
opts.Messages = append(opts.Messages, *assistant)
|
||||||
|
}
|
||||||
|
|
||||||
|
sb.Reset()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildModelfile(opts runOptions) string {
|
||||||
|
var mf strings.Builder
|
||||||
|
model := opts.ParentModel
|
||||||
|
if model == "" {
|
||||||
|
model = opts.Model
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&mf, "FROM %s\n", model)
|
||||||
|
if opts.System != "" {
|
||||||
|
fmt.Fprintf(&mf, "SYSTEM \"\"\"%s\"\"\"\n", opts.System)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Template != "" {
|
||||||
|
fmt.Fprintf(&mf, "TEMPLATE \"\"\"%s\"\"\"\n", opts.Template)
|
||||||
|
}
|
||||||
|
|
||||||
|
keys := make([]string, 0)
|
||||||
|
for k := range opts.Options {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
for _, k := range keys {
|
||||||
|
fmt.Fprintf(&mf, "PARAMETER %s %v\n", k, opts.Options[k])
|
||||||
|
}
|
||||||
|
fmt.Fprintln(&mf)
|
||||||
|
|
||||||
|
for _, msg := range opts.Messages {
|
||||||
|
fmt.Fprintf(&mf, "MESSAGE %s \"\"\"%s\"\"\"\n", msg.Role, msg.Content)
|
||||||
|
}
|
||||||
|
|
||||||
|
return mf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func normalizeFilePath(fp string) string {
|
||||||
|
// Define a map of escaped characters and their replacements
|
||||||
|
replacements := map[string]string{
|
||||||
|
"\\ ": " ", // Escaped space
|
||||||
|
"\\(": "(", // Escaped left parenthesis
|
||||||
|
"\\)": ")", // Escaped right parenthesis
|
||||||
|
"\\[": "[", // Escaped left square bracket
|
||||||
|
"\\]": "]", // Escaped right square bracket
|
||||||
|
"\\{": "{", // Escaped left curly brace
|
||||||
|
"\\}": "}", // Escaped right curly brace
|
||||||
|
"\\$": "$", // Escaped dollar sign
|
||||||
|
"\\&": "&", // Escaped ampersand
|
||||||
|
"\\;": ";", // Escaped semicolon
|
||||||
|
"\\'": "'", // Escaped single quote
|
||||||
|
"\\\\": "\\", // Escaped backslash
|
||||||
|
"\\*": "*", // Escaped asterisk
|
||||||
|
"\\?": "?", // Escaped question mark
|
||||||
|
}
|
||||||
|
|
||||||
|
for escaped, actual := range replacements {
|
||||||
|
fp = strings.ReplaceAll(fp, escaped, actual)
|
||||||
|
}
|
||||||
|
return fp
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractFileNames(input string) []string {
|
||||||
|
// Regex to match file paths starting with optional drive letter, / ./ \ or .\ and include escaped or unescaped spaces (\ or %20)
|
||||||
|
// and followed by more characters and a file extension
|
||||||
|
// This will capture non filename strings, but we'll check for file existence to remove mismatches
|
||||||
|
regexPattern := `(?:[a-zA-Z]:)?(?:\./|/|\\)[\S\\ ]+?\.(?i:jpg|jpeg|png|svg)\b`
|
||||||
|
re := regexp.MustCompile(regexPattern)
|
||||||
|
|
||||||
|
return re.FindAllString(input, -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractFileData(input string) (string, []api.ImageData, error) {
|
||||||
|
filePaths := extractFileNames(input)
|
||||||
|
var imgs []api.ImageData
|
||||||
|
|
||||||
|
for _, fp := range filePaths {
|
||||||
|
nfp := normalizeFilePath(fp)
|
||||||
|
data, err := getImageData(nfp)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "Couldn't process image: %q\n", err)
|
||||||
|
return "", imgs, err
|
||||||
|
}
|
||||||
|
fmt.Fprintf(os.Stderr, "Added image '%s'\n", nfp)
|
||||||
|
input = strings.ReplaceAll(input, fp, "")
|
||||||
|
imgs = append(imgs, data)
|
||||||
|
}
|
||||||
|
return input, imgs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getImageData(filePath string) ([]byte, error) {
|
||||||
|
file, err := os.Open(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
buf := make([]byte, 512)
|
||||||
|
_, err = file.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
contentType := http.DetectContentType(buf)
|
||||||
|
allowedTypes := []string{"image/jpeg", "image/jpg", "image/png"}
|
||||||
|
if !slices.Contains(allowedTypes, contentType) {
|
||||||
|
return nil, fmt.Errorf("invalid image type: %s", contentType)
|
||||||
|
}
|
||||||
|
|
||||||
|
info, err := file.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the file size exceeds 100MB
|
||||||
|
var maxSize int64 = 100 * 1024 * 1024 // 100MB in bytes
|
||||||
|
if info.Size() > maxSize {
|
||||||
|
return nil, fmt.Errorf("file size exceeds maximum limit (100MB)")
|
||||||
|
}
|
||||||
|
|
||||||
|
buf = make([]byte, info.Size())
|
||||||
|
_, err = file.Seek(0, 0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = io.ReadFull(file, buf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
cmd/interactive_test.go (new file, 117 lines)
@@ -0,0 +1,117 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"testing"
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestExtractFilenames(t *testing.T) {
|
||||||
|
// Unix style paths
|
||||||
|
input := ` some preamble
|
||||||
|
./relative\ path/one.png inbetween1 ./not a valid two.jpg inbetween2
|
||||||
|
/unescaped space /three.jpeg inbetween3 /valid\ path/dir/four.png "./quoted with spaces/five.svg`
|
||||||
|
res := extractFileNames(input)
|
||||||
|
assert.Len(t, res, 5)
|
||||||
|
assert.Contains(t, res[0], "one.png")
|
||||||
|
assert.Contains(t, res[1], "two.jpg")
|
||||||
|
assert.Contains(t, res[2], "three.jpeg")
|
||||||
|
assert.Contains(t, res[3], "four.png")
|
||||||
|
assert.Contains(t, res[4], "five.svg")
|
||||||
|
assert.NotContains(t, res[4], '"')
|
||||||
|
assert.NotContains(t, res, "inbtween")
|
||||||
|
|
||||||
|
// Windows style paths
|
||||||
|
input = ` some preamble
|
||||||
|
c:/users/jdoe/one.png inbetween1 c:/program files/someplace/two.jpg inbetween2
|
||||||
|
/absolute/nospace/three.jpeg inbetween3 /absolute/with space/four.png inbetween4
|
||||||
|
./relative\ path/five.svg inbetween5 "./relative with/spaces/six.png inbetween6
|
||||||
|
d:\path with\spaces\seven.svg inbetween7 c:\users\jdoe\eight.png inbetween8
|
||||||
|
d:\program files\someplace\nine.png inbetween9 "E:\program files\someplace\ten.svg some ending
|
||||||
|
`
|
||||||
|
res = extractFileNames(input)
|
||||||
|
assert.Len(t, res, 10)
|
||||||
|
assert.NotContains(t, res, "inbtween")
|
||||||
|
assert.Contains(t, res[0], "one.png")
|
||||||
|
assert.Contains(t, res[0], "c:")
|
||||||
|
assert.Contains(t, res[1], "two.jpg")
|
||||||
|
assert.Contains(t, res[1], "c:")
|
||||||
|
assert.Contains(t, res[2], "three.jpeg")
|
||||||
|
assert.Contains(t, res[3], "four.png")
|
||||||
|
assert.Contains(t, res[4], "five.svg")
|
||||||
|
assert.Contains(t, res[5], "six.png")
|
||||||
|
assert.Contains(t, res[6], "seven.svg")
|
||||||
|
assert.Contains(t, res[6], "d:")
|
||||||
|
assert.Contains(t, res[7], "eight.png")
|
||||||
|
assert.Contains(t, res[7], "c:")
|
||||||
|
assert.Contains(t, res[8], "nine.png")
|
||||||
|
assert.Contains(t, res[8], "d:")
|
||||||
|
assert.Contains(t, res[9], "ten.svg")
|
||||||
|
assert.Contains(t, res[9], "E:")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestModelfileBuilder(t *testing.T) {
|
||||||
|
opts := runOptions{
|
||||||
|
Model: "hork",
|
||||||
|
System: "You are part horse and part shark, but all hork. Do horklike things",
|
||||||
|
Template: "This is a template.",
|
||||||
|
Messages: []api.Message{
|
||||||
|
{Role: "user", Content: "Hey there hork!"},
|
||||||
|
{Role: "assistant", Content: "Yes it is true, I am half horse, half shark."},
|
||||||
|
},
|
||||||
|
Options: map[string]interface{}{},
|
||||||
|
}
|
||||||
|
|
||||||
|
opts.Options["temperature"] = 0.9
|
||||||
|
opts.Options["seed"] = 42
|
||||||
|
opts.Options["penalize_newline"] = false
|
||||||
|
opts.Options["stop"] = []string{"hi", "there"}
|
||||||
|
|
||||||
|
mf := buildModelfile(opts)
|
||||||
|
expectedModelfile := `FROM {{.Model}}
|
||||||
|
SYSTEM """{{.System}}"""
|
||||||
|
TEMPLATE """{{.Template}}"""
|
||||||
|
PARAMETER penalize_newline false
|
||||||
|
PARAMETER seed 42
|
||||||
|
PARAMETER stop [hi there]
|
||||||
|
PARAMETER temperature 0.9
|
||||||
|
|
||||||
|
MESSAGE user """Hey there hork!"""
|
||||||
|
MESSAGE assistant """Yes it is true, I am half horse, half shark."""
|
||||||
|
`
|
||||||
|
|
||||||
|
tmpl, err := template.New("").Parse(expectedModelfile)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
err = tmpl.Execute(&buf, opts)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, buf.String(), mf)
|
||||||
|
|
||||||
|
opts.ParentModel = "horseshark"
|
||||||
|
mf = buildModelfile(opts)
|
||||||
|
expectedModelfile = `FROM {{.ParentModel}}
|
||||||
|
SYSTEM """{{.System}}"""
|
||||||
|
TEMPLATE """{{.Template}}"""
|
||||||
|
PARAMETER penalize_newline false
|
||||||
|
PARAMETER seed 42
|
||||||
|
PARAMETER stop [hi there]
|
||||||
|
PARAMETER temperature 0.9
|
||||||
|
|
||||||
|
MESSAGE user """Hey there hork!"""
|
||||||
|
MESSAGE assistant """Yes it is true, I am half horse, half shark."""
|
||||||
|
`
|
||||||
|
|
||||||
|
tmpl, err = template.New("").Parse(expectedModelfile)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
var parentBuf bytes.Buffer
|
||||||
|
err = tmpl.Execute(&parentBuf, opts)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, parentBuf.String(), mf)
|
||||||
|
}
|
||||||
cmd/start.go (new file, 27 lines)
@@ -0,0 +1,27 @@
//go:build darwin || windows

package cmd

import (
    "context"
    "errors"
    "time"

    "github.com/ollama/ollama/api"
)

func waitForServer(ctx context.Context, client *api.Client) error {
    // wait for the server to start
    timeout := time.After(5 * time.Second)
    tick := time.Tick(500 * time.Millisecond)
    for {
        select {
        case <-timeout:
            return errors.New("timed out waiting for server to start")
        case <-tick:
            if err := client.Heartbeat(ctx); err == nil {
                return nil // server has started
            }
        }
    }
}
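waitForServer polls the heartbeat endpoint every 500ms and gives up after five seconds. As a rough illustration of how the platform-specific startApp implementations below are meant to be combined with it, here is a hypothetical helper; the name ensureServerRunning and the connection-refused check are assumptions rather than code from this diff (the real call site lives in cmd/cmd.go, whose diff is suppressed above).

package cmd

import (
    "context"
    "strings"

    "github.com/ollama/ollama/api"
)

// ensureServerRunning is a hypothetical helper: probe the server once and only
// launch the desktop app (which in turn calls waitForServer) when it is down.
func ensureServerRunning(ctx context.Context, client *api.Client) error {
    err := client.Heartbeat(ctx)
    if err == nil {
        return nil // server already running
    }
    if !strings.Contains(err.Error(), "connection refused") {
        return err // some other failure; surface it
    }
    return startApp(ctx, client)
}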
cmd/start_darwin.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package cmd

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "strings"

    "github.com/ollama/ollama/api"
)

func startApp(ctx context.Context, client *api.Client) error {
    exe, err := os.Executable()
    if err != nil {
        return err
    }
    link, err := os.Readlink(exe)
    if err != nil {
        return err
    }
    if !strings.Contains(link, "Ollama.app") {
        return fmt.Errorf("could not find ollama app")
    }
    path := strings.Split(link, "Ollama.app")
    if err := exec.Command("/usr/bin/open", "-a", path[0]+"Ollama.app").Run(); err != nil {
        return err
    }
    return waitForServer(ctx, client)
}
cmd/start_default.go (new file, 14 lines)
@@ -0,0 +1,14 @@
//go:build !windows && !darwin

package cmd

import (
    "context"
    "fmt"

    "github.com/ollama/ollama/api"
)

func startApp(ctx context.Context, client *api.Client) error {
    return fmt.Errorf("could not connect to ollama server, run 'ollama serve' to start it")
}
cmd/start_windows.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package cmd

import (
    "context"
    "errors"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "syscall"

    "github.com/ollama/ollama/api"
)

func startApp(ctx context.Context, client *api.Client) error {
    // log.Printf("XXX Attempting to find and start ollama app")
    AppName := "ollama app.exe"
    exe, err := os.Executable()
    if err != nil {
        return err
    }
    appExe := filepath.Join(filepath.Dir(exe), AppName)
    _, err = os.Stat(appExe)
    if errors.Is(err, os.ErrNotExist) {
        // Try the standard install location
        localAppData := os.Getenv("LOCALAPPDATA")
        appExe = filepath.Join(localAppData, "Ollama", AppName)
        _, err := os.Stat(appExe)
        if errors.Is(err, os.ErrNotExist) {
            // Finally look in the path
            appExe, err = exec.LookPath(AppName)
            if err != nil {
                return fmt.Errorf("could not locate ollama app")
            }
        }
    }
    // log.Printf("XXX attempting to start app %s", appExe)

    cmd_path := "c:\\Windows\\system32\\cmd.exe"
    cmd := exec.Command(cmd_path, "/c", appExe)
    // TODO - these hide flags aren't working - still pops up a command window for some reason
    cmd.SysProcAttr = &syscall.SysProcAttr{CreationFlags: 0x08000000, HideWindow: true}

    // TODO this didn't help either...
    cmd.Stdin = strings.NewReader("")
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    if err := cmd.Start(); err != nil {
        return fmt.Errorf("unable to start ollama app %w", err)
    }

    if cmd.Process != nil {
        defer cmd.Process.Release() //nolint:errcheck
    }
    return waitForServer(ctx, client)
}
convert/convert.go (new file, 200 lines)
@@ -0,0 +1,200 @@
package convert

import (
    "cmp"
    "encoding/binary"
    "encoding/json"
    "fmt"
    "io"
    "log/slog"
    "os"
    "path/filepath"
    "slices"
    "strings"

    "google.golang.org/protobuf/proto"

    "github.com/ollama/ollama/convert/sentencepiece"
    "github.com/ollama/ollama/llm"
)

const (
    _ int32 = iota
    tokenTypeNormal
    tokenTypeUnknown
    tokenTypeControl
    tokenTypeUserDefined
    tokenTypeUnused
    tokenTypeByte
)

type Params struct {
    Architectures []string `json:"architectures"`
    VocabSize int `json:"vocab_size"`
    HiddenSize int `json:"hidden_size"` // n_embd
    HiddenLayers int `json:"num_hidden_layers"` // n_layer
    ContextSize int `json:"max_position_embeddings"`
    IntermediateSize int `json:"intermediate_size"`
    AttentionHeads int `json:"num_attention_heads"` // n_head
    KeyValHeads int `json:"num_key_value_heads"`
    NormEPS float64 `json:"rms_norm_eps"`
    BoSTokenID int `json:"bos_token_id"`
    EoSTokenID int `json:"eos_token_id"`
    HeadDimension int `json:"head_dim"`
    PaddingTokenID int `json:"pad_token_id"`
    RopeFrequencyBase float64 `json:"rope_theta"`

    Experts int `json:"num_local_experts"`
    ExpertsUsed int `json:"num_experts_per_tok"`

    PreTokenizer string

    ByteOrder
}

type ByteOrder interface {
    binary.ByteOrder
    binary.AppendByteOrder
}

type ModelArch interface {
    GetTensors() error
    LoadVocab() error
    WriteGGUF(io.WriteSeeker) error
}

type ModelFormat interface {
    GetLayerName(string) (string, error)
    GetTensors(string, *Params) ([]llm.Tensor, error)
    GetParams(string) (*Params, error)
    GetModelArch(string, string, *Params) (ModelArch, error)
}

type ModelData struct {
    Path string
    Name string
    Params *Params
    Vocab *Vocab
    Tensors []llm.Tensor
    Format ModelFormat
}

func GetModelFormat(dirname string) (ModelFormat, error) {
    files, err := filepath.Glob(filepath.Join(dirname, "*"))
    if err != nil {
        return nil, err
    }

    for _, fn := range files {
        if strings.HasSuffix(fn, ".safetensors") {
            return &SafetensorFormat{}, nil
        } else if strings.HasSuffix(fn, ".bin") || strings.HasSuffix(fn, ".pth") {
            slog.Debug("model is torch")
            return &TorchFormat{}, nil
        }
    }

    return nil, fmt.Errorf("couldn't determine model format")
}

// Details on gguf's tokenizer can be found at:
// https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#tokenizer
type Vocab struct {
    Tokens []string
    Scores []float32
    Types []int32
    Merges []string
}

func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
    slog.Info(fmt.Sprintf("reading vocab from %s", filepath.Join(dirpath, "tokenizer.model")))
    in, err := os.ReadFile(filepath.Join(dirpath, "tokenizer.model"))
    if err != nil {
        return nil, err
    }

    // To regenerate sentencepiece from the protobufs use:
    // protoc -I=./ --go_out=./ sentencepiece_model.proto
    modelProto := &sentencepiece.ModelProto{}
    if err := proto.Unmarshal(in, modelProto); err != nil {
        return nil, err
    }

    v := &Vocab{
        Tokens: make([]string, 0),
        Scores: make([]float32, 0),
        Types: make([]int32, 0),
    }

    pieces := modelProto.GetPieces()
    for _, p := range pieces {
        v.Tokens = append(v.Tokens, p.GetPiece())
        v.Scores = append(v.Scores, p.GetScore())
        t := p.GetType()
        switch t {
        case sentencepiece.ModelProto_SentencePiece_UNKNOWN:
        case sentencepiece.ModelProto_SentencePiece_CONTROL:
        case sentencepiece.ModelProto_SentencePiece_UNUSED:
        case sentencepiece.ModelProto_SentencePiece_BYTE:
        default:
            t = sentencepiece.ModelProto_SentencePiece_NORMAL
        }
        v.Types = append(v.Types, int32(t))
    }

    slog.Info(fmt.Sprintf("vocab size: %d", len(v.Tokens)))

    // add any additional tokens
    addIn, err := os.ReadFile(filepath.Join(dirpath, "added_tokens.json"))
    if os.IsNotExist(err) {
        return v, nil
    } else if err != nil {
        return nil, err
    }

    slog.Info("reading user defined tokens")

    var extraTokenData map[string]int
    if err := json.Unmarshal(addIn, &extraTokenData); err != nil {
        return nil, err
    }

    type token struct {
        key string
        pos int
    }

    extraTokens := make([]token, 0)
    for k, id := range extraTokenData {
        extraTokens = append(extraTokens, token{k, id})
    }

    slices.SortFunc(extraTokens, func(a, b token) int {
        return cmp.Compare(a.pos, b.pos)
    })

    numToks := len(v.Tokens)

    for cnt, t := range extraTokens {
        // the token id should match the specific index for the total number of tokens
        if t.pos != cnt+numToks {
            return nil, fmt.Errorf("token ID '%d' for '%s' doesn't match total token size", t.pos, t.key)
        }
        v.Tokens = append(v.Tokens, t.key)
        v.Scores = append(v.Scores, -1000.0)
        v.Types = append(v.Types, tokenTypeUserDefined)
    }
    slog.Info(fmt.Sprintf("vocab size w/ extra tokens: %d", len(v.Tokens)))

    if params.VocabSize > len(v.Tokens) {
        missingTokens := params.VocabSize - len(v.Tokens)
        slog.Warn(fmt.Sprintf("vocab is missing %d tokens", missingTokens))
        for cnt := range missingTokens {
            v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
            v.Scores = append(v.Scores, -1)
            v.Types = append(v.Types, tokenTypeUserDefined)
        }
    }

    return v, nil
}
convert/convert_test.go (new file)
@@ -0,0 +1,103 @@
//go:build slow

package convert

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/ollama/ollama/llm"
)

func convertFull(t *testing.T, p string) (llm.KV, llm.Tensors) {
    t.Helper()

    mf, err := GetModelFormat(p)
    if err != nil {
        t.Fatal(err)
    }

    params, err := mf.GetParams(p)
    if err != nil {
        t.Fatal(err)
    }

    arch, err := mf.GetModelArch("", p, params)
    if err != nil {
        t.Fatal(err)
    }

    if err := arch.LoadVocab(); err != nil {
        t.Fatal(err)
    }

    if err := arch.GetTensors(); err != nil {
        t.Fatal(err)
    }

    f, err := os.CreateTemp(t.TempDir(), "f16")
    if err != nil {
        t.Fatal(err)
    }
    defer f.Close()

    if err := arch.WriteGGUF(f); err != nil {
        t.Fatal(err)
    }

    r, err := os.Open(f.Name())
    if err != nil {
        t.Fatal(err)
    }
    defer r.Close()

    m, _, err := llm.DecodeGGML(r)
    if err != nil {
        t.Fatal(err)
    }

    return m.KV(), m.Tensors()
}

func TestConvertFull(t *testing.T) {
    cases := []struct {
        path string
        arch string
        tensors int
        layers int
    }{
        {"Meta-Llama-3-8B-Instruct", "llama", 291, 35},
        {"Mistral-7B-Instruct-v0.2", "llama", 291, 35},
        {"Mixtral-8x7B-Instruct-v0.1", "llama", 291, 35},
        {"gemma-2b-it", "gemma", 164, 20},
    }

    for _, tt := range cases {
        t.Run(tt.path, func(t *testing.T) {
            p := filepath.Join("testdata", tt.path)
            if _, err := os.Stat(p); err != nil {
                t.Skipf("%s not found", p)
            }

            kv, tensors := convertFull(t, p)

            if kv.Architecture() != tt.arch {
                t.Fatalf("expected llama, got %s", kv.Architecture())
            }

            if kv.FileType().String() != "F16" {
                t.Fatalf("expected F16, got %s", kv.FileType())
            }

            if len(tensors) != tt.tensors {
                t.Fatalf("expected %d tensors, got %d", tt.tensors, len(tensors))
            }

            layers := tensors.Layers()
            if len(layers) != tt.layers {
                t.Fatalf("expected %d layers, got %d", tt.layers, len(layers))
            }
        })
    }
}
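Because of the //go:build slow constraint, this test file only compiles when the slow build tag is supplied (roughly go test -tags slow ./convert), and each case skips itself with t.Skipf unless the corresponding model directory exists under convert/testdata.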
convert/gemma.go (new file)
@@ -0,0 +1,102 @@
package convert

import (
    "fmt"
    "io"
    "log/slog"
    "strings"

    "github.com/pdevine/tensor"
    "github.com/pdevine/tensor/native"

    "github.com/ollama/ollama/llm"
)

type GemmaModel struct {
    ModelData
}

func addOnes(data []float32, vectorSize int) ([]float32, error) {
    n := tensor.New(tensor.WithShape(vectorSize), tensor.WithBacking(data))
    ones := tensor.Ones(tensor.Float32, vectorSize)

    n, err := n.Add(ones)
    if err != nil {
        return nil, err
    }

    ts, err := native.SelectF32(n, 0)
    if err != nil {
        return nil, err
    }

    var f32s []float32
    for _, t := range ts {
        f32s = append(f32s, t...)
    }

    return f32s, nil
}

func (m *GemmaModel) GetTensors() error {
    t, err := m.Format.GetTensors(m.Path, m.Params)
    if err != nil {
        return err
    }

    slog.Debug(fmt.Sprintf("Total tensors: %d", len(t)))
    for _, l := range t {
        if strings.HasSuffix(l.Name, "norm.weight") {
            wt := l.WriterTo.(safetensorWriterTo)
            wt.repacker = m.Repack
            l.WriterTo = wt
        }
        m.Tensors = append(m.Tensors, l)
    }

    return nil
}

func (m *GemmaModel) LoadVocab() error {
    v, err := LoadSentencePieceTokens(m.Path, m.Params)
    if err != nil {
        return err
    }
    m.Vocab = v
    return nil
}

func (m *GemmaModel) Repack(_ string, data []float32, shape []uint64) ([]float32, error) {
    return addOnes(data, int(shape[0]))
}

func (m *GemmaModel) WriteGGUF(ws io.WriteSeeker) error {
    kv := llm.KV{
        "general.architecture": "gemma",
        "general.name": m.Name,
        "gemma.context_length": uint32(m.Params.ContextSize),
        "gemma.embedding_length": uint32(m.Params.HiddenSize),
        "gemma.block_count": uint32(m.Params.HiddenLayers),
        "gemma.feed_forward_length": uint32(m.Params.IntermediateSize),
        "gemma.attention.head_count": uint32(m.Params.AttentionHeads),
        "gemma.attention.head_count_kv": uint32(m.Params.KeyValHeads),
        "gemma.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
        "gemma.attention.key_length": uint32(m.Params.HeadDimension),
        "gemma.attention.value_length": uint32(m.Params.HeadDimension),
        "general.file_type": uint32(1),
        "tokenizer.ggml.model": "llama",

        "tokenizer.ggml.tokens": m.Vocab.Tokens,
        "tokenizer.ggml.scores": m.Vocab.Scores,
        "tokenizer.ggml.token_type": m.Vocab.Types,

        "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
        "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
        "tokenizer.ggml.padding_token_id": uint32(m.Params.PaddingTokenID),
        "tokenizer.ggml.unknown_token_id": uint32(3),
        "tokenizer.ggml.add_bos_token": true,
        "tokenizer.ggml.add_eos_token": false,
    }

    return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
}
convert/llama.go (new file)
@@ -0,0 +1,159 @@
package convert

import (
    "cmp"
    "errors"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "regexp"
    "strings"

    "github.com/pdevine/tensor"
    "github.com/pdevine/tensor/native"

    "github.com/ollama/ollama/llm"
)

type LlamaModel struct {
    ModelData
}

func (m *LlamaModel) GetTensors() error {
    t, err := m.Format.GetTensors(m.Path, m.Params)
    if err != nil {
        return err
    }

    pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
    re, err := regexp.Compile(pattern)
    if err != nil {
        return err
    }

    for _, l := range t {
        matches := re.FindAllStringSubmatch(l.Name, -1)
        if len(matches) > 0 {
            switch m.Format.(type) {
            case *TorchFormat:
                wt := l.WriterTo.(torchWriterTo)
                wt.repacker = m.Repack
                l.WriterTo = wt
            case *SafetensorFormat:
                wt := l.WriterTo.(safetensorWriterTo)
                wt.repacker = m.Repack
                l.WriterTo = wt
            }
        }
        m.Tensors = append(m.Tensors, l)
    }

    return nil
}

func (m *LlamaModel) LoadVocab() (err error) {
    pre, ts, merges, err := parseTokens(filepath.Join(m.Path, "tokenizer.json"))
    if errors.Is(err, os.ErrNotExist) {
        return nil
    } else if err != nil {
        return err
    }

    m.Vocab = &Vocab{}
    for _, t := range ts {
        m.Vocab.Tokens = append(m.Vocab.Tokens, t.Content)
        m.Vocab.Types = append(m.Vocab.Types, t.Type())
    }

    m.Vocab.Merges = merges
    m.Params.PreTokenizer = pre
    return nil
}

func (m *LlamaModel) WriteGGUF(ws io.WriteSeeker) error {
    kv := llm.KV{
        "general.architecture": "llama",
        "general.name": m.Name,
        "llama.vocab_size": uint32(len(m.Vocab.Tokens)),
        "llama.context_length": uint32(m.Params.ContextSize),
        "llama.embedding_length": uint32(m.Params.HiddenSize),
        "llama.block_count": uint32(m.Params.HiddenLayers),
        "llama.feed_forward_length": uint32(m.Params.IntermediateSize),
        "llama.rope.freq_base": float32(m.Params.RopeFrequencyBase),
        "llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
        "llama.attention.head_count": uint32(m.Params.AttentionHeads),
        "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),
        "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
        "general.file_type": uint32(1),
        "tokenizer.ggml.model": "gpt2",

        "tokenizer.ggml.pre": m.Params.PreTokenizer,
        "tokenizer.ggml.tokens": m.Vocab.Tokens,
        "tokenizer.ggml.token_type": m.Vocab.Types,

        "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
        "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
        "tokenizer.ggml.unknown_token_id": uint32(0),
    }

    if len(m.Vocab.Merges) > 0 {
        kv["tokenizer.ggml.merges"] = m.Vocab.Merges
    } else {
        kv["tokenizer.ggml.scores"] = m.Vocab.Scores
    }

    return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
}

func (m *LlamaModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
    return llamaRepack(name, m.Params, data, shape)
}

func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([]float32, error) {
    var dims []int
    for _, dim := range shape {
        if dim != 0 {
            dims = append(dims, int(dim))
        }
    }

    var heads int
    switch {
    case strings.HasSuffix(name, "attn_q.weight"):
        heads = params.AttentionHeads
    case strings.HasSuffix(name, "attn_k.weight"):
        heads = cmp.Or(params.KeyValHeads, params.AttentionHeads)
    default:
        return nil, fmt.Errorf("unknown tensor name: %s", name)
    }

    n := tensor.New(tensor.WithShape(dims...), tensor.WithBacking(data))
    if err := n.Reshape(append([]int{heads, 2, dims[0] / heads / 2}, dims[1:]...)...); err != nil {
        return nil, err
    }

    if err := n.T(0, 2, 1, 3); err != nil {
        return nil, err
    }

    if err := n.Reshape(dims...); err != nil {
        return nil, err
    }

    if err := n.Transpose(); err != nil {
        return nil, err
    }

    ts, err := native.SelectF32(n, 1)
    if err != nil {
        return nil, err
    }

    var f32s []float32
    for _, t := range ts {
        f32s = append(f32s, t...)
    }

    return f32s, nil
}
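llamaRepack appears to mirror the attention-weight permutation the upstream conversion scripts apply when importing Hugging Face checkpoints: the Q/K weight is viewed as (heads, 2, head_dim/2, ...), the middle two axes are swapped, and the data is flattened back so the rotary halves land in the interleaved order the GGUF llama layout expects. That reading follows from the Reshape and T(0, 2, 1, 3) calls above rather than from any comment in the commit.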
convert/mistral.go (new file)
@@ -0,0 +1,79 @@
package convert

import (
    "io"
    "regexp"

    "github.com/ollama/ollama/llm"
)

type MistralModel struct {
    ModelData
}

func (m *MistralModel) GetTensors() error {
    t, err := m.Format.GetTensors(m.Path, m.Params)
    if err != nil {
        return err
    }

    pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
    re, err := regexp.Compile(pattern)
    if err != nil {
        return err
    }

    for _, l := range t {
        matches := re.FindAllStringSubmatch(l.Name, -1)
        if len(matches) > 0 {
            wt := l.WriterTo.(safetensorWriterTo)
            wt.repacker = m.Repack
            l.WriterTo = wt
        }
        m.Tensors = append(m.Tensors, l)
    }

    return nil
}

func (m *MistralModel) LoadVocab() error {
    v, err := LoadSentencePieceTokens(m.Path, m.Params)
    if err != nil {
        return err
    }
    m.Vocab = v
    return nil
}

func (m *MistralModel) WriteGGUF(ws io.WriteSeeker) error {
    kv := llm.KV{
        "general.architecture": "llama",
        "general.name": m.Name,
        "llama.context_length": uint32(m.Params.ContextSize),
        "llama.embedding_length": uint32(m.Params.HiddenSize),
        "llama.block_count": uint32(m.Params.HiddenLayers),
        "llama.feed_forward_length": uint32(m.Params.IntermediateSize),
        "llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),
        "llama.attention.head_count": uint32(m.Params.AttentionHeads),
        "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),
        "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),
        "general.file_type": uint32(1),
        "tokenizer.ggml.model": "llama",

        "tokenizer.ggml.tokens": m.Vocab.Tokens,
        "tokenizer.ggml.scores": m.Vocab.Scores,
        "tokenizer.ggml.token_type": m.Vocab.Types,

        "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
        "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
        "tokenizer.ggml.add_bos_token": true,
        "tokenizer.ggml.add_eos_token": false,
        "tokenizer.ggml.unknown_token_id": uint32(0),
    }

    return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
}

func (m *MistralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
    return llamaRepack(name, m.Params, data, shape)
}
convert/mixtral.go (new file)
@@ -0,0 +1,87 @@
package convert

import (
    "io"
    "regexp"

    "github.com/ollama/ollama/llm"
)

type MixtralModel struct {
    ModelData
}

func (m *MixtralModel) GetTensors() error {
    t, err := m.Format.GetTensors(m.Path, m.Params)
    if err != nil {
        return err
    }

    pattern := `^blk\.[0-9]+\.attn_(?P<layer>q|k)\.weight$`
    re, err := regexp.Compile(pattern)
    if err != nil {
        return err
    }

    for _, l := range t {
        matches := re.FindAllStringSubmatch(l.Name, -1)
        if len(matches) > 0 {
            wt := l.WriterTo.(safetensorWriterTo)
            wt.repacker = m.Repack
            l.WriterTo = wt
        }
        m.Tensors = append(m.Tensors, l)
    }

    return nil
}

func (m *MixtralModel) LoadVocab() error {
    v, err := LoadSentencePieceTokens(m.Path, m.Params)
    if err != nil {
        return err
    }
    m.Vocab = v
    return nil
}

func (m *MixtralModel) WriteGGUF(ws io.WriteSeeker) error {
    kv := llm.KV{
        "general.architecture": "llama",
        "general.name": m.Name,
        "llama.block_count": uint32(m.Params.HiddenLayers),
        "llama.context_length": uint32(m.Params.ContextSize),
        "llama.embedding_length": uint32(m.Params.HiddenSize),
        "llama.feed_forward_length": uint32(m.Params.IntermediateSize),
        "llama.attention.head_count": uint32(m.Params.AttentionHeads),
        "llama.attention.head_count_kv": uint32(m.Params.KeyValHeads),

        "llama.rope.freq_base": float32(m.Params.RopeFrequencyBase),
        "llama.attention.layer_norm_rms_epsilon": float32(m.Params.NormEPS),

        "llama.expert_count": uint32(m.Params.Experts),
        "llama.expert_used_count": uint32(m.Params.ExpertsUsed),

        "llama.vocab_size": uint32(len(m.Vocab.Tokens)),
        "llama.rope.dimension_count": uint32(m.Params.HiddenSize / m.Params.AttentionHeads),

        "general.file_type": uint32(1),
        "tokenizer.ggml.model": "llama",

        "tokenizer.ggml.tokens": m.Vocab.Tokens,
        "tokenizer.ggml.scores": m.Vocab.Scores,
        "tokenizer.ggml.token_type": m.Vocab.Types,

        "tokenizer.ggml.bos_token_id": uint32(m.Params.BoSTokenID),
        "tokenizer.ggml.eos_token_id": uint32(m.Params.EoSTokenID),
        "tokenizer.ggml.unknown_token_id": uint32(0),
        "tokenizer.ggml.add_bos_token": true,
        "tokenizer.ggml.add_eos_token": false,
    }

    return llm.NewGGUFV3(m.Params.ByteOrder).Encode(ws, kv, m.Tensors)
}

func (m *MixtralModel) Repack(name string, data []float32, shape []uint64) ([]float32, error) {
    return llamaRepack(name, m.Params, data, shape)
}
convert/safetensors.go (new file)
@@ -0,0 +1,309 @@
package convert

import (
    "bytes"
    "encoding/binary"
    "encoding/json"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "regexp"
    "slices"
    "strings"

    "github.com/d4l3k/go-bfloat16"
    "github.com/x448/float16"

    "github.com/ollama/ollama/llm"
)

type safetensorWriterTo struct {
    t *llm.Tensor

    params *Params
    bo ByteOrder

    filename string
    dtype string

    offset, size int64
    repacker func(string, []float32, []uint64) ([]float32, error)
}

type safetensorMetadata struct {
    Type string `json:"dtype"`
    Shape []uint64 `json:"shape"`
    Offsets []int64 `json:"data_offsets"`
}

type SafetensorFormat struct{}

func (m *SafetensorFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
    var tensors []llm.Tensor
    matches, err := filepath.Glob(filepath.Join(dirpath, "*.safetensors"))
    if err != nil {
        return nil, err
    }

    var offset uint64
    for _, f := range matches {
        var t []llm.Tensor
        var err error
        t, offset, err = m.readTensors(f, offset, params)
        if err != nil {
            return nil, err
        }

        tensors = append(tensors, t...)
    }
    return tensors, nil
}

func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params) ([]llm.Tensor, uint64, error) {
    f, err := os.Open(fn)
    if err != nil {
        return nil, 0, err
    }
    defer f.Close()

    var n int64
    if err := binary.Read(f, binary.LittleEndian, &n); err != nil {
        return nil, 0, err
    }

    b := bytes.NewBuffer(make([]byte, 0, n))
    if _, err = io.CopyN(b, f, n); err != nil {
        return nil, 0, err
    }

    var headers map[string]safetensorMetadata
    if err := json.NewDecoder(b).Decode(&headers); err != nil {
        return nil, 0, err
    }

    var keys []string
    for key := range headers {
        if !strings.HasSuffix(key, "self_attn.rotary_embd.inv_freq") {
            keys = append(keys, key)
        }
    }

    slices.Sort(keys)

    var tensors []llm.Tensor
    for _, key := range keys {
        value := headers[key]

        var kind uint32
        switch len(value.Shape) {
        case 0:
            // valuedata
            continue
        case 2:
            kind = 1
        }

        name, err := m.GetLayerName(key)
        if err != nil {
            return nil, 0, err
        }

        shape := make([]uint64, len(value.Shape))
        copy(shape, value.Shape)

        pad := func(s int64) int64 {
            return 8 + n + s
        }

        t := llm.Tensor{
            Name: name,
            Kind: kind,
            Offset: offset,
            Shape: shape,
        }

        t.WriterTo = safetensorWriterTo{
            t: &t,
            params: params,
            bo: params.ByteOrder,
            filename: fn,
            dtype: value.Type,
            offset: pad(value.Offsets[0]),
            size: pad(value.Offsets[1]) - pad(value.Offsets[0]),
        }

        offset += t.Size()
        tensors = append(tensors, t)
    }

    return tensors, offset, nil
}

func (m *SafetensorFormat) GetParams(dirpath string) (*Params, error) {
    f, err := os.Open(filepath.Join(dirpath, "config.json"))
    if err != nil {
        return nil, err
    }
    defer f.Close()

    var params Params

    if err := json.NewDecoder(f).Decode(&params); err != nil {
        return nil, err
    }

    params.ByteOrder = binary.LittleEndian
    return &params, nil
}

func (m *SafetensorFormat) GetLayerName(n string) (string, error) {
    directMap := map[string]string{
        "model.embed_tokens.weight": "token_embd.weight",
        "lm_head.weight": "output.weight",
        "model.norm.weight": "output_norm.weight",
    }

    tMap := map[string]string{
        "model.layers.(\\d+).input_layernorm.weight": "blk.$1.attn_norm.weight",
        "model.layers.(\\d+).mlp.down_proj.weight": "blk.$1.ffn_down.weight",
        "model.layers.(\\d+).mlp.gate_proj.weight": "blk.$1.ffn_gate.weight",
        "model.layers.(\\d+).mlp.up_proj.weight": "blk.$1.ffn_up.weight",
        "model.layers.(\\d+).post_attention_layernorm.weight": "blk.$1.ffn_norm.weight",
        "model.layers.(\\d+).self_attn.k_proj.weight": "blk.$1.attn_k.weight",
        "model.layers.(\\d+).self_attn.o_proj.weight": "blk.$1.attn_output.weight",
        "model.layers.(\\d+).self_attn.q_proj.weight": "blk.$1.attn_q.weight",
        "model.layers.(\\d+).self_attn.v_proj.weight": "blk.$1.attn_v.weight",
        "model.layers.(\\d+).block_sparse_moe.gate.weight": "blk.$1.ffn_gate_inp.weight",
        "model.layers.(\\d+).block_sparse_moe.experts.(\\d+).w1.weight": "blk.$1.ffn_gate.$2.weight",
        "model.layers.(\\d+).block_sparse_moe.experts.(\\d+).w2.weight": "blk.$1.ffn_down.$2.weight",
        "model.layers.(\\d+).block_sparse_moe.experts.(\\d+).w3.weight": "blk.$1.ffn_up.$2.weight",
    }

    v, ok := directMap[n]
    if ok {
        return v, nil
    }

    // quick hack to rename the layers to gguf format
    for k, v := range tMap {
        re := regexp.MustCompile(k)
        newName := re.ReplaceAllString(n, v)
        if newName != n {
            return newName, nil
        }
    }

    return "", fmt.Errorf("couldn't find a layer name for '%s'", n)
}

func (r safetensorWriterTo) WriteTo(w io.Writer) (n int64, err error) {
    f, err := os.Open(r.filename)
    if err != nil {
        return 0, err
    }
    defer f.Close()

    if _, err = f.Seek(r.offset, io.SeekStart); err != nil {
        return 0, err
    }

    var f32s []float32
    switch r.dtype {
    case "F32":
        f32s = make([]float32, r.size/4)
        if err = binary.Read(f, r.bo, f32s); err != nil {
            return 0, err
        }
    case "F16":
        u16s := make([]uint16, r.size/2)
        if err = binary.Read(f, r.bo, u16s); err != nil {
            return 0, err
        }

        for _, b := range u16s {
            f32s = append(f32s, float16.Frombits(b).Float32())
        }

    case "BF16":
        u8s := make([]uint8, r.size)
        if err = binary.Read(f, r.bo, u8s); err != nil {
            return 0, err
        }

        f32s = bfloat16.DecodeFloat32(u8s)
    default:
        return 0, fmt.Errorf("unknown data type: %s", r.dtype)
    }

    if r.repacker != nil {
        f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
        if err != nil {
            return 0, err
        }
    }

    switch r.t.Kind {
    case 0:
        return 0, binary.Write(w, r.bo, f32s)
    case 1:
        f16s := make([]uint16, len(f32s))
        for i := range f32s {
            f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
        }

        return 0, binary.Write(w, r.bo, f16s)
    default:
        return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
    }
}

func (m *SafetensorFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {
    switch len(params.Architectures) {
    case 0:
        return nil, fmt.Errorf("No architecture specified to convert")
    case 1:
        switch params.Architectures[0] {
        case "LlamaForCausalLM":
            return &LlamaModel{
                ModelData{
                    Name: name,
                    Path: dirPath,
                    Params: params,
                    Format: m,
                },
            }, nil
        case "MistralForCausalLM":
            return &MistralModel{
                ModelData{
                    Name: name,
                    Path: dirPath,
                    Params: params,
                    Format: m,
                },
            }, nil
        case "MixtralForCausalLM":
            return &MixtralModel{
                ModelData{
                    Name: name,
                    Path: dirPath,
                    Params: params,
                    Format: m,
                },
            }, nil
        case "GemmaForCausalLM":
            return &GemmaModel{
                ModelData{
                    Name: name,
                    Path: dirPath,
                    Params: params,
                    Format: m,
                },
            }, nil
        default:
            return nil, fmt.Errorf("Models based on '%s' are not yet supported", params.Architectures[0])
        }
    }

    return nil, fmt.Errorf("Unknown error")
}
convert/sentencepiece/sentencepiece_model.pb.go (new file, 1497 lines)
File diff suppressed because it is too large
convert/sentencepiece_model.proto (new file)
@@ -0,0 +1,333 @@
|
|||||||
|
// Copyright 2016 Google Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.!
|
||||||
|
|
||||||
|
syntax = "proto2";
|
||||||
|
|
||||||
|
// TODO(taku): Needs to use LITE RUNTIME in OSS release.
|
||||||
|
option optimize_for = LITE_RUNTIME;
|
||||||
|
option go_package = "./sentencepiece";
|
||||||
|
|
||||||
|
package sentencepiece;
|
||||||
|
|
||||||
|
// TrainerSpec encodes a various parameters for SentencePiece training.
|
||||||
|
// Next id: 55
|
||||||
|
message TrainerSpec {
|
||||||
|
///////////////////////////////////////////////////////////////////
|
||||||
|
// General parameters
|
||||||
|
//
|
||||||
|
// Input corpus files.
|
||||||
|
// Trainer accepts the following two formats:
|
||||||
|
// A) Monolingual: plain text, one sentence per line.
|
||||||
|
// B) Bilingual: TSV, source sentence <tab> target sentence
|
||||||
|
// When bilingual data is passed, shared vocabulary model is built.
|
||||||
|
// Note that the input file must be raw corpus, not a preprocessed corpus.
|
||||||
|
// Trainer only loads the first `input_sentence_size` sentences specified
|
||||||
|
// with this parameter.
|
||||||
|
repeated string input = 1;
|
||||||
|
|
||||||
|
// Input corpus format:
|
||||||
|
// "text": one-sentence-per-line text format (default)
|
||||||
|
// "tsv": sentence <tab> freq
|
||||||
|
optional string input_format = 7;
|
||||||
|
|
||||||
|
// Output model file prefix.
|
||||||
|
// <model_prefix>.model and <model_prefix>.vocab are generated.
|
||||||
|
optional string model_prefix = 2;
|
||||||
|
|
||||||
|
// Model type. only have UNIGRAM now.
|
||||||
|
enum ModelType {
|
||||||
|
UNIGRAM = 1; // Unigram language model with dynamic algorithm
|
||||||
|
BPE = 2; // Byte Pair Encoding
|
||||||
|
WORD = 3; // Delimitered by whitespace.
|
||||||
|
CHAR = 4; // tokenizes into character sequence
|
||||||
|
}
|
||||||
|
optional ModelType model_type = 3 [default = UNIGRAM];
|
||||||
|
|
||||||
|
// Vocabulary size. 8k is the default size.
|
||||||
|
optional int32 vocab_size = 4 [default = 8000];
|
||||||
|
|
||||||
|
// List of the languages this model can accept.
|
||||||
|
// Since the model is language-agnostic, this field is used as a reference.
|
||||||
|
repeated string accept_language = 5;
|
||||||
|
|
||||||
|
// Size of self-test samples, which are encoded in the model file.
|
||||||
|
optional int32 self_test_sample_size = 6 [default = 0];
|
||||||
|
|
||||||
|
// Whether to use DP version of sentencepiece. Use it with TSV input format
|
||||||
|
// (requires precomputed word tab counts to work).
|
||||||
|
optional bool enable_differential_privacy = 50 [default = false];
|
||||||
|
// Set these parameters if you need DP version of sentencepiece.
|
||||||
|
// std of noise to add.
|
||||||
|
optional float differential_privacy_noise_level = 51 [default = 0.0];
|
||||||
|
// Clipping threshold to apply after adding noise. All the words with
|
||||||
|
// frequency less than this value are dropped.
|
||||||
|
optional uint64 differential_privacy_clipping_threshold = 52 [default = 0];
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////
|
||||||
|
// Training parameters.
|
||||||
|
//
|
||||||
|
// Uses characters which cover the corpus with the ratio of `chars_coverage`.
|
||||||
|
// This parameter determines the set of basic Alphabet of sentence piece.
|
||||||
|
// 1.0 - `chars_coverage` characters are treated as UNK.
|
||||||
|
// See also required_chars field.
|
||||||
|
optional float character_coverage = 10 [default = 0.9995];
|
||||||
|
|
||||||
|
// Maximum size of sentences the trainer loads from `input` parameter.
|
||||||
|
// Trainer simply loads the `input` files in sequence.
|
||||||
|
// It is better to shuffle the input corpus randomly.
|
||||||
|
optional uint64 input_sentence_size = 11 [default = 0];
|
||||||
|
optional bool shuffle_input_sentence = 19 [default = true];
|
||||||
|
|
||||||
|
// Maximum size of sentences to make seed sentence pieces.
|
||||||
|
// Extended suffix array is constructed to extract frequent
|
||||||
|
// sub-strings from the corpus. This uses 20N working space,
|
||||||
|
// where N is the size of corpus.
|
||||||
|
optional int32 mining_sentence_size = 12 [deprecated = true];
|
||||||
|
|
||||||
|
// Maximum size of sentences to train sentence pieces.
|
||||||
|
optional int32 training_sentence_size = 13 [deprecated = true];
|
||||||
|
|
||||||
|
// The size of seed sentencepieces.
|
||||||
|
// `seed_sentencepiece_size` must be larger than `vocab_size`.
|
||||||
|
optional int32 seed_sentencepiece_size = 14 [default = 1000000];
|
||||||
|
|
||||||
|
// In every EM sub-iterations, keeps top
|
||||||
|
// `shrinking_factor` * `current sentencepieces size` with respect to
|
||||||
|
// the loss of the sentence piece. This value should be smaller than 1.0.
|
||||||
|
optional float shrinking_factor = 15 [default = 0.75];
|
||||||
|
|
||||||
|
// The maximum sentence length in byte. The sentences with the length
|
||||||
|
// larger than `max_sentence_length` is simply ignored.
|
||||||
|
// Longer input tends to bring the following risks:
|
||||||
|
// * Overflow during EM training (unigram language model only)
|
||||||
|
// * Performance drop because of O(n log n) cost in BPE.
|
||||||
|
optional int32 max_sentence_length = 18 [default = 4192];
|
||||||
|
|
||||||
|
// Number of threads in the training.
|
||||||
|
optional int32 num_threads = 16 [default = 16];
|
||||||
|
|
||||||
|
// Number of EM sub iterations.
|
||||||
|
optional int32 num_sub_iterations = 17 [default = 2];
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////
|
||||||
|
// SentencePiece parameters which control the shapes of sentence piece.
|
||||||
|
//
|
||||||
|
// Maximum length of sentencepiece.
|
||||||
|
optional int32 max_sentencepiece_length = 20 [default = 16];
|
||||||
|
|
||||||
|
// Uses Unicode script to split sentence pieces.
|
||||||
|
// When `split_by_unicode_script` is true, we do not allow sentence piece to
|
||||||
|
// include multiple Unicode scripts, e.g. "F1" is not a valid piece.
|
||||||
|
// Exception: CJ characters (Hiragana/Katakana/Han) are all handled
|
||||||
|
// as one script type, since Japanese word can consist of multiple scripts.
|
||||||
|
// This exception is always applied regardless of the accept-language
|
||||||
|
// parameter.
|
||||||
|
optional bool split_by_unicode_script = 21 [default = true];
|
||||||
|
|
||||||
|
// When `split_by_number` is true, put a boundary between number and
|
||||||
|
// non-number transition. If we want to treat "F1" is one token, set this flag
|
||||||
|
// to be false.
|
||||||
|
optional bool split_by_number = 23 [default = true];
|
||||||
|
|
||||||
|
// Use a white space to split sentence pieces.
|
||||||
|
// When `split_by_whitespace` is false, we may have the piece containing
|
||||||
|
// a white space in the middle. e.g., "in_the".
|
||||||
|
optional bool split_by_whitespace = 22 [default = true];
|
||||||
|
|
||||||
|
// Adds whitespace symbol (_) as a suffix instead of prefix. e.g., _hello =>
|
||||||
|
// hello_. When `treat_whitespace_as_suffix` is true,
|
||||||
|
// NormalizerSpec::add_dummy_prefix will add the dummy whitespace to the end
|
||||||
|
// of sentence.
|
||||||
|
optional bool treat_whitespace_as_suffix = 24 [default = false];
|
||||||
|
|
||||||
|
// Allows pieces that only contain whitespaces instead of appearing only as
|
||||||
|
// prefix or suffix of other pieces.
|
||||||
|
optional bool allow_whitespace_only_pieces = 26 [default = false];
|
||||||
|
|
||||||
|
// Split all digits (0-9) into separate pieces.
|
||||||
|
optional bool split_digits = 25 [default = false];
|
||||||
|
|
||||||
|
// Defines the pre-tokenization delimiter.
|
||||||
|
// When specified, no pieces crossing this delimiter is not included
|
||||||
|
// in the vocab. Then the delimiter string is virtually ignored
|
||||||
|
// during the training. This field can allows constraints on the vocabulary
|
||||||
|
// selection. Note that this field is available on unigram mode.
|
||||||
|
optional string pretokenization_delimiter = 53 [ default = ""];
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////
|
||||||
|
// Vocabulary management
|
||||||
|
//
|
||||||
|
// Defines control symbols used as an indicator to
|
||||||
|
// change the behavior of the decoder. <s> and </s> are pre-defined.
|
||||||
|
// We can use this field to encode various meta information,
|
||||||
|
// including language indicator in multilingual model.
|
||||||
|
// These symbols are not visible to users, but visible to
|
||||||
|
// the decoder. Note that when the input sentence contains control symbols,
|
||||||
|
// they are not treated as one token, but segmented into normal pieces.
|
||||||
|
// Control symbols must be inserted independently from the segmentation.
|
||||||
|
repeated string control_symbols = 30;
|
||||||
|
|
||||||
|
// Defines user defined symbols.
|
||||||
|
// These symbols are added with extremely high score
|
||||||
|
// so they are always treated as one unique symbol in any context.
|
||||||
|
// Typical usage of user_defined_symbols is placeholder for named entities.
|
||||||
|
repeated string user_defined_symbols = 31;
|
||||||
|
|
||||||
|
// Defines required characters. Each UTF8 character in this string is included
|
||||||
|
// in the character set regardless of character_coverage value. Unlike
|
||||||
|
// user_defined_symbols, these characters have scores based on the frequency
|
||||||
|
// on input sentences, and the model can form subwords using characters
|
||||||
|
// in this field.
|
||||||
|
optional string required_chars = 36;
|
||||||
|
|
||||||
|
// Decomposes unknown pieces into UTF-8 bytes.
|
||||||
|
optional bool byte_fallback = 35 [default = false];
|
||||||
|
|
||||||
|
// When creating the vocabulary file, defines whether or not to additionally
|
||||||
|
// output the score for each piece.
|
||||||
|
optional bool vocabulary_output_piece_score = 32 [default = true];
|
||||||
|
|
||||||
|
// `vocab_size` is treated as hard limit. Crash if
|
||||||
|
// the model can not produce the vocab of size `vocab_size`,
|
||||||
|
// When `hard_vocab_limit` is false, vocab_size is treated
|
||||||
|
// as soft limit. Note that when model_type=char,
|
||||||
|
// always assumes hard_vocab_limit = false.
|
||||||
|
optional bool hard_vocab_limit = 33 [default = true];
|
||||||
|
|
||||||
|
// use all symbols for vocab extraction. This flag is valid
|
||||||
|
// if model type is either CHAR or WORD
|
||||||
|
optional bool use_all_vocab = 34 [default = false];
|
||||||
|
|
||||||
|
///////////////////////////////////////////////////////////////////
|
||||||
|
// Reserved special meta tokens.
|
||||||
|
// * -1 is not used.
|
||||||
|
// * unk_id must not be -1.
|
||||||
|
// Id must starts with 0 and be contigous.
|
||||||
|
optional int32 unk_id = 40 [default = 0]; // <unk>
|
||||||
|
optional int32 bos_id = 41 [default = 1]; // <s>
|
||||||
|
optional int32 eos_id = 42 [default = 2]; // </s>
|
||||||
|
optional int32 pad_id = 43 [default = -1]; // <pad> (padding)
|
||||||
|
optional string unk_piece = 45 [default = "<unk>"];
|
||||||
|
optional string bos_piece = 46 [default = "<s>"];
|
||||||
|
optional string eos_piece = 47 [default = "</s>"];
|
||||||
|
optional string pad_piece = 48 [default = "<pad>"];
|
||||||
|
|
||||||
|
// Encodes <unk> into U+2047 (DOUBLE QUESTION MARK),
|
||||||
|
// since this character can be useful both for user and
|
||||||
|
// developer. We can easily figure out that <unk> is emitted.
|
||||||
|
optional string unk_surface = 44 [default = " \xE2\x81\x87 "];
|
||||||
|
|
||||||
|
// Increase bit depth to allow unigram model training on large
|
||||||
|
// (>10M sentences) corpora. A Side-effect of enabling this flag
|
||||||
|
// is increased memory usage.
|
||||||
|
optional bool train_extremely_large_corpus = 49 [default = false];
|
||||||
|
|
||||||
|
// Path to a seed sentencepieces file, with one tab-separated
|
||||||
|
// seed sentencepiece <tab> frequency per line.
|
||||||
|
optional string seed_sentencepieces_file = 54 [default = ""];
|
||||||
|
|
||||||
|
// Customized extensions: the range of field numbers
|
||||||
|
// are open to third-party extensions.
|
||||||
|
extensions 200 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
// NormalizerSpec encodes a various parameters for string normalizaiton
|
||||||
|
message NormalizerSpec {
|
||||||
|
// name of normalization rule.
|
||||||
|
optional string name = 1;
|
||||||
|
|
||||||
|
// Pre-compiled normalization rule created by
|
||||||
|
// Builder::GetPrecompiledCharsMap() or Builder::CompileCharsMap() method.
|
||||||
|
// Usually this field is set by Builder::GetNormalizerSpec() method.
|
||||||
|
optional bytes precompiled_charsmap = 2;
|
||||||
|
|
||||||
|
// Adds dummy whitespace at the beginning of text in order to
|
||||||
|
// treat "world" in "world" and "hello world" in the same way.
|
||||||
|
optional bool add_dummy_prefix = 3 [default = true];
|
||||||
|
|
||||||
|
// Removes leading, trailing, and duplicate internal whitespace.
|
||||||
|
optional bool remove_extra_whitespaces = 4 [default = true];
|
||||||
|
|
||||||
|
// Replaces whitespace with meta symbol.
|
||||||
|
// This field must be true to train sentence piece model.
|
||||||
|
optional bool escape_whitespaces = 5 [default = true];
|
||||||
|
|
||||||
|
// Custom normalization rule file in TSV format.
|
||||||
|
// https://github.com/google/sentencepiece/blob/master/doc/normalization.md
|
||||||
|
// This field is only used in SentencePieceTrainer::Train() method, which
|
||||||
|
// compiles the rule into the binary rule stored in `precompiled_charsmap`.
|
||||||
|
optional string normalization_rule_tsv = 6;
|
||||||
|
|
||||||
|
// Customized extensions: the range of field numbers
|
||||||
|
// are open to third-party extensions.
|
||||||
|
extensions 200 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Proto to store samples for self-testing.
|
||||||
|
message SelfTestData {
|
||||||
|
message Sample {
|
||||||
|
optional string input = 1;
|
||||||
|
optional string expected = 2;
|
||||||
|
}
|
||||||
|
repeated Sample samples = 1;
|
||||||
|
|
||||||
|
// Customized extensions: the range of field numbers
|
||||||
|
// are open to third-party extensions.
|
||||||
|
extensions 200 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ModelProto stores model parameters.
|
||||||
|
// SentencePieceProcessor is supposed to be self-contained.
|
||||||
|
// All settings/parameters which may change the behavior must be encoded
|
||||||
|
// in ModelProto.
|
||||||
|
message ModelProto {
|
||||||
|
message SentencePiece {
|
||||||
|
enum Type {
|
||||||
|
NORMAL = 1; // normal symbol
|
||||||
|
UNKNOWN = 2; // unknown symbol. only <unk> for now.
|
||||||
|
CONTROL = 3; // control symbols. </s>, <s>, <2ja> etc.
|
||||||
|
USER_DEFINED = 4; // user defined symbols.
|
||||||
|
// Typical usage of USER_DEFINED symbol
|
||||||
|
// is placeholder.
|
||||||
|
BYTE = 6; // byte symbols. Used when `byte_fallback` is true.
|
||||||
|
UNUSED = 5; // this piece is not used.
|
||||||
|
}
|
||||||
|
optional string piece = 1; // piece must not be empty.
|
||||||
|
optional float score = 2;
|
||||||
|
optional Type type = 3 [default = NORMAL];
|
||||||
|
|
||||||
|
// Customized extensions: the range of field numbers
|
||||||
|
// are open to third-party extensions.
|
||||||
|
extensions 200 to max;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sentence pieces with scores.
|
||||||
|
repeated SentencePiece pieces = 1;
|
||||||
|
|
||||||
|
// Spec used to generate this model file.
|
||||||
|
optional TrainerSpec trainer_spec = 2;
|
||||||
|
|
||||||
|
// Spec for text normalization.
|
||||||
|
optional NormalizerSpec normalizer_spec = 3;
|
||||||
|
|
||||||
|
// Stores sample input and its expected segmentation to verify the model.
|
||||||
|
optional SelfTestData self_test_data = 4;
|
||||||
|
|
||||||
|
// Spec for text de-normalization.
|
||||||
|
optional NormalizerSpec denormalizer_spec = 5;
|
||||||
|
|
||||||
|
// Customized extensions: the range of field numbers
|
||||||
|
// are open to third-party extensions.
|
||||||
|
extensions 200 to max;
|
||||||
|
}
|
||||||
convert/tokenizer.go (new file)
@@ -0,0 +1,106 @@
package convert

import (
    "cmp"
    "crypto/sha256"
    "encoding/json"
    "fmt"
    "log/slog"
    "os"
    "slices"

    "golang.org/x/exp/maps"
)

type Tokenizer struct {
    Version string `json:"version"`
    AddedTokens []Token `json:"added_tokens"`
    Model TokenizerModel `json:"model"`

    PreTokenizer struct {
        PreTokenizers []struct {
            Type string `json:"type"`
            Pattern struct {
                Regex string `json:"Regex"`
            } `json:"pattern"`
        } `json:"pretokenizers"`
    } `json:"pre_tokenizer"`
}

type TokenizerModel struct {
    Type string `json:"type"`
    Vocab map[string]int `json:"vocab"`
    Merges []string `json:"merges"`
    Tokens []Token
}

type Token struct {
    ID int `json:"id"`
    Content string `json:"content"`
    Special bool `json:"special"`
    UserDefined bool
}

func (t *Token) Type() int32 {
    switch {
    case t.Special:
        return tokenTypeControl
    case t.UserDefined:
        return tokenTypeUserDefined
    default:
        return tokenTypeNormal
    }
}

func (t *Tokenizer) maxID() int {
    return max(
        slices.Max(maps.Values(t.Model.Vocab)),
        slices.MaxFunc(t.AddedTokens, func(a, b Token) int {
            return cmp.Compare(a.ID, b.ID)
        }).ID,
    )
}

func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, err error) {
    f, err := os.Open(dirpath)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    var t Tokenizer
    if err := json.NewDecoder(f).Decode(&t); err != nil {
        return "", nil, nil, err
    }

    tokens = make([]Token, t.maxID()+1)
    for k, v := range t.Model.Vocab {
        tokens[v] = Token{ID: v, Content: k, Special: false, UserDefined: false}
    }

    for _, v := range t.AddedTokens {
        v.UserDefined = true
        tokens[v.ID] = v
    }

    sha256sum := sha256.New()
    for _, pt := range t.PreTokenizer.PreTokenizers {
        if pt.Type == "Split" && pt.Pattern.Regex != "" {
            sha256sum.Write([]byte(pt.Pattern.Regex))
        }
    }

    switch digest := fmt.Sprintf("%x", sha256sum.Sum(nil)); digest {
    case "d98f9631be1e9607a9848c26c1f9eac1aa9fc21ac6ba82a2fc0741af9780a48f":
        pre = "llama-bpe"
    case "03df5c5863ad70781dcfdef491ead25140f895fe8010964be0daefe27be32b02":
        pre = "deepseek-llm"
    case "21cde974d587f0d54dc8d56b183cc1e6239600172035c68fbd6d4b9f8da0576e":
        pre = "deepseek-coder"
    default:
        slog.Warn("unknown pretokenizer, using default", "digest", digest)
        pre = "default"
    }

    return pre, tokens, t.Model.Merges, nil
}
convert/torch.go (new file)
@@ -0,0 +1,287 @@
|
|||||||
|
package convert

import (
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/nlpodyssey/gopickle/pytorch"
	"github.com/nlpodyssey/gopickle/types"
	"github.com/x448/float16"

	"github.com/ollama/ollama/llm"
)

type torchWriterTo struct {
	t *llm.Tensor

	params *Params
	bo     ByteOrder

	storage  pytorch.StorageInterface
	repacker func(string, []float32, []uint64) ([]float32, error)
}

type TorchFormat struct{}

func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor, error) {
	slog.Debug("getting torch tensors")

	var files []string
	if pt, _ := filepath.Glob(filepath.Join(dirpath, "consolidated*.pth")); len(pt) > 0 {
		files = append(files, pt...)
	} else if pt, _ := filepath.Glob(filepath.Join(dirpath, "pytorch_model*.pth")); len(pt) > 0 {
		files = append(files, pt...)
	}

	var offset uint64
	var tensors []llm.Tensor
	for _, fn := range files {
		m, err := pytorch.Load(fn)
		if err != nil {
			slog.Error(fmt.Sprintf("error unpickling: %q", err))
			return []llm.Tensor{}, err
		}

		for _, k := range m.(*types.Dict).Keys() {
			if strings.HasSuffix(k.(string), "self_attn.rotary_emb.inv_freq") {
				continue
			}

			t, _ := m.(*types.Dict).Get(k)
			tshape := t.(*pytorch.Tensor).Size

			var size uint64
			var kind uint32
			switch len(tshape) {
			case 0:
				continue
			case 1:
				// convert to float32
				kind = 0
				size = uint64(tshape[0] * 4)
			case 2:
				// convert to float16
				kind = 1
				size = uint64(tshape[0] * tshape[1] * 2)
			}

			ggufName, err := tf.GetLayerName(k.(string))
			if err != nil {
				slog.Error(err.Error())
				return nil, err
			}
			slog.Debug(fmt.Sprintf("'%35s': '%30s' %10d [%#v]", k.(string), ggufName, size, tshape))

			shape := []uint64{0, 0, 0, 0}
			for i := range tshape {
				shape[i] = uint64(tshape[i])
			}

			tensor := llm.Tensor{
				Name:   ggufName,
				Kind:   kind,
				Offset: offset, // calculate the offset
				Shape:  shape,
			}

			tensor.WriterTo = torchWriterTo{
				t:       &tensor,
				params:  params,
				bo:      params.ByteOrder,
				storage: t.(*pytorch.Tensor).Source,
			}

			tensors = append(tensors, tensor)
			offset += size
		}
	}

	return tensors, nil
}

func getAltParams(dirpath string) (*Params, error) {
	f, err := os.Open(filepath.Join(dirpath, "params.json"))
	if err != nil {
		slog.Error("no params.json")
		return nil, err
	}
	defer f.Close()

	type TorchParams struct {
		HiddenSize     int     `json:"dim"`
		AttentionHeads int     `json:"n_heads"`
		KeyValHeads    int     `json:"n_kv_heads"`
		HiddenLayers   int     `json:"n_layers"`
		RopeTheta      float64 `json:"rope_theta"`
		NormEPS        float64 `json:"norm_eps"`
	}

	var tparams TorchParams

	d := json.NewDecoder(f)
	err = d.Decode(&tparams)
	if err != nil {
		return nil, err
	}

	params := &Params{
		Architectures:  []string{"LlamaForCausalLM"},
		HiddenSize:     tparams.HiddenSize,
		AttentionHeads: tparams.AttentionHeads,
		KeyValHeads:    tparams.KeyValHeads,
		HiddenLayers:   tparams.HiddenLayers,
		NormEPS:        tparams.NormEPS,
	}

	switch {
	case tparams.RopeTheta == 1000000:
		// Codellama
		params.ContextSize = 16384
	case tparams.NormEPS == 1e-06:
		// llama2
		slog.Debug("Found llama2 - setting context size to 4096")
		params.ContextSize = 4096
	default:
		params.ContextSize = 2048
	}

	params.ByteOrder = binary.LittleEndian
	return params, nil
}

func (m *TorchFormat) GetParams(dirpath string) (*Params, error) {
	f, err := os.Open(filepath.Join(dirpath, "config.json"))
	if err != nil {
		if os.IsNotExist(err) {
			// try params.json instead
			return getAltParams(dirpath)
		} else {
			return nil, err
		}
	}

	var params Params
	d := json.NewDecoder(f)
	err = d.Decode(&params)
	if err != nil {
		return nil, err
	}

	params.ByteOrder = binary.LittleEndian
	return &params, nil
}

func (m *TorchFormat) GetLayerName(n string) (string, error) {
	directMap := map[string]string{
		"tok_embeddings.weight":     "token_embd.weight",
		"output.weight":             "output.weight",
		"norm.weight":               "output_norm.weight",
		"rope.freqs":                "rope_freqs.weight",
		"model.embed_tokens.weight": "token_embd.weight",
		"lm_head.weight":            "output.weight",
		"model.norm.weight":         "output_norm.weight",
	}

	lMap := map[string]string{
		"layers.(\\d+).attention_norm.weight":                 "blk.$1.attn_norm.weight",
		"layers.(\\d+).attention_output_norm.weight":          "blk.$1.attn_norm.weight",
		"layers.(\\d+).feed_forward.w2.weight":                "blk.$1.ffn_down.weight",
		"layers.(\\d+).feed_forward.w1.weight":                "blk.$1.ffn_gate.weight",
		"layers.(\\d+).feed_forward.w3.weight":                "blk.$1.ffn_up.weight",
		"layers.(\\d+).ffn_norm.weight":                       "blk.$1.ffn_norm.weight",
		"layers.(\\d+).attention.wk.weight":                   "blk.$1.attn_k.weight",
		"layers.(\\d+).attention.wo.weight":                   "blk.$1.attn_output.weight",
		"layers.(\\d+).attention.wq.weight":                   "blk.$1.attn_q.weight",
		"layers.(\\d+).attention.wv.weight":                   "blk.$1.attn_v.weight",
		"model.layers.(\\d+).input_layernorm.weight":          "blk.$1.attn_norm.weight",
		"model.layers.(\\d+).mlp.down_proj.weight":            "blk.$1.ffn_down.weight",
		"model.layers.(\\d+).mlp.gate_proj.weight":            "blk.$1.ffn_gate.weight",
		"model.layers.(\\d+).mlp.up_proj.weight":              "blk.$1.ffn_up.weight",
		"model.layers.(\\d+).post_attention_layernorm.weight": "blk.$1.ffn_norm.weight",
		"model.layers.(\\d+).self_attn.k_proj.weight":         "blk.$1.attn_k.weight",
		"model.layers.(\\d+).self_attn.o_proj.weight":         "blk.$1.attn_output.weight",
		"model.layers.(\\d+).self_attn.q_proj.weight":         "blk.$1.attn_q.weight",
		"model.layers.(\\d+).self_attn.v_proj.weight":         "blk.$1.attn_v.weight",
	}

	v, ok := directMap[n]
	if ok {
		return v, nil
	}

	// quick hack to rename the layers to gguf format
	for k, v := range lMap {
		re := regexp.MustCompile(k)
		newName := re.ReplaceAllString(n, v)
		if newName != n {
			return newName, nil
		}
	}

	return "", fmt.Errorf("couldn't find a layer name for '%s'", n)
}

func (r torchWriterTo) WriteTo(w io.Writer) (n int64, err error) {
	var f32s []float32
	switch s := r.storage.(type) {
	case *pytorch.FloatStorage:
		f32s = s.Data
	case *pytorch.HalfStorage:
		f32s = s.Data
	case *pytorch.BFloat16Storage:
		f32s = s.Data
	default:
		return 0, fmt.Errorf("unknown data type: %T", s)
	}

	if r.repacker != nil {
		f32s, err = r.repacker(r.t.Name, f32s, r.t.Shape)
		if err != nil {
			return 0, err
		}
	}

	switch r.t.Kind {
	case 0:
		return 0, binary.Write(w, r.bo, f32s)
	case 1:
		f16s := make([]uint16, len(f32s))
		for i := range f32s {
			f16s[i] = float16.Fromfloat32(f32s[i]).Bits()
		}

		return 0, binary.Write(w, r.bo, f16s)
	default:
		return 0, fmt.Errorf("unknown storage type: %d", r.t.Kind)
	}
}

func (m *TorchFormat) GetModelArch(name, dirPath string, params *Params) (ModelArch, error) {
	switch len(params.Architectures) {
	case 0:
		return nil, fmt.Errorf("No architecture specified to convert")
	case 1:
		switch params.Architectures[0] {
		case "LlamaForCausalLM":
			return &LlamaModel{
				ModelData{
					Name:   name,
					Path:   dirPath,
					Params: params,
					Format: m,
				},
			}, nil
		default:
			return nil, fmt.Errorf("Models based on '%s' are not yet supported", params.Architectures[0])
		}
	}

	return nil, fmt.Errorf("Unknown error")
}
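To illustrate the renaming performed by `GetLayerName`, here is a small standalone sketch; the tensor names below are examples, and the map is only a subset of the one in `torch.go`:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// A subset of the pattern -> GGUF name map used by GetLayerName.
	lMap := map[string]string{
		`model.layers.(\d+).self_attn.q_proj.weight`: "blk.$1.attn_q.weight",
		`layers.(\d+).feed_forward.w1.weight`:        "blk.$1.ffn_gate.weight",
	}

	for _, name := range []string{
		"model.layers.0.self_attn.q_proj.weight",
		"layers.12.feed_forward.w1.weight",
	} {
		for pattern, repl := range lMap {
			re := regexp.MustCompile(pattern)
			// Only report a match when the replacement actually changed the name.
			if newName := re.ReplaceAllString(name, repl); newName != name {
				fmt.Printf("%s -> %s\n", name, newName)
			}
		}
	}
}
```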
@@ -1,6 +1,21 @@
# Documentation

-- [Modelfile](./modelfile.md)
-- [How to develop Ollama](./development.md)
-- [API](./api.md)
-- [Tutorials](./tutorials.md)
+### Getting Started
+* [Quickstart](../README.md#quickstart)
+* [Examples](../examples)
+* [Importing models](./import.md)
+* [Linux Documentation](./linux.md)
+* [Windows Documentation](./windows.md)
+* [Docker Documentation](./docker.md)
+
+### Reference
+
+* [API Reference](./api.md)
+* [Modelfile Reference](./modelfile.md)
+* [OpenAI Compatibility](./openai.md)
+
+### Resources
+
+* [Troubleshooting Guide](./troubleshooting.md)
+* [FAQ](./faq.md)
+* [Development guide](./development.md)
665  docs/api.md
@@ -3,6 +3,7 @@
## Endpoints

- [Generate a completion](#generate-a-completion)
+- [Generate a chat completion](#generate-a-chat-completion)
- [Create a Model](#create-a-model)
- [List Local Models](#list-local-models)
- [Show Model Information](#show-model-information)
@@ -11,12 +12,13 @@
- [Pull a Model](#pull-a-model)
- [Push a Model](#push-a-model)
- [Generate Embeddings](#generate-embeddings)
+- [List Running Models](#list-running-models)

## Conventions

### Model names

-Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama2:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.
+Model names follow a `model:tag` format, where `model` can have an optional namespace such as `example/model`. Some examples are `orca-mini:3b-q4_1` and `llama3:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.

### Durations

@@ -24,7 +26,7 @@ All durations are returned in nanoseconds.

### Streaming responses

-Certain endpoints stream responses as JSON objects delineated with the newline (`\n`) character.
+Certain endpoints stream responses as JSON objects and can optionally return non-streamed responses.
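As a sketch of how a client might consume such a stream from the `/api/generate` endpoint described below (the `response` and `done` fields follow the documented response objects; the model name is taken from the examples, and everything else is illustrative, not part of this change):

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Example request body; the model name is illustrative.
	body := []byte(`{"model": "llama3", "prompt": "Why is the sky blue?"}`)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each line of the streamed response is one JSON object.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		var chunk struct {
			Response string `json:"response"`
			Done     bool   `json:"done"`
		}
		if err := json.Unmarshal(scanner.Bytes(), &chunk); err != nil {
			panic(err)
		}
		fmt.Print(chunk.Response)
		if chunk.Done {
			break
		}
	}
}
```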

## Generate a completion

@@ -32,47 +34,51 @@ Certain endpoints stream responses as JSON objects delineated with the newline (
POST /api/generate
```

-Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses. The final response object will include statistics and additional data from the request.
+Generate a response for a given prompt with a provided model. This is a streaming endpoint, so there will be a series of responses. The final response object will include statistics and additional data from the request.

### Parameters

- `model`: (required) the [model name](#model-names)
- `prompt`: the prompt to generate a response for
+- `images`: (optional) a list of base64-encoded images (for multimodal models such as `llava`)

Advanced parameters (optional):

- `format`: the format to return a response in. Currently the only accepted value is `json`
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
-- `system`: system prompt to (overrides what is defined in the `Modelfile`)
+- `system`: system message (overrides what is defined in the `Modelfile`)
-- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
+- `template`: the prompt template to use (overrides what is defined in the `Modelfile`)
- `context`: the context parameter returned from a previous request to `/generate`, this can be used to keep a short conversational memory
- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
-- `raw`: if `true` no formatting will be applied to the prompt and no context will be returned. You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API, and are managing history yourself.
+- `raw`: if `true` no formatting will be applied to the prompt. You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API
+- `keep_alive`: controls how long the model will stay loaded into memory following the request (default: `5m`)

-### JSON mode
+#### JSON mode

-Enable JSON mode by setting the `format` parameter to `json`. This will structure the response as valid JSON. See the JSON mode [example](#request-json-mode) below.
+Enable JSON mode by setting the `format` parameter to `json`. This will structure the response as a valid JSON object. See the JSON mode [example](#request-json-mode) below.

> Note: it's important to instruct the model to use JSON in the `prompt`. Otherwise, the model may generate large amounts whitespace.

### Examples

-#### Request
+#### Generate request (Streaming)
+
+##### Request

```shell
curl http://localhost:11434/api/generate -d '{
-  "model": "llama2",
+  "model": "llama3",
  "prompt": "Why is the sky blue?"
}'
```

-#### Response
+##### Response

A stream of JSON objects is returned:

```json
{
-  "model": "llama2",
+  "model": "llama3",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "response": "The",
  "done": false
@@ -83,122 +89,95 @@ The final response in the stream also includes additional data about the generat

- `total_duration`: time spent generating the response
- `load_duration`: time spent in nanoseconds loading the model
-- `sample_count`: number of samples generated
-- `sample_duration`: time spent generating samples
- `prompt_eval_count`: number of tokens in the prompt
- `prompt_eval_duration`: time spent in nanoseconds evaluating the prompt
-- `eval_count`: number of tokens the response
+- `eval_count`: number of tokens in the response
- `eval_duration`: time in nanoseconds spent generating the response
- `context`: an encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory
- `response`: empty if the response was streamed, if not streamed, this will contain the full response

-To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration`.
+To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration` * `10^9`.

```json
{
-  "model": "llama2",
+  "model": "llama3",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "",
-  "context": [1, 2, 3],
  "done": true,
-  "total_duration": 5589157167,
-  "load_duration": 3013701500,
-  "sample_count": 114,
-  "sample_duration": 81442000,
-  "prompt_eval_count": 46,
-  "prompt_eval_duration": 1160282000,
-  "eval_count": 113,
-  "eval_duration": 1325948000
+  "context": [1, 2, 3],
+  "total_duration": 10706818083,
+  "load_duration": 6338219291,
+  "prompt_eval_count": 26,
+  "prompt_eval_duration": 130079000,
+  "eval_count": 259,
+  "eval_duration": 4232710000
}
```
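For example, with the values in the response above, 259 / 4232710000 * 10^9 is roughly 61 tokens per second.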

#### Request (No streaming)

+##### Request
+
+A response can be received in one reply when streaming is off.

```shell
curl http://localhost:11434/api/generate -d '{
-  "model": "llama2",
+  "model": "llama3",
  "prompt": "Why is the sky blue?",
  "stream": false
}'
```

-#### Response
+##### Response

If `stream` is set to `false`, the response will be a single JSON object:

```json
{
-  "model": "llama2",
+  "model": "llama3",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "The sky is blue because it is the color of the sky.",
-  "context": [1, 2, 3],
-  "done": true,
-  "total_duration": 5589157167,
-  "load_duration": 3013701500,
-  "sample_count": 114,
-  "sample_duration": 81442000,
-  "prompt_eval_count": 46,
-  "prompt_eval_duration": 1160282000,
-  "eval_count": 13,
-  "eval_duration": 1325948000
-}
-```
-
-#### Request (Raw mode)
-
-In some cases you may wish to bypass the templating system and provide a full prompt. In this case, you can use the `raw` parameter to disable formatting and context.
-
-```shell
-curl http://localhost:11434/api/generate -d '{
-  "model": "mistral",
-  "prompt": "[INST] why is the sky blue? [/INST]",
-  "raw": true,
-  "stream": false
-}'
-```
-
-#### Response
-
-```json
-{
-  "model": "mistral",
-  "created_at": "2023-11-03T15:36:02.583064Z",
-  "response": " The sky appears blue because of a phenomenon called Rayleigh scattering.",
-  "done": true,
-  "total_duration": 14648695333,
-  "load_duration": 3302671417,
-  "prompt_eval_count": 14,
-  "prompt_eval_duration": 286243000,
-  "eval_count": 129,
-  "eval_duration": 10931424000
+  "done": true,
+  "context": [1, 2, 3],
+  "total_duration": 5043500667,
+  "load_duration": 5025959,
+  "prompt_eval_count": 26,
+  "prompt_eval_duration": 325953000,
+  "eval_count": 290,
+  "eval_duration": 4709213000
}
```

#### Request (JSON mode)

+> When `format` is set to `json`, the output will always be a well-formed JSON object. It's important to also instruct the model to respond in JSON.
+
+##### Request

```shell
curl http://localhost:11434/api/generate -d '{
-  "model": "llama2",
+  "model": "llama3",
  "prompt": "What color is the sky at different times of the day? Respond using JSON",
  "format": "json",
  "stream": false
}'
```

-#### Response
+##### Response

```json
{
-  "model": "llama2",
+  "model": "llama3",
  "created_at": "2023-11-09T21:07:55.186497Z",
  "response": "{\n\"morning\": {\n\"color\": \"blue\"\n},\n\"noon\": {\n\"color\": \"blue-gray\"\n},\n\"afternoon\": {\n\"color\": \"warm gray\"\n},\n\"evening\": {\n\"color\": \"orange\"\n}\n}\n",
  "done": true,
-  "total_duration": 4661289125,
-  "load_duration": 1714434500,
+  "context": [1, 2, 3],
+  "total_duration": 4648158584,
+  "load_duration": 4071084,
  "prompt_eval_count": 36,
-  "prompt_eval_duration": 264132000,
+  "prompt_eval_duration": 439038000,
-  "eval_count": 75,
+  "eval_count": 180,
-  "eval_duration": 2112149000
+  "eval_duration": 4196918000
}
```

@@ -221,13 +200,96 @@ The value of `response` will be a string containing JSON similar to:
}
```

-#### Request (With options)
+#### Request (with images)

-If you want to set custom options for the model at runtime rather than in the Modelfile, you can do so with the `options` parameter. This example sets every available option, but you can set any of them individually and omit the ones you do not want to override.
+To submit images to multimodal models such as `llava` or `bakllava`, provide a list of base64-encoded `images`:
+
+#### Request

```shell
curl http://localhost:11434/api/generate -d '{
-  "model": "llama2",
+  "model": "llava",
+  "prompt":"What is in this picture?",
+  "stream": false,
"images": ["iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF
169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC"]
+}'
+```
+
+#### Response
+
+```
+{
+  "model": "llava",
+  "created_at": "2023-11-03T15:36:02.583064Z",
+  "response": "A happy cartoon character, which is cute and cheerful.",
+  "done": true,
+  "context": [1, 2, 3],
+  "total_duration": 2938432250,
+  "load_duration": 2559292,
+  "prompt_eval_count": 1,
+  "prompt_eval_duration": 2195557000,
+  "eval_count": 44,
+  "eval_duration": 736432000
+}
+```
+
+#### Request (Raw Mode)
+
+In some cases, you may wish to bypass the templating system and provide a full prompt. In this case, you can use the `raw` parameter to disable templating. Also note that raw mode will not return a context.
+
+##### Request
+
+```shell
+curl http://localhost:11434/api/generate -d '{
+  "model": "mistral",
+  "prompt": "[INST] why is the sky blue? [/INST]",
+  "raw": true,
+  "stream": false
+}'
+```
+
+#### Request (Reproducible outputs)
+
+For reproducible outputs, set `seed` to a number:
+
+##### Request
+
+```shell
+curl http://localhost:11434/api/generate -d '{
+  "model": "mistral",
+  "prompt": "Why is the sky blue?",
+  "options": {
+    "seed": 123
+  }
+}'
+```
+
+##### Response
+
+```json
+{
+  "model": "mistral",
+  "created_at": "2023-11-03T15:36:02.583064Z",
+  "response": " The sky appears blue because of a phenomenon called Rayleigh scattering.",
+  "done": true,
+  "total_duration": 8493852375,
+  "load_duration": 6589624375,
+  "prompt_eval_count": 14,
+  "prompt_eval_duration": 119039000,
+  "eval_count": 110,
+  "eval_duration": 1779061000
+}
+```
+
+#### Generate request (With options)
+
+If you want to set custom options for the model at runtime rather than in the Modelfile, you can do so with the `options` parameter. This example sets every available option, but you can set any of them individually and omit the ones you do not want to override.
+
+##### Request
+
+```shell
+curl http://localhost:11434/api/generate -d '{
+  "model": "llama3",
  "prompt": "Why is the sky blue?",
  "stream": false,
  "options": {
@@ -249,42 +311,314 @@ curl http://localhost:11434/api/generate -d '{
    "penalize_newline": true,
    "stop": ["\n", "user:"],
    "numa": false,
-    "num_ctx": 4,
+    "num_ctx": 1024,
    "num_batch": 2,
-    "num_gqa": 1,
    "num_gpu": 1,
    "main_gpu": 0,
    "low_vram": false,
    "f16_kv": true,
-    "logits_all": false,
    "vocab_only": false,
    "use_mmap": true,
    "use_mlock": false,
-    "embedding_only": false,
-    "rope_frequency_base": 1.1,
-    "rope_frequency_scale": 0.8,
    "num_thread": 8
  }
}'
```

-#### Response
+##### Response

```json
{
-  "model": "llama2",
+  "model": "llama3",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "The sky is blue because it is the color of the sky.",
-  "context": [1, 2, 3],
  "done": true,
-  "total_duration": 5589157167,
-  "load_duration": 3013701500,
-  "sample_count": 114,
-  "sample_duration": 81442000,
-  "prompt_eval_count": 46,
-  "prompt_eval_duration": 1160282000,
-  "eval_count": 13,
-  "eval_duration": 1325948000
+  "context": [1, 2, 3],
+  "total_duration": 4935886791,
+  "load_duration": 534986708,
+  "prompt_eval_count": 26,
+  "prompt_eval_duration": 107345000,
+  "eval_count": 237,
+  "eval_duration": 4289432000
}
```

+#### Load a model
+
+If an empty prompt is provided, the model will be loaded into memory.
+
+##### Request
+
+```shell
+curl http://localhost:11434/api/generate -d '{
+  "model": "llama3"
+}'
+```
+
+##### Response
+
+A single JSON object is returned:
+
+```json
+{
+  "model": "llama3",
+  "created_at": "2023-12-18T19:52:07.071755Z",
+  "response": "",
+  "done": true
+}
+```
+
+## Generate a chat completion
+
+```shell
+POST /api/chat
+```
+
+Generate the next message in a chat with a provided model. This is a streaming endpoint, so there will be a series of responses. Streaming can be disabled using `"stream": false`. The final response object will include statistics and additional data from the request.
+
+### Parameters
+
+- `model`: (required) the [model name](#model-names)
+- `messages`: the messages of the chat, this can be used to keep a chat memory
+
+The `message` object has the following fields:
+
+- `role`: the role of the message, either `system`, `user` or `assistant`
+- `content`: the content of the message
+- `images` (optional): a list of images to include in the message (for multimodal models such as `llava`)
+
+Advanced parameters (optional):
+
+- `format`: the format to return a response in. Currently the only accepted value is `json`
+- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
+- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
+- `keep_alive`: controls how long the model will stay loaded into memory following the request (default: `5m`)
+
+### Examples
+
+#### Chat Request (Streaming)
+
+##### Request
+
+Send a chat message with a streaming response.
+
+```shell
+curl http://localhost:11434/api/chat -d '{
+  "model": "llama3",
+  "messages": [
+    {
+      "role": "user",
+      "content": "why is the sky blue?"
+    }
+  ]
+}'
+```
+
+##### Response
+
+A stream of JSON objects is returned:
+
+```json
+{
+  "model": "llama3",
+  "created_at": "2023-08-04T08:52:19.385406455-07:00",
+  "message": {
+    "role": "assistant",
+    "content": "The",
+    "images": null
+  },
+  "done": false
+}
+```
+
+Final response:
+
+```json
+{
+  "model": "llama3",
+  "created_at": "2023-08-04T19:22:45.499127Z",
+  "done": true,
+  "total_duration": 4883583458,
+  "load_duration": 1334875,
+  "prompt_eval_count": 26,
+  "prompt_eval_duration": 342546000,
+  "eval_count": 282,
+  "eval_duration": 4535599000
+}
+```
+
+#### Chat request (No streaming)
+
+##### Request
+
+```shell
+curl http://localhost:11434/api/chat -d '{
+  "model": "llama3",
+  "messages": [
+    {
+      "role": "user",
+      "content": "why is the sky blue?"
+    }
+  ],
+  "stream": false
+}'
+```
+
+##### Response
+
+```json
+{
+  "model": "registry.ollama.ai/library/llama3:latest",
+  "created_at": "2023-12-12T14:13:43.416799Z",
+  "message": {
+    "role": "assistant",
+    "content": "Hello! How are you today?"
+  },
+  "done": true,
+  "total_duration": 5191566416,
+  "load_duration": 2154458,
+  "prompt_eval_count": 26,
+  "prompt_eval_duration": 383809000,
+  "eval_count": 298,
+  "eval_duration": 4799921000
+}
+```
+
+#### Chat request (With History)
+
+Send a chat message with a conversation history. You can use this same approach to start the conversation using multi-shot or chain-of-thought prompting.
+
+##### Request
+
+```shell
+curl http://localhost:11434/api/chat -d '{
+  "model": "llama3",
+  "messages": [
+    {
+      "role": "user",
+      "content": "why is the sky blue?"
+    },
+    {
+      "role": "assistant",
+      "content": "due to rayleigh scattering."
+    },
+    {
+      "role": "user",
+      "content": "how is that different than mie scattering?"
+    }
+  ]
+}'
+```
+
+##### Response
+
+A stream of JSON objects is returned:
+
+```json
+{
+  "model": "llama3",
+  "created_at": "2023-08-04T08:52:19.385406455-07:00",
+  "message": {
+    "role": "assistant",
+    "content": "The"
+  },
+  "done": false
+}
+```
+
+Final response:
+
+```json
+{
+  "model": "llama3",
+  "created_at": "2023-08-04T19:22:45.499127Z",
+  "done": true,
+  "total_duration": 8113331500,
+  "load_duration": 6396458,
+  "prompt_eval_count": 61,
+  "prompt_eval_duration": 398801000,
+  "eval_count": 468,
+  "eval_duration": 7701267000
+}
+```
+
+#### Chat request (with images)
+
+##### Request
+
+Send a chat message with a conversation history.
+
+```shell
+curl http://localhost:11434/api/chat -d '{
+  "model": "llava",
+  "messages": [
+    {
+      "role": "user",
+      "content": "what is in this image?",
"images": ["iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF
169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC"]
+    }
+  ]
+}'
+```
+
+##### Response
+
+```json
+{
+  "model": "llava",
+  "created_at": "2023-12-13T22:42:50.203334Z",
+  "message": {
+    "role": "assistant",
+    "content": " The image features a cute, little pig with an angry facial expression. It's wearing a heart on its shirt and is waving in the air. This scene appears to be part of a drawing or sketching project.",
+    "images": null
+  },
+  "done": true,
+  "total_duration": 1668506709,
+  "load_duration": 1986209,
+  "prompt_eval_count": 26,
+  "prompt_eval_duration": 359682000,
+  "eval_count": 83,
+  "eval_duration": 1303285000
+}
+```
+
+#### Chat request (Reproducible outputs)
+
+##### Request
+
+```shell
+curl http://localhost:11434/api/chat -d '{
+  "model": "llama3",
+  "messages": [
+    {
+      "role": "user",
+      "content": "Hello!"
+    }
+  ],
+  "options": {
+    "seed": 101,
+    "temperature": 0
+  }
+}'
+```
+
+##### Response
+
+```json
+{
+  "model": "registry.ollama.ai/library/llama3:latest",
+  "created_at": "2023-12-12T14:13:43.416799Z",
+  "message": {
+    "role": "assistant",
+    "content": "Hello! How are you today?"
+  },
+  "done": true,
+  "total_duration": 5191566416,
+  "load_duration": 2154458,
+  "prompt_eval_count": 26,
+  "prompt_eval_duration": 383809000,
+  "eval_count": 298,
+  "eval_duration": 4799921000
+}
+```

@@ -294,7 +628,7 @@ curl http://localhost:11434/api/generate -d '{
POST /api/create
```

-Create a model from a [`Modelfile`](./modelfile.md). It is recommended to set `modelfile` to the content of the Modelfile rather than just set `path`. This is a requirement for remote create. Remote model creation should also create any file blobs, fields such as `FROM` and `ADAPTER`, explicitly with the server using [Create a Blob](#create-a-blob) and the value to the path indicated in the response.
+Create a model from a [`Modelfile`](./modelfile.md). It is recommended to set `modelfile` to the content of the Modelfile rather than just set `path`. This is a requirement for remote create. Remote model creation must also create any file blobs, fields such as `FROM` and `ADAPTER`, explicitly with the server using [Create a Blob](#create-a-blob) and the value to the path indicated in the response.

### Parameters

@@ -305,23 +639,35 @@ Create a model from a [`Modelfile`](./modelfile.md). It is recommended to set `m

### Examples

-#### Request
+#### Create a new model

+Create a new model from a `Modelfile`.
+
+##### Request

```shell
curl http://localhost:11434/api/create -d '{
  "name": "mario",
-  "modelfile": "FROM llama2\nSYSTEM You are mario from Super Mario Bros."
+  "modelfile": "FROM llama3\nSYSTEM You are mario from Super Mario Bros."
}'
```

-#### Response
+##### Response

-A stream of JSON objects. When finished, `status` is `success`.
+A stream of JSON objects. Notice that the final JSON object shows a `"status": "success"`.

```json
-{
-  "status": "parsing modelfile"
-}
+{"status":"reading model metadata"}
+{"status":"creating system layer"}
+{"status":"using already created layer sha256:22f7f8ef5f4c791c1b03d7eb414399294764d7cc82c7e94aa81a1feb80a983a2"}
+{"status":"using already created layer sha256:8c17c2ebb0ea011be9981cc3922db8ca8fa61e828c5d3f44cb6ae342bf80460b"}
+{"status":"using already created layer sha256:7c23fb36d80141c4ab8cdbb61ee4790102ebd2bf7aeff414453177d4f2110e5d"}
+{"status":"using already created layer sha256:2e0493f67d0c8c9c68a8aeacdf6a38a2151cb3c4c1d42accf296e19810527988"}
+{"status":"using already created layer sha256:2759286baa875dc22de5394b4a925701b1896a7e3f8e53275c36f75a877a82c9"}
+{"status":"writing layer sha256:df30045fe90f0d750db82a058109cecd6d4de9c90a3d75b19c09e5f64580bb42"}
+{"status":"writing layer sha256:f18a68eb09bf925bb1b669490407c1b1251c5db98dc4d3d81f3088498ea55690"}
+{"status":"writing manifest"}
+{"status":"success"}
```

### Check if a Blob Exists
@@ -330,7 +676,7 @@ A stream of JSON objects. When finished, `status` is `success`.
HEAD /api/blobs/:digest
```

-Check if a blob is known to the server.
+Ensures that the file blob used for a FROM or ADAPTER field exists on the server. This is checking your Ollama server and not Ollama.ai.

#### Query Parameters

@@ -354,7 +700,7 @@ Return 200 OK if the blob exists, 404 Not Found if it does not.
POST /api/blobs/:digest
```

-Create a blob from a file. Returns the server file path.
+Create a blob from a file on the server. Returns the server file path.

#### Query Parameters

@@ -370,7 +716,7 @@ curl -T model.bin -X POST http://localhost:11434/api/blobs/sha256:29fdb92e57cf08

##### Response

-Return 201 Created if the blob was successfully created.
+Return 201 Created if the blob was successfully created, 400 Bad Request if the digest used is not expected.

## List Local Models

@@ -396,14 +742,30 @@ A single JSON object will be returned.
{
  "models": [
    {
-      "name": "llama2",
+      "name": "codellama:13b",
-      "modified_at": "2023-08-02T17:02:23.713454393-07:00",
+      "modified_at": "2023-11-04T14:56:49.277302595-07:00",
-      "size": 3791730596
+      "size": 7365960935,
+      "digest": "9f438cb9cd581fc025612d27f7c1a6669ff83a8bb0ed86c94fcf4c5440555697",
+      "details": {
+        "format": "gguf",
+        "family": "llama",
+        "families": null,
+        "parameter_size": "13B",
+        "quantization_level": "Q4_0"
+      }
    },
    {
-      "name": "llama2:13b",
+      "name": "llama3:latest",
-      "modified_at": "2023-08-08T12:08:38.093596297-07:00",
+      "modified_at": "2023-12-07T09:32:18.757212583-08:00",
-      "size": 7323310500
+      "size": 3825819519,
+      "digest": "fe938a131f40e6f6d40083c9f0f430a515233eb2edaa6d72eb85c50d64f2300e",
+      "details": {
+        "format": "gguf",
+        "family": "llama",
+        "families": null,
+        "parameter_size": "7B",
+        "quantization_level": "Q4_0"
+      }
    }
  ]
}
@@ -415,7 +777,7 @@ A single JSON object will be returned.
POST /api/show
```

-Show details about a model including modelfile, template, parameters, license, and system prompt.
+Show information about a model including details, modelfile, template, parameters, license, and system prompt.

### Parameters

@@ -427,7 +789,7 @@ Show details about a model including modelfile, template, parameters, license, a

```shell
curl http://localhost:11434/api/show -d '{
-  "name": "llama2"
+  "name": "llama3"
}'
```

@@ -435,10 +797,16 @@ curl http://localhost:11434/api/show -d '{

```json
{
-  "license": "<contents of license block>",
-  "modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llama2:latest\n\nFROM /Users/username/.ollama/models/blobs/sha256:8daa9615cce30c259a9555b1cc250d461d1bc69980a274b44d7eda0be78076d8\nTEMPLATE \"\"\"[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] \"\"\"\nSYSTEM \"\"\"\"\"\"\nPARAMETER stop [INST]\nPARAMETER stop [/INST]\nPARAMETER stop <<SYS>>\nPARAMETER stop <</SYS>>\n",
-  "parameters": "stop [INST]\nstop [/INST]\nstop <<SYS>>\nstop <</SYS>>",
-  "template": "[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] "
+  "modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM /Users/matt/.ollama/models/blobs/sha256:200765e1283640ffbd013184bf496e261032fa75b99498a9613be4e94d63ad52\nTEMPLATE \"\"\"{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: \"\"\"\nPARAMETER num_ctx 4096\nPARAMETER stop \"\u003c/s\u003e\"\nPARAMETER stop \"USER:\"\nPARAMETER stop \"ASSISTANT:\"",
+  "parameters": "num_ctx 4096\nstop \u003c/s\u003e\nstop USER:\nstop ASSISTANT:",
+  "template": "{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: ",
+  "details": {
+    "format": "gguf",
+    "family": "llama",
+    "families": ["llama", "clip"],
+    "parameter_size": "7B",
+    "quantization_level": "Q4_0"
+  }
}
```

@@ -456,14 +824,14 @@ Copy a model. Creates a model with another name from an existing model.

```shell
curl http://localhost:11434/api/copy -d '{
-  "source": "llama2",
+  "source": "llama3",
-  "destination": "llama2-backup"
+  "destination": "llama3-backup"
}'
```

#### Response

-The only response is a 200 OK if successful.
+Returns a 200 OK if successful, or a 404 Not Found if the source model doesn't exist.

## Delete a Model

@@ -483,13 +851,13 @@ Delete a model and its data.

```shell
curl -X DELETE http://localhost:11434/api/delete -d '{
-  "name": "llama2:13b"
+  "name": "llama3:13b"
}'
```

#### Response

-If successful, the only response is a 200 OK.
+Returns a 200 OK if successful, 404 Not Found if the model to be deleted doesn't exist.

## Pull a Model

@@ -511,7 +879,7 @@ Download a model from the ollama library. Cancelled pulls are resumed from where

```shell
curl http://localhost:11434/api/pull -d '{
-  "name": "llama2"
+  "name": "llama3"
}'
```

@@ -644,6 +1012,7 @@ Generate embeddings from a model
Advanced parameters:

- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
+- `keep_alive`: controls how long the model will stay loaded into memory following the request (default: `5m`)

### Examples

@@ -651,7 +1020,7 @@ Advanced parameters:

```shell
curl http://localhost:11434/api/embeddings -d '{
-  "model": "llama2",
+  "model": "all-minilm",
  "prompt": "Here is an article about llamas..."
}'
```
@@ -666,3 +1035,47 @@ curl http://localhost:11434/api/embeddings -d '{
  ]
}
```
+
+## List Running Models
+
+```shell
+GET /api/ps
+```
+
+List models that are currently loaded into memory.
+
+#### Examples
+
+### Request
+
+```shell
+curl http://localhost:11434/api/ps
+```
+
+#### Response
+
+A single JSON object will be returned.
+
+```json
+{
+  "models": [
+    {
+      "name": "mistral:latest",
+      "model": "mistral:latest",
+      "size": 5137025024,
+      "digest": "2ae6f6dd7a3dd734790bbbf58b8909a606e0e7e97e94b7604e0aa7ae4490e6d8",
+      "details": {
+        "parent_model": "",
+        "format": "gguf",
+        "family": "llama",
+        "families": [
+          "llama"
+        ],
+        "parameter_size": "7.2B",
+        "quantization_level": "Q4_0"
+      },
+      "expires_at": "2024-06-04T14:38:31.83753-07:00",
+      "size_vram": 5137025024
+    }
+  ]
+}
+```
|||||||
@@ -1,20 +1,28 @@
|
|||||||
# Development
|
# Development
|
||||||
|
|
||||||
- Install cmake (and, optionally, the required tools for GPUs)
|
|
||||||
- run `go generate ./...`
|
|
||||||
- run `go build .`
|
|
||||||
|
|
||||||
Install required tools:
|
Install required tools:
|
||||||
|
|
||||||
- cmake version 3.24 or higher
|
- cmake version 3.24 or higher
|
||||||
- go version 1.20 or higher
|
- go version 1.22 or higher
|
||||||
- gcc version 11.4.0 or higher
|
- gcc version 11.4.0 or higher
|
||||||
|
|
||||||
|
### MacOS
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
brew install go cmake gcc
|
brew install go cmake gcc
|
||||||
```
|
```
|
||||||
|
|
||||||
Get the required libraries:
|
Optionally enable debugging and more verbose logging:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# At build time
|
||||||
|
export CGO_CFLAGS="-g"
|
||||||
|
|
||||||
|
# At runtime
|
||||||
|
export OLLAMA_DEBUG=1
|
||||||
|
```
|
||||||
|
|
||||||
|
Get the required libraries and build the native LLM code:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
go generate ./...
|
go generate ./...
|
||||||
@@ -32,8 +40,108 @@ Now you can run `ollama`:
|
|||||||
./ollama
|
./ollama
|
||||||
```
|
```
|
||||||
|
|
||||||
## Building on Linux with GPU support
|
### Linux
|
||||||
|
|
||||||
- Install cmake and nvidia-cuda-toolkit
|
#### Linux CUDA (NVIDIA)
|
||||||
- run `go generate ./...`
|
|
||||||
- run `go build .`
|
_Your operating system distribution may already have packages for NVIDIA CUDA. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!_
|
||||||
|
|
||||||
|
Install `cmake` and `golang` as well as [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads)
|
||||||
|
development and runtime packages.
|
||||||
|
|
||||||
|
Typically the build scripts will auto-detect CUDA, however, if your Linux distro
|
||||||
|
or installation approach uses unusual paths, you can specify the location by
|
||||||
|
setting the environment variable `CUDA_LIB_DIR` to the location of the shared
|
||||||
|
libraries, and `CUDACXX` to the location of the nvcc compiler. You can customize
|
||||||
|
a set of target CUDA architectures by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "50;60;70")
|
||||||
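As a sketch, with a CUDA install in a non-default location (the paths below are placeholders, not defaults), the overrides might look like this before building:

```bash
# Example paths only - point these at your actual CUDA install
export CUDA_LIB_DIR=/usr/local/cuda/lib64
export CUDACXX=/usr/local/cuda/bin/nvcc
export CMAKE_CUDA_ARCHITECTURES="50;60;70"
```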
|
|
||||||
|
Then generate dependencies:
|
||||||
|
|
||||||
|
```
|
||||||
|
go generate ./...
|
||||||
|
```
|
||||||
|
|
||||||
|
Then build the binary:
|
||||||
|
|
||||||
|
```
|
||||||
|
go build .
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Linux ROCm (AMD)
|
||||||
|
|
||||||
|
_Your operating system distribution may already have packages for AMD ROCm and CLBlast. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!_
|
||||||
|
|
||||||
|
Install [CLBlast](https://github.com/CNugteren/CLBlast/blob/master/doc/installation.md) and [ROCm](https://rocm.docs.amd.com/en/latest/) development packages first, as well as `cmake` and `golang`.
|
||||||
|
|
||||||
|
Typically the build scripts will auto-detect ROCm, however, if your Linux distro
|
||||||
|
or installation approach uses unusual paths, you can specify the location by
|
||||||
|
setting the environment variable `ROCM_PATH` to the location of the ROCm
|
||||||
|
install (typically `/opt/rocm`), and `CLBlast_DIR` to the location of the
|
||||||
|
CLBlast install (typically `/usr/lib/cmake/CLBlast`). You can also customize
|
||||||
|
the AMD GPU targets by setting `AMDGPU_TARGETS` (e.g. `AMDGPU_TARGETS="gfx1101;gfx1102"`)
|
||||||
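For example, a sketch using the typical locations mentioned above (adjust the paths and targets to your install):

```bash
# Typical locations mentioned above - adjust if your install differs
export ROCM_PATH=/opt/rocm
export CLBlast_DIR=/usr/lib/cmake/CLBlast
export AMDGPU_TARGETS="gfx1101;gfx1102"
```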
|
|
||||||
|
```
|
||||||
|
go generate ./...
|
||||||
|
```
|
||||||
|
|
||||||
|
Then build the binary:
|
||||||
|
|
||||||
|
```
|
||||||
|
go build .
|
||||||
|
```
|
||||||
|
|
||||||
|
ROCm requires elevated privileges to access the GPU at runtime. On most distros you can add your user account to the `render` group, or run as root.
|
||||||
|
|
||||||
|
#### Advanced CPU Settings
|
||||||
|
|
||||||
|
By default, running `go generate ./...` will compile a few different variations
|
||||||
|
of the LLM library based on common CPU families and vector math capabilities,
|
||||||
|
including a lowest-common-denominator build which should run on almost any 64-bit CPU,
|
||||||
|
somewhat slowly. At runtime, Ollama will auto-detect the optimal variation to
|
||||||
|
load. If you would like to build a CPU-based build customized for your
|
||||||
|
processor, you can set `OLLAMA_CUSTOM_CPU_DEFS` to the llama.cpp flags you would
|
||||||
|
like to use. For example, to compile an optimized binary for an Intel i9-9880H,
|
||||||
|
you might use:
|
||||||
|
|
||||||
|
```
|
||||||
|
OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./...
|
||||||
|
go build .
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Containerized Linux Build
|
||||||
|
|
||||||
|
If you have Docker available, you can build linux binaries with `./scripts/build_linux.sh` which has the CUDA and ROCm dependencies included. The resulting binary is placed in `./dist`
|
||||||
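For instance, a minimal invocation (assuming Docker is running and you are at the repository root):

```bash
./scripts/build_linux.sh
ls ./dist
```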
|
|
||||||
|
### Windows
|
||||||
|
|
||||||
|
Note: The Windows build for Ollama is still under development.
|
||||||
|
|
||||||
|
Install required tools:
|
||||||
|
|
||||||
|
- MSVC toolchain - C/C++ and cmake as minimal requirements
|
||||||
|
- Go version 1.22 or higher
|
||||||
|
- MinGW (pick one variant) with GCC.
|
||||||
|
- [MinGW-w64](https://www.mingw-w64.org/)
|
||||||
|
- [MSYS2](https://www.msys2.org/)
|
||||||
|
|
||||||
|
```powershell
|
||||||
|
$env:CGO_ENABLED="1"
|
||||||
|
go generate ./...
|
||||||
|
go build .
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Windows CUDA (NVIDIA)
|
||||||
|
|
||||||
|
In addition to the common Windows development tools described above, install CUDA after installing MSVC.
|
||||||
|
|
||||||
|
- [NVIDIA CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html)
|
||||||
|
|
||||||
|
|
||||||
|
#### Windows ROCm (AMD Radeon)
|
||||||
|
|
||||||
|
In addition to the common Windows development tools described above, install AMD's HIP package after installing MSVC.
|
||||||
|
|
||||||
|
- [AMD HIP](https://www.amd.com/en/developer/resources/rocm-hub/hip-sdk.html)
|
||||||
|
- [Strawberry Perl](https://strawberryperl.com/)
|
||||||
|
|
||||||
|
Lastly, add `ninja.exe` included with MSVC to the system path (e.g. `C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja`).
|
||||||
|
|||||||
71
docs/docker.md
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Ollama Docker image
|
||||||
|
|
||||||
|
### CPU only
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
### Nvidia GPU
|
||||||
|
Install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installation).
|
||||||
|
|
||||||
|
#### Install with Apt
|
||||||
|
1. Configure the repository
|
||||||
|
```bash
|
||||||
|
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
|
||||||
|
| sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
|
||||||
|
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
|
||||||
|
| sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
|
||||||
|
| sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
|
||||||
|
sudo apt-get update
|
||||||
|
```
|
||||||
|
2. Install the NVIDIA Container Toolkit packages
|
||||||
|
```bash
|
||||||
|
sudo apt-get install -y nvidia-container-toolkit
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Install with Yum or Dnf
|
||||||
|
1. Configure the repository
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo \
|
||||||
|
| sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Install the NVIDIA Container Toolkit packages
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo yum install -y nvidia-container-toolkit
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Configure Docker to use Nvidia driver
|
||||||
|
```
|
||||||
|
sudo nvidia-ctk runtime configure --runtime=docker
|
||||||
|
sudo systemctl restart docker
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Start the container
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker run -d --gpus=all -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
### AMD GPU
|
||||||
|
|
||||||
|
To run Ollama using Docker with AMD GPUs, use the `rocm` tag and the following command:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 11434:11434 --name ollama ollama/ollama:rocm
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run model locally
|
||||||
|
|
||||||
|
Now you can run a model:
|
||||||
|
|
||||||
|
```
|
||||||
|
docker exec -it ollama ollama run llama3
|
||||||
|
```
|
||||||
|
|
||||||
|
### Try different models
|
||||||
|
|
||||||
|
More models can be found on the [Ollama library](https://ollama.com/library).
|
||||||
319
docs/faq.md
@@ -1,138 +1,123 @@
|
|||||||
# FAQ
|
# FAQ
|
||||||
|
|
||||||
|
## How can I upgrade Ollama?
|
||||||
|
|
||||||
|
Ollama on macOS and Windows will automatically download updates. Click on the taskbar or menubar item and then click "Restart to update" to apply the update. Updates can also be installed by downloading the latest version [manually](https://ollama.com/download/).
|
||||||
|
|
||||||
|
On Linux, re-run the install script:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
curl -fsSL https://ollama.com/install.sh | sh
|
||||||
|
```
|
||||||
|
|
||||||
## How can I view the logs?
|
## How can I view the logs?
|
||||||
|
|
||||||
On macOS:
|
Review the [Troubleshooting](./troubleshooting.md) docs for more about using logs.
|
||||||
|
|
||||||
|
## Is my GPU compatible with Ollama?
|
||||||
|
|
||||||
|
Please refer to the [GPU docs](./gpu.md).
|
||||||
|
|
||||||
|
## How can I specify the context window size?
|
||||||
|
|
||||||
|
By default, Ollama uses a context window size of 2048 tokens.
|
||||||
|
|
||||||
|
To change this when using `ollama run`, use `/set parameter`:
|
||||||
|
|
||||||
```
|
```
|
||||||
cat ~/.ollama/logs/server.log
|
/set parameter num_ctx 4096
|
||||||
```
|
```
|
||||||
|
|
||||||
On Linux:
|
When using the API, specify the `num_ctx` parameter:
|
||||||
|
|
||||||
```
|
|
||||||
journalctl -u ollama
|
|
||||||
```
|
|
||||||
|
|
||||||
If you're running `ollama serve` directly, the logs will be printed to the console.
|
|
||||||
|
|
||||||
## How can I expose Ollama on my network?
|
|
||||||
|
|
||||||
Ollama binds to 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
|
|
||||||
|
|
||||||
On macOS:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
OLLAMA_HOST=0.0.0.0:11434 ollama serve
|
|
||||||
```
|
|
||||||
|
|
||||||
On Linux:
|
|
||||||
|
|
||||||
Create a `systemd` drop-in directory and set `Environment=OLLAMA_HOST`
|
|
||||||
|
|
||||||
```bash
|
|
||||||
mkdir -p /etc/systemd/system/ollama.service.d
|
|
||||||
echo '[Service]' >>/etc/systemd/system/ollama.service.d/environment.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
```bash
|
|
||||||
echo 'Environment="OLLAMA_HOST=0.0.0.0:11434"' >>/etc/systemd/system/ollama.service.d/environment.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
Reload `systemd` and restart Ollama:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart ollama
|
|
||||||
```
|
|
||||||
|
|
||||||
## How can I allow additional web origins to access Ollama?
|
|
||||||
|
|
||||||
Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0` by default. Add additional origins with the `OLLAMA_ORIGINS` environment variable:
|
|
||||||
|
|
||||||
On macOS:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
|
|
||||||
```
|
|
||||||
|
|
||||||
On Linux:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
echo 'Environment="OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com"' >>/etc/systemd/system/ollama.service.d/environment.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
Reload `systemd` and restart Ollama:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart ollama
|
|
||||||
```
|
|
||||||
|
|
||||||
## Where are models stored?
|
|
||||||
|
|
||||||
- macOS: Raw model data is stored under `~/.ollama/models`.
|
|
||||||
- Linux: Raw model data is stored under `/usr/share/ollama/.ollama/models`
|
|
||||||
|
|
||||||
Below the models directory you will find a structure similar to the following:
|
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
.
|
curl http://localhost:11434/api/generate -d '{
|
||||||
├── blobs
|
"model": "llama3",
|
||||||
└── manifests
|
"prompt": "Why is the sky blue?",
|
||||||
└── registry.ollama.ai
|
"options": {
|
||||||
├── f0rodo
|
"num_ctx": 4096
|
||||||
├── library
|
}
|
||||||
├── mattw
|
}'
|
||||||
└── saikatkumardey
|
|
||||||
```
|
```
|
||||||
|
|
||||||
There is a `manifests/registry.ollama.ai/namespace` path. In the example above, the user has downloaded models from the official `library`, `f0rodo`, `mattw`, and `saikatkumardey` namespaces. Within each of those directories, you will find directories for each of the models downloaded. And in there you will find a file name representing each tag. Each tag file is the manifest for the model.
|
## How can I tell if my model was loaded onto the GPU?
|
||||||
|
|
||||||
The manifest lists all the layers used in this model. You will see a `media type` for each layer, along with a digest. That digest corresponds with a file in the `models/blobs` directory.
|
Use the `ollama ps` command to see what models are currently loaded into memory.
|
||||||
|
|
||||||
### How can I change where Ollama stores models?
|
```shell
|
||||||
|
ollama ps
|
||||||
|
NAME ID SIZE PROCESSOR UNTIL
|
||||||
|
llama3:70b bcfb190ca3a7 42 GB 100% GPU 4 minutes from now
|
||||||
|
```
|
||||||
|
|
||||||
To modify where models are stored, you can use the `OLLAMA_MODELS` environment variable. Note that on Linux this means defining `OLLAMA_MODELS` in a drop-in `/etc/systemd/system/ollama.service.d` service file, reloading systemd, and restarting the ollama service.
|
The `Processor` column will show which memory the model was loaded into:
|
||||||
|
* `100% GPU` means the model was loaded entirely into the GPU
|
||||||
|
* `100% CPU` means the model was loaded entirely in system memory
|
||||||
|
* `48%/52% CPU/GPU` means the model was loaded partially onto both the GPU and into system memory
|
||||||
|
|
||||||
## Does Ollama send my prompts and answers back to Ollama.ai to use in any way?
|
## How do I configure Ollama server?
|
||||||
|
|
||||||
No. Anything you do with Ollama, such as generate a response from the model, stays with you. We don't collect any data about how you use the model. You are always in control of your own data.
|
Ollama server can be configured with environment variables.
|
||||||
|
|
||||||
## How can I use Ollama in Visual Studio Code?
|
### Setting environment variables on Mac
|
||||||
|
|
||||||
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. You can see the list of [extensions & plugins](https://github.com/jmorganca/ollama#extensions--plugins) at the bottom of the main repository readme.
|
If Ollama is run as a macOS application, environment variables should be set using `launchctl`:
|
||||||
|
|
||||||
|
1. For each environment variable, call `launchctl setenv`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
launchctl setenv OLLAMA_HOST "0.0.0.0"
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Restart Ollama application.
|
||||||
|
|
||||||
|
### Setting environment variables on Linux
|
||||||
|
|
||||||
|
If Ollama is run as a systemd service, environment variables should be set using `systemctl`:
|
||||||
|
|
||||||
|
1. Edit the systemd service by calling `systemctl edit ollama.service`. This will open an editor.
|
||||||
|
|
||||||
|
2. For each environment variable, add a line `Environment` under section `[Service]`:
|
||||||
|
|
||||||
|
```ini
|
||||||
|
[Service]
|
||||||
|
Environment="OLLAMA_HOST=0.0.0.0"
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Save and exit.
|
||||||
|
|
||||||
|
4. Reload `systemd` and restart Ollama:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl daemon-reload
|
||||||
|
systemctl restart ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
### Setting environment variables on Windows
|
||||||
|
|
||||||
|
On Windows, Ollama inherits your user and system environment variables.
|
||||||
|
|
||||||
|
1. First, quit Ollama by clicking on it in the task bar.
|
||||||
|
|
||||||
|
2. Start the Settings (Windows 11) or Control Panel (Windows 10) application and search for _environment variables_.
|
||||||
|
|
||||||
|
3. Click on _Edit environment variables for your account_.
|
||||||
|
|
||||||
|
4. Edit or create a new variable for your user account for `OLLAMA_HOST`, `OLLAMA_MODELS`, etc.
|
||||||
|
|
||||||
|
5. Click OK/Apply to save.
|
||||||
|
|
||||||
|
6. Start the Ollama application from the Windows Start menu.
|
||||||
|
|
||||||
## How do I use Ollama behind a proxy?
|
## How do I use Ollama behind a proxy?
|
||||||
|
|
||||||
Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variable, ensure it is set where `ollama serve` can access the values.
|
Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variable, ensure it is set where `ollama serve` can access the values. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform.
|
||||||
|
|
||||||
When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate.
|
|
||||||
|
|
||||||
On macOS:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
HTTPS_PROXY=http://proxy.example.com ollama serve
|
|
||||||
```
|
|
||||||
|
|
||||||
On Linux:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
echo 'Environment="HTTPS_PROXY=https://proxy.example.com"' >>/etc/systemd/system/ollama.service.d/environment.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
Reload `systemd` and restart Ollama:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
systemctl daemon-reload
|
|
||||||
systemctl restart ollama
|
|
||||||
```
|
|
||||||
|
|
||||||
### How do I use Ollama behind a proxy in Docker?
|
### How do I use Ollama behind a proxy in Docker?
|
||||||
|
|
||||||
The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.
|
The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.
|
||||||
|
|
||||||
Alternatively, Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).
|
Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).
|
||||||
|
|
||||||
Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.
|
Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.
|
||||||
|
|
||||||
@@ -149,8 +134,126 @@ docker build -t ollama-with-ca .
|
|||||||
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
|
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Does Ollama send my prompts and answers back to ollama.com?
|
||||||
|
|
||||||
|
No. Ollama runs locally, and conversation data does not leave your machine.
|
||||||
|
|
||||||
|
## How can I expose Ollama on my network?
|
||||||
|
|
||||||
|
Ollama binds to 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
|
||||||
|
|
||||||
|
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||||
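For instance, when running the server directly from a shell (a sketch; installs managed by systemd or the macOS app should use the environment-variable steps above instead):

```shell
OLLAMA_HOST=0.0.0.0:11434 ollama serve
```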
|
|
||||||
|
## How can I use Ollama with a proxy server?
|
||||||
|
|
||||||
|
Ollama runs an HTTP server and can be exposed using a proxy server such as Nginx. To do so, configure the proxy to forward requests and optionally set required headers (if not exposing Ollama on the network). For example, with Nginx:
|
||||||
|
|
||||||
|
```
|
||||||
|
server {
|
||||||
|
listen 80;
|
||||||
|
server_name example.com; # Replace with your domain or IP
|
||||||
|
location / {
|
||||||
|
proxy_pass http://localhost:11434;
|
||||||
|
proxy_set_header Host localhost:11434;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## How can I use Ollama with ngrok?
|
||||||
|
|
||||||
|
Ollama can be accessed through a range of tunneling tools. For example, with ngrok:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
ngrok http 11434 --host-header="localhost:11434"
|
||||||
|
```
|
||||||
|
|
||||||
|
## How can I use Ollama with Cloudflare Tunnel?
|
||||||
|
|
||||||
|
To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434"
|
||||||
|
```
|
||||||
|
|
||||||
|
## How can I allow additional web origins to access Ollama?
|
||||||
|
|
||||||
|
Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Additional origins can be configured with `OLLAMA_ORIGINS`.
|
||||||
|
|
||||||
|
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||||
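For example, when running the server directly from a shell (the origins shown are placeholders):

```shell
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
```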
|
|
||||||
|
## Where are models stored?
|
||||||
|
|
||||||
|
- macOS: `~/.ollama/models`
|
||||||
|
- Linux: `/usr/share/ollama/.ollama/models`
|
||||||
|
- Windows: `C:\Users\%username%\.ollama\models`
|
||||||
|
|
||||||
|
### How do I set them to a different location?
|
||||||
|
|
||||||
|
If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory.
|
||||||
|
|
||||||
|
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||||
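As one example, on a macOS app install the variable could be set with `launchctl` before restarting the Ollama application (the path is a placeholder):

```shell
launchctl setenv OLLAMA_MODELS "/path/to/models"
```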
|
|
||||||
|
## How can I use Ollama in Visual Studio Code?
|
||||||
|
|
||||||
|
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.
|
||||||
|
|
||||||
## How do I use Ollama with GPU acceleration in Docker?
|
## How do I use Ollama with GPU acceleration in Docker?
|
||||||
|
|
||||||
The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
|
The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
|
||||||
|
|
||||||
GPU acceleration is not available for Docker Desktop in macOS due to the lack of GPU passthrough and emulation.
|
GPU acceleration is not available for Docker Desktop in macOS due to the lack of GPU passthrough and emulation.
|
||||||
|
|
||||||
|
## Why is networking slow in WSL2 on Windows 10?
|
||||||
|
|
||||||
|
This can impact both installing Ollama and downloading models.
|
||||||
|
|
||||||
|
Open `Control Panel > Networking and Internet > View network status and tasks` and click on `Change adapter settings` on the left panel. Find the `vEthernet (WSL)` adapter, right-click and select `Properties`.
|
||||||
|
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
|
||||||
|
properties.
|
||||||
|
|
||||||
|
## How can I preload a model into Ollama to get faster response times?
|
||||||
|
|
||||||
|
If you are using the API, you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
|
||||||
|
|
||||||
|
To preload the mistral model using the generate endpoint, use:
|
||||||
|
```shell
|
||||||
|
curl http://localhost:11434/api/generate -d '{"model": "mistral"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
To use the chat completions endpoint, use:
|
||||||
|
```shell
|
||||||
|
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
|
||||||
|
```
|
||||||
|
|
||||||
|
To preload a model using the CLI, use the command:
|
||||||
|
```shell
|
||||||
|
ollama run llama3 ""
|
||||||
|
```
|
||||||
|
|
||||||
|
## How do I keep a model loaded in memory or make it unload immediately?
|
||||||
|
|
||||||
|
By default, models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` or `/api/chat` API endpoint to control how long the model is left in memory.
|
||||||
|
|
||||||
|
The `keep_alive` parameter can be set to:
|
||||||
|
* a duration string (such as "10m" or "24h")
|
||||||
|
* a number in seconds (such as 3600)
|
||||||
|
* any negative number which will keep the model loaded in memory (e.g. -1 or "-1m")
|
||||||
|
* '0' which will unload the model immediately after generating a response
|
||||||
|
|
||||||
|
For example, to preload a model and leave it in memory use:
|
||||||
|
```shell
|
||||||
|
curl http://localhost:11434/api/generate -d '{"model": "llama3", "keep_alive": -1}'
|
||||||
|
```
|
||||||
|
|
||||||
|
To unload the model and free up memory use:
|
||||||
|
```shell
|
||||||
|
curl http://localhost:11434/api/generate -d '{"model": "llama3", "keep_alive": 0}'
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternatively, you can change the amount of time all models are loaded into memory by setting the `OLLAMA_KEEP_ALIVE` environment variable when starting the Ollama server. The `OLLAMA_KEEP_ALIVE` variable accepts the same types of values as the `keep_alive` parameter mentioned above. Refer to the section explaining [how to configure the Ollama server](#how-do-i-configure-ollama-server) to correctly set the environment variable.
|
||||||
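For example, when starting the server directly from a shell (the duration is illustrative; use your platform's environment-variable mechanism otherwise):

```shell
OLLAMA_KEEP_ALIVE=24h ollama serve
```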
|
|
||||||
|
If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints.
|
||||||
|
|
||||||
|
## How do I manage the maximum number of requests the Ollama server can queue?
|
||||||
|
|
||||||
|
If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queued by setting `OLLAMA_MAX_QUEUE`.
|
||||||
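For instance, when starting the server directly from a shell (the value here is only an example):

```shell
OLLAMA_MAX_QUEUE=1024 ollama serve
```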
|
|||||||
102
docs/gpu.md
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
# GPU
|
||||||
|
## Nvidia
|
||||||
|
Ollama supports Nvidia GPUs with compute capability 5.0+.
|
||||||
|
|
||||||
|
Check your compute compatibility to see if your card is supported:
|
||||||
|
[https://developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus)
|
||||||
|
|
||||||
|
| Compute Capability | Family | Cards |
|
||||||
|
| ------------------ | ------------------- | ----------------------------------------------------------------------------------------------------------- |
|
||||||
|
| 9.0 | NVIDIA | `H100` |
|
||||||
|
| 8.9 | GeForce RTX 40xx | `RTX 4090` `RTX 4080` `RTX 4070 Ti` `RTX 4060 Ti` |
|
||||||
|
| | NVIDIA Professional | `L4` `L40` `RTX 6000` |
|
||||||
|
| 8.6 | GeForce RTX 30xx | `RTX 3090 Ti` `RTX 3090` `RTX 3080 Ti` `RTX 3080` `RTX 3070 Ti` `RTX 3070` `RTX 3060 Ti` `RTX 3060` |
|
||||||
|
| | NVIDIA Professional | `A40` `RTX A6000` `RTX A5000` `RTX A4000` `RTX A3000` `RTX A2000` `A10` `A16` `A2` |
|
||||||
|
| 8.0 | NVIDIA | `A100` `A30` |
|
||||||
|
| 7.5 | GeForce GTX/RTX | `GTX 1650 Ti` `TITAN RTX` `RTX 2080 Ti` `RTX 2080` `RTX 2070` `RTX 2060` |
|
||||||
|
| | NVIDIA Professional | `T4` `RTX 5000` `RTX 4000` `RTX 3000` `T2000` `T1200` `T1000` `T600` `T500` |
|
||||||
|
| | Quadro | `RTX 8000` `RTX 6000` `RTX 5000` `RTX 4000` |
|
||||||
|
| 7.0 | NVIDIA | `TITAN V` `V100` `Quadro GV100` |
|
||||||
|
| 6.1 | NVIDIA TITAN | `TITAN Xp` `TITAN X` |
|
||||||
|
| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050` |
|
||||||
|
| | Quadro | `P6000` `P5200` `P4200` `P3200` `P5000` `P4000` `P3000` `P2200` `P2000` `P1000` `P620` `P600` `P500` `P520` |
|
||||||
|
| | Tesla | `P40` `P4` |
|
||||||
|
| 6.0 | NVIDIA | `Tesla P100` `Quadro GP100` |
|
||||||
|
| 5.2 | GeForce GTX | `GTX TITAN X` `GTX 980 Ti` `GTX 980` `GTX 970` `GTX 960` `GTX 950` |
|
||||||
|
| | Quadro | `M6000 24GB` `M6000` `M5000` `M5500M` `M4000` `M2200` `M2000` `M620` |
|
||||||
|
| | Tesla | `M60` `M40` |
|
||||||
|
| 5.0 | GeForce GTX | `GTX 750 Ti` `GTX 750` `NVS 810` |
|
||||||
|
| | Quadro | `K2200` `K1200` `K620` `M1200` `M520` `M5000M` `M4000M` `M3000M` `M2000M` `M1000M` `K620M` `M600M` `M500M` |
|
||||||
|
|
||||||
|
|
||||||
|
### GPU Selection
|
||||||
|
|
||||||
|
If you have multiple NVIDIA GPUs in your system and want to limit Ollama to use
|
||||||
|
a subset, you can set `CUDA_VISIBLE_DEVICES` to a comma-separated list of GPUs.
|
||||||
|
Numeric IDs may be used; however, ordering may vary, so UUIDs are more reliable.
|
||||||
|
You can discover the UUID of your GPUs by running `nvidia-smi -L`. If you want to
|
||||||
|
ignore the GPUs and force CPU usage, use an invalid GPU ID (e.g., "-1")
|
||||||
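A sketch of the workflow (the UUID below is a placeholder taken from `nvidia-smi -L` output):

```shell
# List GPUs along with their UUIDs
nvidia-smi -L
# Restrict Ollama to a single GPU by UUID (placeholder shown)
CUDA_VISIBLE_DEVICES=GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx ollama serve
```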
|
|
||||||
|
### Laptop Suspend Resume
|
||||||
|
|
||||||
|
On Linux, after a suspend/resume cycle, Ollama will sometimes fail to discover
|
||||||
|
your NVIDIA GPU and fall back to running on the CPU. You can work around this
|
||||||
|
driver bug by reloading the NVIDIA UVM driver with `sudo rmmod nvidia_uvm &&
|
||||||
|
sudo modprobe nvidia_uvm`
|
||||||
|
|
||||||
|
## AMD Radeon
|
||||||
|
Ollama supports the following AMD GPUs:
|
||||||
|
| Family | Cards and accelerators |
|
||||||
|
| -------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||||
|
| AMD Radeon RX | `7900 XTX` `7900 XT` `7900 GRE` `7800 XT` `7700 XT` `7600 XT` `7600` `6950 XT` `6900 XTX` `6900XT` `6800 XT` `6800` `Vega 64` `Vega 56` |
|
||||||
|
| AMD Radeon PRO | `W7900` `W7800` `W7700` `W7600` `W7500` `W6900X` `W6800X Duo` `W6800X` `W6800` `V620` `V420` `V340` `V320` `Vega II Duo` `Vega II` `VII` `SSG` |
|
||||||
|
| AMD Instinct | `MI300X` `MI300A` `MI300` `MI250X` `MI250` `MI210` `MI200` `MI100` `MI60` `MI50` |
|
||||||
|
|
||||||
|
### Overrides
|
||||||
|
Ollama leverages the AMD ROCm library, which does not support all AMD GPUs. In
|
||||||
|
some cases you can force the system to try to use a similar LLVM target that is
|
||||||
|
close. For example, the Radeon RX 5400 is `gfx1034` (also known as 10.3.4);
|
||||||
|
however, ROCm does not currently support this target. The closest support is
|
||||||
|
`gfx1030`. You can use the environment variable `HSA_OVERRIDE_GFX_VERSION` with
|
||||||
|
`x.y.z` syntax. So for example, to force the system to run on the RX 5400, you
|
||||||
|
would set `HSA_OVERRIDE_GFX_VERSION="10.3.0"` as an environment variable for the
|
||||||
|
server. If you have an unsupported AMD GPU you can experiment using the list of
|
||||||
|
supported types below.
|
||||||
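For example, when running the server directly from a shell, the RX 5400 override mentioned above would be:

```shell
HSA_OVERRIDE_GFX_VERSION="10.3.0" ollama serve
```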
|
|
||||||
|
At this time, the known supported GPU types are the following LLVM Targets.
|
||||||
|
This table shows some example GPUs that map to these LLVM targets:
|
||||||
|
| **LLVM Target** | **An Example GPU** |
|
||||||
|
|-----------------|---------------------|
|
||||||
|
| gfx900 | Radeon RX Vega 56 |
|
||||||
|
| gfx906 | Radeon Instinct MI50 |
|
||||||
|
| gfx908 | Radeon Instinct MI100 |
|
||||||
|
| gfx90a | Radeon Instinct MI210 |
|
||||||
|
| gfx940 | Radeon Instinct MI300 |
|
||||||
|
| gfx941 | |
|
||||||
|
| gfx942 | |
|
||||||
|
| gfx1030 | Radeon PRO V620 |
|
||||||
|
| gfx1100 | Radeon PRO W7900 |
|
||||||
|
| gfx1101 | Radeon PRO W7700 |
|
||||||
|
| gfx1102 | Radeon RX 7600 |
|
||||||
|
|
||||||
|
AMD is working on enhancing ROCm v6 to broaden support for families of GPUs in a
|
||||||
|
future release, which should extend support to more GPUs.
|
||||||
|
|
||||||
|
Reach out on [Discord](https://discord.gg/ollama) or file an
|
||||||
|
[issue](https://github.com/ollama/ollama/issues) for additional help.
|
||||||
|
|
||||||
|
### GPU Selection
|
||||||
|
|
||||||
|
If you have multiple AMD GPUs in your system and want to limit Ollama to use a
|
||||||
|
subset, you can set `HIP_VISIBLE_DEVICES` to a comma-separated list of GPUs.
|
||||||
|
You can see the list of devices with `rocminfo`. If you want to ignore the GPUs
|
||||||
|
and force CPU usage, use an invalid GPU ID (e.g., "-1")
|
||||||
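A sketch (the device IDs are examples; check `rocminfo` for yours):

```shell
# List AMD GPU devices
rocminfo
# Restrict Ollama to the first two devices
HIP_VISIBLE_DEVICES=0,1 ollama serve
```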
|
|
||||||
|
### Container Permission
|
||||||
|
|
||||||
|
In some Linux distributions, SELinux can prevent containers from
|
||||||
|
accessing the AMD GPU devices. On the host system you can run
|
||||||
|
`sudo setsebool container_use_devices=1` to allow containers to use devices.
|
||||||
|
|
||||||
|
### Metal (Apple GPUs)
|
||||||
|
Ollama supports GPU acceleration on Apple devices via the Metal API.
|
||||||
259
docs/import.md
@@ -1,198 +1,99 @@
|
|||||||
# Import a model
|
# Import
|
||||||
|
|
||||||
This guide walks through importing a GGUF, PyTorch or Safetensors model.
|
GGUF models and select Safetensors models can be imported directly into Ollama.
|
||||||
|
|
||||||
## Importing (GGUF)
|
## Import GGUF
|
||||||
|
|
||||||
### Step 1: Write a `Modelfile`
|
A binary GGUF file can be imported directly into Ollama through a Modelfile.
|
||||||
|
|
||||||
Start by creating a `Modelfile`. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.
|
```dockerfile
|
||||||
|
FROM /path/to/file.gguf
|
||||||
```
|
|
||||||
FROM ./mistral-7b-v0.1.Q4_0.gguf
|
|
||||||
```
|
```
|
||||||
|
|
||||||
(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
|
## Import Safetensors
|
||||||
|
|
||||||
```
|
If the model being imported is one of the following architectures, it can be imported directly into Ollama through a Modelfile:
|
||||||
FROM ./q4_0.bin
|
|
||||||
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
|
- LlamaForCausalLM
|
||||||
|
- MistralForCausalLM
|
||||||
|
- GemmaForCausalLM
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
FROM /path/to/safetensors/directory
|
||||||
```
|
```
|
||||||
|
|
||||||
### Step 2: Create the Ollama model
|
For architectures not directly convertible by Ollama, see llama.cpp's [guide](https://github.com/ggerganov/llama.cpp/blob/master/README.md#prepare-and-quantize) on conversion. After conversion, see [Import GGUF](#import-gguf).
|
||||||
|
|
||||||
Finally, create a model from your `Modelfile`:
|
## Automatic Quantization
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> Automatic quantization requires v0.1.35 or higher.
|
||||||
|
|
||||||
|
Ollama is capable of quantizing FP16 or FP32 models to any of the supported quantizations with the `-q/--quantize` flag in `ollama create`.
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
FROM /path/to/my/gemma/f16/model
|
||||||
```
|
```
|
||||||
ollama create example -f Modelfile
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 3: Run your model
|
|
||||||
|
|
||||||
Next, test the model with `ollama run`:
|
|
||||||
|
|
||||||
```
|
|
||||||
ollama run example "What is your favourite condiment?"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Importing (PyTorch & Safetensors)
|
|
||||||
|
|
||||||
### Supported models
|
|
||||||
|
|
||||||
Ollama supports a set of model architectures, with support for more coming soon:
|
|
||||||
|
|
||||||
- Llama & Mistral
|
|
||||||
- Falcon & RW
|
|
||||||
- GPT-NeoX
|
|
||||||
- BigCode
|
|
||||||
|
|
||||||
To view a model's architecture, check the `config.json` file in its HuggingFace repo. You should see an entry under `architectures` (e.g. `LlamaForCausalLM`).
|
|
||||||
|
|
||||||
### Step 1: Clone the HuggingFace repository (optional)
|
|
||||||
|
|
||||||
If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.
|
|
||||||
|
|
||||||
```
|
|
||||||
git lfs install
|
|
||||||
git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
|
|
||||||
cd Mistral-7B-Instruct-v0.1
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 2: Convert and quantize to a `.bin` file (optional, for PyTorch and Safetensors)
|
|
||||||
|
|
||||||
If the model is in PyTorch or Safetensors format, a [Docker image](https://hub.docker.com/r/ollama/quantize) with the tooling required to convert and quantize models is available.
|
|
||||||
|
|
||||||
First, Install [Docker](https://www.docker.com/get-started/).
|
|
||||||
|
|
||||||
Next, to convert and quantize your model, run:
|
|
||||||
|
|
||||||
```
|
|
||||||
docker run --rm -v .:/model ollama/quantize -q q4_0 /model
|
|
||||||
```
|
|
||||||
|
|
||||||
This will output two files into the directory:
|
|
||||||
|
|
||||||
- `f16.bin`: the model converted to GGUF
|
|
||||||
- `q4_0.bin` the model quantized to a 4-bit quantization (we will use this file to create the Ollama model)
|
|
||||||
|
|
||||||
### Step 3: Write a `Modelfile`
|
|
||||||
|
|
||||||
Next, create a `Modelfile` for your model:
|
|
||||||
|
|
||||||
```
|
|
||||||
FROM ./q4_0.bin
|
|
||||||
```
|
|
||||||
|
|
||||||
(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
|
|
||||||
|
|
||||||
```
|
|
||||||
FROM ./q4_0.bin
|
|
||||||
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 4: Create the Ollama model
|
|
||||||
|
|
||||||
Finally, create a model from your `Modelfile`:
|
|
||||||
|
|
||||||
```
|
|
||||||
ollama create example -f Modelfile
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 5: Run your model
|
|
||||||
|
|
||||||
Next, test the model with `ollama run`:
|
|
||||||
|
|
||||||
```
|
|
||||||
ollama run example "What is your favourite condiment?"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Publishing your model (optional – early alpha)
|
|
||||||
|
|
||||||
Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:
|
|
||||||
|
|
||||||
1. Create [an account](https://ollama.ai/signup)
|
|
||||||
2. Run `cat ~/.ollama/id_ed25519.pub` to view your Ollama public key. Copy this to the clipboard.
|
|
||||||
3. Add your public key to your [Ollama account](https://ollama.ai/settings/keys)
|
|
||||||
|
|
||||||
Next, copy your model to your username's namespace:
|
|
||||||
|
|
||||||
```
|
|
||||||
ollama cp example <your username>/example
|
|
||||||
```
|
|
||||||
|
|
||||||
Then push the model:
|
|
||||||
|
|
||||||
```
|
|
||||||
ollama push <your username>/example
|
|
||||||
```
|
|
||||||
|
|
||||||
After publishing, your model will be available at `https://ollama.ai/<your username>/example`.
|
|
||||||
|
|
||||||
## Quantization reference
|
|
||||||
|
|
||||||
The quantization options are as follows (from highest to lowest level of quantization). Note: some architectures such as Falcon do not support K quants.
|
|
||||||
|
|
||||||
- `q2_K`
|
|
||||||
- `q3_K`
|
|
||||||
- `q3_K_S`
|
|
||||||
- `q3_K_M`
|
|
||||||
- `q3_K_L`
|
|
||||||
- `q4_0` (recommended)
|
|
||||||
- `q4_1`
|
|
||||||
- `q4_K`
|
|
||||||
- `q4_K_S`
|
|
||||||
- `q4_K_M`
|
|
||||||
- `q5_0`
|
|
||||||
- `q5_1`
|
|
||||||
- `q5_K`
|
|
||||||
- `q5_K_S`
|
|
||||||
- `q5_K_M`
|
|
||||||
- `q6_K`
|
|
||||||
- `q8_0`
|
|
||||||
|
|
||||||
## Manually converting & quantizing models
|
|
||||||
|
|
||||||
### Prerequisites
|
|
||||||
|
|
||||||
Start by cloning the `llama.cpp` repo to your machine in another directory:
|
|
||||||
|
|
||||||
```
|
|
||||||
git clone https://github.com/ggerganov/llama.cpp.git
|
|
||||||
cd llama.cpp
|
|
||||||
```
|
|
||||||
|
|
||||||
Next, install the Python dependencies:
|
|
||||||
|
|
||||||
```
|
|
||||||
pip install -r requirements.txt
|
|
||||||
```
|
|
||||||
|
|
||||||
Finally, build the `quantize` tool:
|
|
||||||
|
|
||||||
```
|
|
||||||
make quantize
|
|
||||||
```
|
|
||||||
|
|
||||||
### Convert the model
|
|
||||||
|
|
||||||
Run the correct conversion script for your model architecture:
|
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
# LlamaForCausalLM or MistralForCausalLM
|
$ ollama create -q Q4_K_M mymodel
|
||||||
python convert.py <path to model directory>
|
transferring model data
|
||||||
|
quantizing F16 model to Q4_K_M
|
||||||
# FalconForCausalLM
|
creating new layer sha256:735e246cc1abfd06e9cdcf95504d6789a6cd1ad7577108a70d9902fef503c1bd
|
||||||
python convert-falcon-hf-to-gguf.py <path to model directory>
|
creating new layer sha256:0853f0ad24e5865173bbf9ffcc7b0f5d56b66fd690ab1009867e45e7d2c4db0f
|
||||||
|
writing manifest
|
||||||
# GPTNeoXForCausalLM
|
success
|
||||||
python convert-gptneox-hf-to-gguf.py <path to model directory>
|
|
||||||
|
|
||||||
# GPTBigCodeForCausalLM
|
|
||||||
python convert-starcoder-hf-to-gguf.py <path to model directory>
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Quantize the model
|
### Supported Quantizations
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Legacy Quantization</summary>
|
||||||
|
|
||||||
|
- `Q4_0`
|
||||||
|
- `Q4_1`
|
||||||
|
- `Q5_0`
|
||||||
|
- `Q5_1`
|
||||||
|
- `Q8_0`
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>K-means Quantization</summary>
|
||||||
|
|
||||||
|
- `Q3_K_S`
|
||||||
|
- `Q3_K_M`
|
||||||
|
- `Q3_K_L`
|
||||||
|
- `Q4_K_S`
|
||||||
|
- `Q4_K_M`
|
||||||
|
- `Q5_K_S`
|
||||||
|
- `Q5_K_M`
|
||||||
|
- `Q6_K`
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> Activation-aware Weight Quantization (i.e. IQ) is not currently supported for automatic quantization; however, you can still import the quantized model into Ollama. See [Import GGUF](#import-gguf).
|
||||||
|
|
||||||
|
## Template Detection
|
||||||
|
|
||||||
|
> [!NOTE]
|
||||||
|
> Template detection requires v0.1.42 or higher.
|
||||||
|
|
||||||
|
Ollama uses model metadata, specifically `tokenizer.chat_template`, to automatically create a template appropriate for the model you're importing.
|
||||||
|
|
||||||
|
```dockerfile
|
||||||
|
FROM /path/to/my/gemma/model
|
||||||
```
|
```
|
||||||
quantize <path to model dir>/ggml-model-f32.bin <path to model dir>/q4_0.bin q4_0
|
|
||||||
|
```shell
|
||||||
|
$ ollama create mymodel
|
||||||
|
transferring model data
|
||||||
|
using autodetected template gemma-instruct
|
||||||
|
creating new layer sha256:baa2a0edc27d19cc6b7537578a9a7ba1a4e3214dc185ed5ae43692b319af7b84
|
||||||
|
creating new layer sha256:ba66c3309914dbef07e5149a648fd1877f030d337a4f240d444ea335008943cb
|
||||||
|
writing manifest
|
||||||
|
success
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Defining a template in the Modelfile will disable this feature which may be useful if you want to use a different template than the autodetected one.
|
||||||
|
|||||||
@@ -3,11 +3,21 @@
|
|||||||
## Install
|
## Install
|
||||||
|
|
||||||
Install Ollama by running this one-liner:
|
Install Ollama by running this one-liner:
|
||||||
|
|
||||||
>
|
>
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl https://ollama.ai/install.sh | sh
|
curl -fsSL https://ollama.com/install.sh | sh
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## AMD Radeon GPU support
|
||||||
|
|
||||||
|
While AMD has contributed the `amdgpu` driver upstream to the official Linux
|
||||||
|
kernel source, the version is older and may not support all ROCm features. We
|
||||||
|
recommend you install the latest driver from
|
||||||
|
https://www.amd.com/en/support/linux-drivers for best support of your Radeon
|
||||||
|
GPU.
|
||||||
|
|
||||||
## Manual install
|
## Manual install
|
||||||
|
|
||||||
### Download the `ollama` binary
|
### Download the `ollama` binary
|
||||||
@@ -15,7 +25,7 @@ curl https://ollama.ai/install.sh | sh
|
|||||||
Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:
|
Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
|
sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
|
||||||
sudo chmod +x /usr/bin/ollama
|
sudo chmod +x /usr/bin/ollama
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -62,6 +72,11 @@ Verify that the drivers are installed by running the following command, which sh
|
|||||||
nvidia-smi
|
nvidia-smi
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Install ROCm (optional - for Radeon GPUs)
|
||||||
|
[Download and Install](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html)
|
||||||
|
|
||||||
|
Make sure to install ROCm v6.
|
||||||
|
|
||||||
### Start Ollama
|
### Start Ollama
|
||||||
|
|
||||||
Start Ollama using `systemd`:
|
Start Ollama using `systemd`:
|
||||||
@@ -75,22 +90,32 @@ sudo systemctl start ollama
|
|||||||
Update ollama by running the install script again:
|
Update ollama by running the install script again:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
curl https://ollama.ai/install.sh | sh
|
curl -fsSL https://ollama.com/install.sh | sh
|
||||||
```
|
```
|
||||||
|
|
||||||
Or by downloading the ollama binary:
|
Or by downloading the ollama binary:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
|
sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
|
||||||
sudo chmod +x /usr/bin/ollama
|
sudo chmod +x /usr/bin/ollama
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Installing specific versions
|
||||||
|
|
||||||
|
Use `OLLAMA_VERSION` environment variable with the install script to install a specific version of Ollama, including pre-releases. You can find the version numbers in the [releases page](https://github.com/ollama/ollama/releases).
|
||||||
|
|
||||||
|
For example:
|
||||||
|
|
||||||
|
```
|
||||||
|
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.1.32 sh
|
||||||
|
```
|
||||||
|
|
||||||
## Viewing logs
|
## Viewing logs
|
||||||
|
|
||||||
To view logs of Ollama running as a startup service, run:
|
To view logs of Ollama running as a startup service, run:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
journalctl -u ollama
|
journalctl -e -u ollama
|
||||||
```
|
```
|
||||||
|
|
||||||
## Uninstall
|
## Uninstall
|
||||||
@@ -109,8 +134,10 @@ Remove the ollama binary from your bin directory (either `/usr/local/bin`, `/usr
|
|||||||
sudo rm $(which ollama)
|
sudo rm $(which ollama)
|
||||||
```
|
```
|
||||||
|
|
||||||
Remove the downloaded models and Ollama service user:
|
Remove the downloaded models and Ollama service user and group:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
sudo rm -r /usr/share/ollama
|
sudo rm -r /usr/share/ollama
|
||||||
sudo userdel ollama
|
sudo userdel ollama
|
||||||
|
sudo groupdel ollama
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# Ollama Model File
|
# Ollama Model File
|
||||||
|
|
||||||
> Note: this `Modelfile` syntax is in development
|
> Note: `Modelfile` syntax is in development
|
||||||
|
|
||||||
A model file is the blueprint to create and share models with Ollama.
|
A model file is the blueprint to create and share models with Ollama.
|
||||||
|
|
||||||
@@ -10,7 +10,7 @@ A model file is the blueprint to create and share models with Ollama.
|
|||||||
- [Examples](#examples)
|
- [Examples](#examples)
|
||||||
- [Instructions](#instructions)
|
- [Instructions](#instructions)
|
||||||
- [FROM (Required)](#from-required)
|
- [FROM (Required)](#from-required)
|
||||||
- [Build from llama2](#build-from-llama2)
|
- [Build from llama3](#build-from-llama3)
|
||||||
- [Build from a bin file](#build-from-a-bin-file)
|
- [Build from a bin file](#build-from-a-bin-file)
|
||||||
- [PARAMETER](#parameter)
|
- [PARAMETER](#parameter)
|
||||||
- [Valid Parameters and Values](#valid-parameters-and-values)
|
- [Valid Parameters and Values](#valid-parameters-and-values)
|
||||||
@@ -19,6 +19,7 @@ A model file is the blueprint to create and share models with Ollama.
|
|||||||
- [SYSTEM](#system)
|
- [SYSTEM](#system)
|
||||||
- [ADAPTER](#adapter)
|
- [ADAPTER](#adapter)
|
||||||
- [LICENSE](#license)
|
- [LICENSE](#license)
|
||||||
|
- [MESSAGE](#message)
|
||||||
- [Notes](#notes)
|
- [Notes](#notes)
|
||||||
|
|
||||||
## Format
|
## Format
|
||||||
@@ -31,13 +32,14 @@ INSTRUCTION arguments
|
|||||||
```
|
```
|
||||||
|
|
||||||
| Instruction | Description |
|
| Instruction | Description |
|
||||||
| ----------------------------------- | ------------------------------------------------------------- |
|
| ----------------------------------- | -------------------------------------------------------------- |
|
||||||
| [`FROM`](#from-required) (required) | Defines the base model to use. |
|
| [`FROM`](#from-required) (required) | Defines the base model to use. |
|
||||||
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
|
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
|
||||||
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
|
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
|
||||||
| [`SYSTEM`](#system) | Specifies the system prompt that will be set in the template. |
|
| [`SYSTEM`](#system) | Specifies the system message that will be set in the template. |
|
||||||
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
|
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
|
||||||
| [`LICENSE`](#license) | Specifies the legal license. |
|
| [`LICENSE`](#license) | Specifies the legal license. |
|
||||||
|
| [`MESSAGE`](#message) | Specify message history. |
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
@@ -46,13 +48,13 @@ INSTRUCTION arguments
|
|||||||
An example of a `Modelfile` creating a mario blueprint:
|
An example of a `Modelfile` creating a mario blueprint:
|
||||||
|
|
||||||
```modelfile
|
```modelfile
|
||||||
FROM llama2
|
FROM llama3
|
||||||
# sets the temperature to 1 [higher is more creative, lower is more coherent]
|
# sets the temperature to 1 [higher is more creative, lower is more coherent]
|
||||||
PARAMETER temperature 1
|
PARAMETER temperature 1
|
||||||
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
|
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
|
||||||
PARAMETER num_ctx 4096
|
PARAMETER num_ctx 4096
|
||||||
|
|
||||||
# sets a custom system prompt to specify the behavior of the chat assistant
|
# sets a custom system message to specify the behavior of the chat assistant
|
||||||
SYSTEM You are Mario from super mario bros, acting as an assistant.
|
SYSTEM You are Mario from super mario bros, acting as an assistant.
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -65,33 +67,25 @@ To use this:
|
|||||||
|
|
||||||
More examples are available in the [examples directory](../examples).
|
More examples are available in the [examples directory](../examples).
|
||||||
|
|
||||||
### `Modelfile`s in [ollama.ai/library][1]
|
To view the Modelfile of a given model, use the `ollama show --modelfile` command.
|
||||||
|
|
||||||
There are two ways to view `Modelfile`s underlying the models in [ollama.ai/library][1]:
|
|
||||||
|
|
||||||
- Option 1: view a details page from a model's tags page:
|
|
||||||
1. Go to a particular model's tags (e.g. https://ollama.ai/library/llama2/tags)
|
|
||||||
2. Click on a tag (e.g. https://ollama.ai/library/llama2:13b)
|
|
||||||
3. Scroll down to "Layers"
|
|
||||||
- Note: if the [`FROM` instruction](#from-required) is not present,
|
|
||||||
it means the model was created from a local file
|
|
||||||
- Option 2: use `ollama show` to print the `Modelfile` like so:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
> ollama show --modelfile llama2:13b
|
> ollama show --modelfile llama3
|
||||||
# Modelfile generated by "ollama show"
|
# Modelfile generated by "ollama show"
|
||||||
# To build a new Modelfile based on this one, replace the FROM line with:
|
# To build a new Modelfile based on this one, replace the FROM line with:
|
||||||
# FROM llama2:13b
|
# FROM llama3:latest
|
||||||
|
FROM /Users/pdevine/.ollama/models/blobs/sha256-00e1317cbf74d901080d7100f57580ba8dd8de57203072dc6f668324ba545f29
|
||||||
|
TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
|
||||||
|
|
||||||
FROM /root/.ollama/models/blobs/sha256:123abc
|
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
|
||||||
TEMPLATE """[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>
|
|
||||||
|
|
||||||
{{ end }}{{ .Prompt }} [/INST] """
|
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
|
||||||
SYSTEM """"""
|
|
||||||
PARAMETER stop [INST]
|
{{ .Response }}<|eot_id|>"""
|
||||||
PARAMETER stop [/INST]
|
PARAMETER stop "<|start_header_id|>"
|
||||||
PARAMETER stop <<SYS>>
|
PARAMETER stop "<|end_header_id|>"
|
||||||
PARAMETER stop <</SYS>>
|
PARAMETER stop "<|eot_id|>"
|
||||||
|
PARAMETER stop "<|reserved_special_token"
|
||||||
```
|
```
|
||||||
|
|
||||||
## Instructions
|
## Instructions
|
||||||
@@ -104,14 +98,14 @@ The `FROM` instruction defines the base model to use when creating a model.
|
|||||||
FROM <model name>:<tag>
|
FROM <model name>:<tag>
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Build from llama2
|
#### Build from llama3
|
||||||
|
|
||||||
```modelfile
|
```modelfile
|
||||||
FROM llama2
|
FROM llama3
|
||||||
```
|
```
|
||||||
|
|
||||||
A list of available base models:
|
A list of available base models:
|
||||||
<https://github.com/jmorganca/ollama#model-library>
|
<https://github.com/ollama/ollama#model-library>
|
||||||
|
|
||||||
#### Build from a `bin` file
|
#### Build from a `bin` file
|
||||||
|
|
||||||
@@ -129,7 +123,7 @@ The `PARAMETER` instruction defines a parameter that can be set when the model i
|
|||||||
PARAMETER <parameter> <parametervalue>
|
PARAMETER <parameter> <parametervalue>
|
||||||
```
|
```
|
||||||
|
|
||||||
### Valid Parameters and Values
|
#### Valid Parameters and Values
|
||||||
|
|
||||||
| Parameter | Description | Value Type | Example Usage |
|
| Parameter | Description | Value Type | Example Usage |
|
||||||
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -------------------- |
|
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -------------------- |
|
||||||
@@ -137,9 +131,6 @@ PARAMETER <parameter> <parametervalue>
|
|||||||
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
|
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
|
||||||
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
|
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
|
||||||
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
|
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
|
||||||
| num_gqa | The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b | int | num_gqa 1 |
|
|
||||||
| num_gpu | The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 50 |
|
|
||||||
| num_thread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | num_thread 8 |
|
|
||||||
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
|
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
|
||||||
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
|
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
|
||||||
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
|
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
|
||||||
@@ -152,35 +143,28 @@ PARAMETER <parameter> <parametervalue>
|
|||||||
|
|
||||||
### TEMPLATE
|
### TEMPLATE
|
||||||
|
|
||||||
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific. You can usually find the template for a given model in the readme for that model.
|
`TEMPLATE` defines the full prompt template to be passed into the model. It may optionally include a system message, a user's message and the response from the model. Note: syntax may be model specific. Templates use Go [template syntax](https://pkg.go.dev/text/template).
|
||||||
|
|
||||||
#### Template Variables
|
#### Template Variables
|
||||||
|
|
||||||
| Variable | Description |
|
| Variable | Description |
|
||||||
| --------------- | ------------------------------------------------------------------------------------------------------------ |
|
| ----------------- | --------------------------------------------------------------------------------------------- |
|
||||||
| `{{ .System }}` | The system prompt used to specify custom behavior, this must also be set in the Modelfile as an instruction. |
|
| `{{ .System }}` | The system message used to specify custom behavior. |
|
||||||
| `{{ .Prompt }}` | The incoming prompt, this is not specified in the model file and will be set based on input. |
|
| `{{ .Prompt }}` | The user prompt message. |
|
||||||
| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |
|
| `{{ .Response }}` | The response from the model. When generating a response, text after this variable is omitted. |
|
||||||
|
|
||||||
```modelfile
|
```
|
||||||
TEMPLATE """
|
TEMPLATE """{{ if .System }}<|im_start|>system
|
||||||
{{- if .First }}
|
{{ .System }}<|im_end|>
|
||||||
### System:
|
{{ end }}{{ if .Prompt }}<|im_start|>user
|
||||||
{{ .System }}
|
{{ .Prompt }}<|im_end|>
|
||||||
{{- end }}
|
{{ end }}<|im_start|>assistant
|
||||||
|
|
||||||
### User:
|
|
||||||
{{ .Prompt }}
|
|
||||||
|
|
||||||
### Response:
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
SYSTEM """<system message>"""
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### SYSTEM
|
### SYSTEM
|
||||||
|
|
||||||
The `SYSTEM` instruction specifies the system prompt to be used in the template, if applicable.
|
The `SYSTEM` instruction specifies the system message to be used in the template, if applicable.
|
||||||
|
|
||||||
```modelfile
|
```modelfile
|
||||||
SYSTEM """<system message>"""
|
SYSTEM """<system message>"""
|
||||||
@@ -188,7 +172,7 @@ SYSTEM """<system message>"""
|
|||||||
|
|
||||||
### ADAPTER
|
### ADAPTER
|
||||||
|
|
||||||
The `ADAPTER` instruction specifies the LoRA adapter to apply to the base model. The value of this instruction should be an absolute path or a path relative to the Modelfile and the file must be in a GGML file format. The adapter should be tuned from the base model otherwise the behaviour is undefined.
|
The `ADAPTER` instruction is an optional instruction that specifies any LoRA adapter that should apply to the base model. The value of this instruction should be an absolute path or a path relative to the Modelfile and the file must be in a GGML file format. The adapter should be tuned from the base model otherwise the behaviour is undefined.
|
||||||
|
|
||||||
```modelfile
|
```modelfile
|
||||||
ADAPTER ./ollama-lora.bin
|
ADAPTER ./ollama-lora.bin
|
||||||
@@ -204,9 +188,38 @@ LICENSE """
|
|||||||
"""
|
"""
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### MESSAGE
|
||||||
|
|
||||||
|
The `MESSAGE` instruction allows you to specify a message history for the model to use when responding. Use multiple `MESSAGE` commands to build up a conversation that will guide the model to answer in a similar way.
|
||||||
|
|
||||||
|
```modelfile
|
||||||
|
MESSAGE <role> <message>
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Valid roles
|
||||||
|
|
||||||
|
| Role | Description |
|
||||||
|
| --------- | ------------------------------------------------------------ |
|
||||||
|
| system | Alternate way of providing the SYSTEM message for the model. |
|
||||||
|
| user | An example message of what the user could have asked. |
|
||||||
|
| assistant | An example message of how the model should respond. |
|
||||||
|
|
||||||
|
|
||||||
|
#### Example conversation
|
||||||
|
|
||||||
|
```modelfile
|
||||||
|
MESSAGE user Is Toronto in Canada?
|
||||||
|
MESSAGE assistant yes
|
||||||
|
MESSAGE user Is Sacramento in Canada?
|
||||||
|
MESSAGE assistant no
|
||||||
|
MESSAGE user Is Ontario in Canada?
|
||||||
|
MESSAGE assistant yes
|
||||||
|
```
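To try this out, a `Modelfile` containing the `MESSAGE` history above (plus a `FROM` line) can be built into a model and run. A minimal sketch is shown below; the model name `canada-bot` is just a placeholder for this example:

```shell
# build a model from a Modelfile that adds a seeded conversation via MESSAGE
ollama create canada-bot -f ./Modelfile

# the seeded history nudges the model toward short yes/no answers
ollama run canada-bot "Is Vancouver in Canada?"
```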
|
||||||
|
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
- the **`Modelfile` is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
|
- the **`Modelfile` is not case sensitive**. In the examples, uppercase instructions are used to make them easier to distinguish from arguments.
|
||||||
- Instructions can be in any order. In the examples, we start with FROM instruction to keep it easily readable.
|
- Instructions can be in any order. In the examples, the `FROM` instruction is first to keep it easily readable.
|
||||||
|
|
||||||
[1]: https://ollama.ai/library
|
[1]: https://ollama.com/library
|
||||||
|
|||||||
141 docs/openai.md Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
# OpenAI compatibility
|
||||||
|
|
||||||
|
> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/ollama/ollama/blob/main/docs/api.md).
|
||||||
|
|
||||||
|
Ollama provides experimental compatibility with parts of the [OpenAI API](https://platform.openai.com/docs/api-reference) to help connect existing applications to Ollama.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### OpenAI Python library
|
||||||
|
|
||||||
|
```python
|
||||||
|
from openai import OpenAI
|
||||||
|
|
||||||
|
client = OpenAI(
|
||||||
|
base_url='http://localhost:11434/v1/',
|
||||||
|
|
||||||
|
# required but ignored
|
||||||
|
api_key='ollama',
|
||||||
|
)
|
||||||
|
|
||||||
|
chat_completion = client.chat.completions.create(
|
||||||
|
messages=[
|
||||||
|
{
|
||||||
|
'role': 'user',
|
||||||
|
'content': 'Say this is a test',
|
||||||
|
}
|
||||||
|
],
|
||||||
|
model='llama3',
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### OpenAI JavaScript library
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
import OpenAI from 'openai'
|
||||||
|
|
||||||
|
const openai = new OpenAI({
|
||||||
|
baseURL: 'http://localhost:11434/v1/',
|
||||||
|
|
||||||
|
// required but ignored
|
||||||
|
apiKey: 'ollama',
|
||||||
|
})
|
||||||
|
|
||||||
|
const chatCompletion = await openai.chat.completions.create({
|
||||||
|
messages: [{ role: 'user', content: 'Say this is a test' }],
|
||||||
|
model: 'llama3',
|
||||||
|
})
|
||||||
|
```
|
||||||
|
|
||||||
|
### `curl`
|
||||||
|
|
||||||
|
```
|
||||||
|
curl http://localhost:11434/v1/chat/completions \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"model": "llama3",
|
||||||
|
"messages": [
|
||||||
|
{
|
||||||
|
"role": "system",
|
||||||
|
"content": "You are a helpful assistant."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"role": "user",
|
||||||
|
"content": "Hello!"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Endpoints
|
||||||
|
|
||||||
|
### `/v1/chat/completions`
|
||||||
|
|
||||||
|
#### Supported features
|
||||||
|
|
||||||
|
- [x] Chat completions
|
||||||
|
- [x] Streaming
|
||||||
|
- [x] JSON mode
|
||||||
|
- [x] Reproducible outputs
|
||||||
|
- [ ] Vision
|
||||||
|
- [ ] Function calling
|
||||||
|
- [ ] Logprobs
|
||||||
|
|
||||||
|
#### Supported request fields
|
||||||
|
|
||||||
|
- [x] `model`
|
||||||
|
- [x] `messages`
|
||||||
|
- [x] Text `content`
|
||||||
|
- [ ] Array of `content` parts
|
||||||
|
- [x] `frequency_penalty`
|
||||||
|
- [x] `presence_penalty`
|
||||||
|
- [x] `response_format`
|
||||||
|
- [x] `seed`
|
||||||
|
- [x] `stop`
|
||||||
|
- [x] `stream`
|
||||||
|
- [x] `temperature`
|
||||||
|
- [x] `top_p`
|
||||||
|
- [x] `max_tokens`
|
||||||
|
- [ ] `logit_bias`
|
||||||
|
- [ ] `tools`
|
||||||
|
- [ ] `tool_choice`
|
||||||
|
- [ ] `user`
|
||||||
|
- [ ] `n`
|
||||||
|
|
||||||
|
#### Notes
|
||||||
|
|
||||||
|
- Setting `seed` will always set `temperature` to `0` (see the example below)
|
||||||
|
- `finish_reason` will always be `stop`
|
||||||
|
- `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached
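For example, a minimal `curl` sketch that asks for a reproducible completion via `seed` (assuming `llama3` has already been pulled) might look like this:

```shell
curl http://localhost:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "llama3",
    "messages": [{"role": "user", "content": "Say this is a test"}],
    "seed": 101,
    "temperature": 0
  }'
```

Repeating the same request with the same `seed` should return the same completion.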
|
||||||
|
|
||||||
|
## Models
|
||||||
|
|
||||||
|
Before using a model, pull it locally with `ollama pull`:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
ollama pull llama3
|
||||||
|
```
|
||||||
|
|
||||||
|
### Default model names
|
||||||
|
|
||||||
|
For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name:
|
||||||
|
|
||||||
|
```
|
||||||
|
ollama cp llama3 gpt-3.5-turbo
|
||||||
|
```
|
||||||
|
|
||||||
|
Afterwards, this new model name can be specified in the `model` field:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
curl http://localhost:11434/v1/chat/completions \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d '{
|
||||||
|
"model": "gpt-3.5-turbo",
|
||||||
|
"messages": [
|
||||||
|
{
|
||||||
|
"role": "user",
|
||||||
|
"content": "Hello!"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}'
|
||||||
|
```
|
||||||
87 docs/troubleshooting.md Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
# How to troubleshoot issues
|
||||||
|
|
||||||
|
Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cat ~/.ollama/logs/server.log
|
||||||
|
```
|
||||||
|
|
||||||
|
On **Linux** systems with systemd, the logs can be found with this command:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
journalctl -u ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
When you run Ollama in a **container**, the logs go to stdout/stderr in the container:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
docker logs <container-name>
|
||||||
|
```
|
||||||
|
(Use `docker ps` to find the container name)
|
||||||
|
|
||||||
|
If manually running `ollama serve` in a terminal, the logs will be on that terminal.
|
||||||
|
|
||||||
|
When you run Ollama on **Windows**, logs and related files are stored in a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and typing in:
|
||||||
|
- `explorer %LOCALAPPDATA%\Ollama` to view logs
|
||||||
|
- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
|
||||||
|
- `explorer %HOMEPATH%\.ollama` to browse where models and configuration are stored
|
||||||
|
- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
|
||||||
|
|
||||||
|
To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu**, then run the following in a PowerShell terminal:
|
||||||
|
```powershell
|
||||||
|
$env:OLLAMA_DEBUG="1"
|
||||||
|
& "ollama app.exe"
|
||||||
|
```
|
||||||
|
|
||||||
|
Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.
|
||||||
|
|
||||||
|
## LLM libraries
|
||||||
|
|
||||||
|
Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector features. Ollama tries to pick the best one based on the capabilities of your system. If this autodetection has problems, or you run into other problems (e.g. crashes on your GPU), you can work around this by forcing a specific LLM library. `cpu_avx2` will perform the best, followed by `cpu_avx`; the slowest but most compatible is `cpu`. Rosetta emulation under MacOS will work with the `cpu` library.
|
||||||
|
|
||||||
|
In the server log, you will see a message that looks something like this (varies from release to release):
|
||||||
|
|
||||||
|
```
|
||||||
|
Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Experimental LLM Library Override**
|
||||||
|
|
||||||
|
You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection. For example, if you have a CUDA card but want to force the CPU LLM library with AVX2 vector support, use:
|
||||||
|
|
||||||
|
```
|
||||||
|
OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
|
||||||
|
```
|
||||||
|
|
||||||
|
You can see which features your CPU has with the following command:
|
||||||
|
```
|
||||||
|
cat /proc/cpuinfo | grep flags | head -1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Installing older or pre-release versions on Linux
|
||||||
|
|
||||||
|
If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Linux tmp noexec
|
||||||
|
|
||||||
|
If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user Ollama runs as, for example `OLLAMA_TMPDIR=/usr/share/ollama/`.
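For instance, assuming that directory is writable by the user Ollama runs as, a quick way to test the override is to set the variable when starting the server manually:

```shell
# place temporary executable files under /usr/share/ollama/
OLLAMA_TMPDIR=/usr/share/ollama/ ollama serve
```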
|
||||||
|
|
||||||
|
## Container fails to run on NVIDIA GPU
|
||||||
|
|
||||||
|
Make sure you've set up the container runtime first as described in [docker.md](./docker.md)
|
||||||
|
|
||||||
|
Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem:
|
||||||
|
|
||||||
|
- Is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama won't be able to see your NVIDIA GPU.
|
||||||
|
- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
|
||||||
|
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
|
||||||
|
- Try rebooting
|
||||||
|
- Make sure you're running the latest nvidia drivers
|
||||||
|
|
||||||
|
If none of those resolve the problem, gather additional information and file an issue (see the combined sketch below):
|
||||||
|
- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
|
||||||
|
- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`
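A combined sketch of those diagnostics, assuming the standard `ollama/ollama` container started with a container name of `ollama`, could look like:

```shell
# re-run the container with extra CUDA diagnostics enabled
docker rm -f ollama
docker run -d --gpus=all -e CUDA_ERROR_LEVEL=50 -v ollama:/root/.ollama \
  -p 11434:11434 --name ollama ollama/ollama

# collect logs and kernel messages to attach to the issue
docker logs ollama
sudo dmesg | grep -i nvrm
sudo dmesg | grep -i nvidia
```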
|
||||||
83 docs/tutorials/fly-gpu.md Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
# Running Ollama on Fly.io GPU Instances
|
||||||
|
|
||||||
|
Ollama runs with little to no configuration on [Fly.io GPU instances](https://fly.io/docs/gpus/gpu-quickstart/). If you don't have access to GPUs yet, you'll need to [apply for access](https://fly.io/gpu/) on the waitlist. Once you're accepted, you'll get an email with instructions on how to get started.
|
||||||
|
|
||||||
|
Create a new app with `fly apps create`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fly apps create
|
||||||
|
```
|
||||||
|
|
||||||
|
Then create a `fly.toml` file in a new folder that looks like this:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
app = "sparkling-violet-709"
|
||||||
|
primary_region = "ord"
|
||||||
|
vm.size = "a100-40gb" # see https://fly.io/docs/gpus/gpu-quickstart/ for more info
|
||||||
|
|
||||||
|
[build]
|
||||||
|
image = "ollama/ollama"
|
||||||
|
|
||||||
|
[http_service]
|
||||||
|
internal_port = 11434
|
||||||
|
force_https = false
|
||||||
|
auto_stop_machines = true
|
||||||
|
auto_start_machines = true
|
||||||
|
min_machines_running = 0
|
||||||
|
processes = ["app"]
|
||||||
|
|
||||||
|
[mounts]
|
||||||
|
source = "models"
|
||||||
|
destination = "/root/.ollama"
|
||||||
|
initial_size = "100gb"
|
||||||
|
```
|
||||||
|
|
||||||
|
Then create a [new private IPv6 address](https://fly.io/docs/reference/private-networking/#flycast-private-load-balancing) for your app:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fly ips allocate-v6 --private
|
||||||
|
```
|
||||||
|
|
||||||
|
Then deploy your app:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
fly deploy
|
||||||
|
```
|
||||||
|
|
||||||
|
And finally you can access it interactively with a new Fly.io Machine:
|
||||||
|
|
||||||
|
```
|
||||||
|
fly machine run -e OLLAMA_HOST=http://your-app-name.flycast --shell ollama/ollama
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ ollama run openchat:7b-v3.5-fp16
|
||||||
|
>>> How do I bake chocolate chip cookies?
|
||||||
|
To bake chocolate chip cookies, follow these steps:
|
||||||
|
|
||||||
|
1. Preheat the oven to 375°F (190°C) and line a baking sheet with parchment paper or silicone baking mat.
|
||||||
|
|
||||||
|
2. In a large bowl, mix together 1 cup of unsalted butter (softened), 3/4 cup granulated sugar, and 3/4
|
||||||
|
cup packed brown sugar until light and fluffy.
|
||||||
|
|
||||||
|
3. Add 2 large eggs, one at a time, to the butter mixture, beating well after each addition. Stir in 1
|
||||||
|
teaspoon of pure vanilla extract.
|
||||||
|
|
||||||
|
4. In a separate bowl, whisk together 2 cups all-purpose flour, 1/2 teaspoon baking soda, and 1/2 teaspoon
|
||||||
|
salt. Gradually add the dry ingredients to the wet ingredients, stirring until just combined.
|
||||||
|
|
||||||
|
5. Fold in 2 cups of chocolate chips (or chunks) into the dough.
|
||||||
|
|
||||||
|
6. Drop rounded tablespoons of dough onto the prepared baking sheet, spacing them about 2 inches apart.
|
||||||
|
|
||||||
|
7. Bake for 10-12 minutes, or until the edges are golden brown. The centers should still be slightly soft.
|
||||||
|
|
||||||
|
8. Allow the cookies to cool on the baking sheet for a few minutes before transferring them to a wire rack
|
||||||
|
to cool completely.
|
||||||
|
|
||||||
|
Enjoy your homemade chocolate chip cookies!
|
||||||
|
```
|
||||||
|
|
||||||
|
When you set it up like this, it will automatically turn off when you're done using it. Then when you access it again, it will automatically turn back on. This is a great way to save money on GPU instances when you're not using them. If you want a persistent wake-on-use connection to your Ollama instance, you can set up a [connection to your Fly network using WireGuard](https://fly.io/docs/reference/private-networking/#discovering-apps-through-dns-on-a-wireguard-connection). Then you can access your Ollama instance at `http://your-app-name.flycast`.
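Once that connection is up, any Ollama client can point at the flycast address. A minimal sketch using the REST API (the app name and model are the examples from above) is:

```shell
curl http://your-app-name.flycast/api/generate -d '{
  "model": "openchat:7b-v3.5-fp16",
  "prompt": "Why is the sky blue?",
  "stream": false
}'
```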
|
||||||
|
|
||||||
|
And that's it!
|
||||||
@@ -5,25 +5,25 @@ In this tutorial, we are going to use JavaScript with LangChain and Ollama to le
|
|||||||
To get started, let's just use **LangChain** to ask a simple question to a model. To do this with JavaScript, we need to install **LangChain**:
|
To get started, let's just use **LangChain** to ask a simple question to a model. To do this with JavaScript, we need to install **LangChain**:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
npm install langchain
|
npm install @langchain/community
|
||||||
```
|
```
|
||||||
|
|
||||||
Now we can start building out our JavaScript:
|
Now we can start building out our JavaScript:
|
||||||
|
|
||||||
```javascript
|
```javascript
|
||||||
import { Ollama } from "langchain/llms/ollama";
|
import { Ollama } from "@langchain/community/llms/ollama";
|
||||||
|
|
||||||
const ollama = new Ollama({
|
const ollama = new Ollama({
|
||||||
baseUrl: "http://localhost:11434",
|
baseUrl: "http://localhost:11434",
|
||||||
model: "llama2",
|
model: "llama3",
|
||||||
});
|
});
|
||||||
|
|
||||||
const answer = await ollama.call(`why is the sky blue?`);
|
const answer = await ollama.invoke(`why is the sky blue?`);
|
||||||
|
|
||||||
console.log(answer);
|
console.log(answer);
|
||||||
```
|
```
|
||||||
|
|
||||||
That will get us the same thing as if we ran `ollama run llama2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.
|
That will get us the same thing as if we ran `ollama run llama3 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
npm install cheerio
|
npm install cheerio
|
||||||
|
|||||||
@@ -12,15 +12,17 @@ So let's figure out how we can use **LangChain** with Ollama to ask our question
|
|||||||
|
|
||||||
Let's start by asking a simple question that we can get an answer to from the **Llama2** model using **Ollama**. First, we need to install the **LangChain** package:
|
Let's start by asking a simple question that we can get an answer to from the **Llama3** model using **Ollama**. First, we need to install the **LangChain** package:
|
||||||
|
|
||||||
`pip install langchain`
|
`pip install langchain_community`
|
||||||
|
|
||||||
Then we can create a model and ask the question:
|
Then we can create a model and ask the question:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from langchain.llms import Ollama
|
from langchain_community.llms import Ollama
|
||||||
ollama = Ollama(base_url='http://localhost:11434',
|
ollama = Ollama(
|
||||||
model="llama2")
|
base_url='http://localhost:11434',
|
||||||
print(ollama("why is the sky blue"))
|
model="llama3"
|
||||||
|
)
|
||||||
|
print(ollama.invoke("why is the sky blue"))
|
||||||
```
|
```
|
||||||
|
|
||||||
Notice that we are defining the model and the base URL for Ollama.
|
Notice that we are defining the model and the base URL for Ollama.
|
||||||
@@ -42,12 +44,12 @@ text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
|
|||||||
all_splits = text_splitter.split_documents(data)
|
all_splits = text_splitter.split_documents(data)
|
||||||
```
|
```
|
||||||
|
|
||||||
It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We can use Ollama directly to instantiate an embedding model. We will use ChromaDB in this example for a vector database. `pip install GPT4All chromadb`
|
It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We can use Ollama directly to instantiate an embedding model. We will use ChromaDB in this example for a vector database. `pip install chromadb`
|
||||||
|
We also need to pull an embedding model: `ollama pull nomic-embed-text`
|
||||||
```python
|
```python
|
||||||
from langchain.embeddings import OllamaEmbeddings
|
from langchain.embeddings import OllamaEmbeddings
|
||||||
from langchain.vectorstores import Chroma
|
from langchain.vectorstores import Chroma
|
||||||
oembed = OllamaEmbeddings(base_url="http://localhost:11434", model="llama2")
|
oembed = OllamaEmbeddings(base_url="http://localhost:11434", model="nomic-embed-text")
|
||||||
vectorstore = Chroma.from_documents(documents=all_splits, embedding=oembed)
|
vectorstore = Chroma.from_documents(documents=all_splits, embedding=oembed)
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -66,7 +68,8 @@ The next thing is to send the question and the relevant parts of the docs to the
|
|||||||
```python
|
```python
|
||||||
from langchain.chains import RetrievalQA
|
from langchain.chains import RetrievalQA
|
||||||
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
|
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
|
||||||
qachain({"query": question})
|
res = qachain.invoke({"query": question})
|
||||||
|
print(res['result'])
|
||||||
```
|
```
|
||||||
|
|
||||||
The answer received from this chain was:
|
The answer received from this chain was:
|
||||||
|
|||||||
@@ -1,38 +1,15 @@
|
|||||||
# Running Ollama on NVIDIA Jetson Devices
|
# Running Ollama on NVIDIA Jetson Devices
|
||||||
|
|
||||||
With some minor configuration, Ollama runs well on [NVIDIA Jetson Devices](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/). The following has been tested on [JetPack 5.1.2](https://developer.nvidia.com/embedded/jetpack).
|
Ollama runs well on [NVIDIA Jetson Devices](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/) and should run out of the box with the standard installation instructions.
|
||||||
|
|
||||||
NVIDIA Jetson devices are Linux-based embedded AI computers that are purpose-built for AI applications.
|
The following has been tested on [JetPack 5.1.2](https://developer.nvidia.com/embedded/jetpack), but should also work on JetPack 6.0.
|
||||||
|
|
||||||
Jetsons have an integrated GPU that is wired directly to the memory controller of the machine. For this reason, the `nvidia-smi` command is unrecognized, and Ollama proceeds to operate in "CPU only"
|
- Install Ollama via standard Linux command (ignore the 404 error): `curl https://ollama.com/install.sh | sh`
|
||||||
mode. This can be verified by using a monitoring tool like jtop.
|
|
||||||
|
|
||||||
In order to address this, we simply pass the path to the Jetson's pre-installed CUDA libraries into `ollama serve` (while in a tmux session). We then hardcode the num_gpu parameters into a cloned
|
|
||||||
version of our target model.
|
|
||||||
|
|
||||||
Prerequisites:
|
|
||||||
|
|
||||||
- curl
|
|
||||||
- tmux
|
|
||||||
|
|
||||||
Here are the steps:
|
|
||||||
|
|
||||||
- Install Ollama via standard Linux command (ignore the 404 error): `curl https://ollama.ai/install.sh | sh`
|
|
||||||
- Stop the Ollama service: `sudo systemctl stop ollama`
|
|
||||||
- Start Ollama serve in a tmux session called ollama_jetson and reference the CUDA libraries path: `tmux has-session -t ollama_jetson 2>/dev/null || tmux new-session -d -s ollama_jetson
|
|
||||||
'LD_LIBRARY_PATH=/usr/local/cuda/lib64 ollama serve'`
|
|
||||||
- Pull the model you want to use (e.g. mistral): `ollama pull mistral`
|
- Pull the model you want to use (e.g. mistral): `ollama pull mistral`
|
||||||
- Create a new Modelfile specifically for enabling GPU support on the Jetson: `touch ModelfileMistralJetson`
|
- Start an interactive session: `ollama run mistral`
|
||||||
- In the ModelfileMistralJetson file, specify the FROM model and the num_gpu PARAMETER as shown below:
|
|
||||||
|
|
||||||
```
|
|
||||||
FROM mistral
|
|
||||||
PARAMETER num_gpu 999
|
|
||||||
```
|
|
||||||
|
|
||||||
- Create a new model from your Modelfile: `ollama create mistral-jetson -f ./ModelfileMistralJetson`
|
|
||||||
- Run the new model: `ollama run mistral-jetson`
|
|
||||||
|
|
||||||
If you run a monitoring tool like jtop you should now see that Ollama is using the Jetson's integrated GPU.
|
|
||||||
|
|
||||||
And that's it!
|
And that's it!
|
||||||
|
|
||||||
|
# Running Ollama in Docker
|
||||||
|
|
||||||
|
When running GPU-accelerated applications in Docker, it is highly recommended to use the [dusty-nv jetson-containers repo](https://github.com/dusty-nv/jetson-containers).
|
||||||
61 docs/windows.md Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
# Ollama Windows Preview
|
||||||
|
|
||||||
|
Welcome to the Ollama Windows preview.
|
||||||
|
|
||||||
|
No more WSL required!
|
||||||
|
|
||||||
|
Ollama now runs as a native Windows application, including NVIDIA and AMD Radeon GPU support.
|
||||||
|
After installing Ollama Windows Preview, Ollama will run in the background and
|
||||||
|
the `ollama` command line is available in `cmd`, `powershell` or your favorite
|
||||||
|
terminal application. As usual the Ollama [api](./api.md) will be served on
|
||||||
|
`http://localhost:11434`.
|
||||||
|
|
||||||
|
As this is a preview release, you should expect a few bugs here and there. If
|
||||||
|
you run into a problem you can reach out on
|
||||||
|
[Discord](https://discord.gg/ollama), or file an
|
||||||
|
[issue](https://github.com/ollama/ollama/issues).
|
||||||
|
Logs will often be helpful in diagnosing the problem (see
|
||||||
|
[Troubleshooting](#troubleshooting) below)
|
||||||
|
|
||||||
|
## System Requirements
|
||||||
|
|
||||||
|
* Windows 10 or newer, Home or Pro
|
||||||
|
* NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
|
||||||
|
* AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card
|
||||||
|
|
||||||
|
## API Access
|
||||||
|
|
||||||
|
Here's a quick example showing API access from `powershell`
|
||||||
|
```powershell
|
||||||
|
(Invoke-WebRequest -method POST -Body '{"model":"llama3", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds
|
||||||
|
a "view logs" menu item to the app, and increases logging for the GUI app and
|
||||||
|
server.
|
||||||
|
|
||||||
|
Ollama on Windows stores files in a few different locations. You can view them in
|
||||||
|
the explorer window by hitting `<cmd>+R` and typing in:
|
||||||
|
- `explorer %LOCALAPPDATA%\Ollama` contains logs, and downloaded updates
|
||||||
|
- *app.log* contains logs from the GUI application
|
||||||
|
- *server.log* contains the server logs
|
||||||
|
- *upgrade.log* contains log output for upgrades
|
||||||
|
- `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH)
|
||||||
|
- `explorer %HOMEPATH%\.ollama` contains models and configuration
|
||||||
|
- `explorer %TEMP%` contains temporary executable files in one or more `ollama*` directories
|
||||||
|
|
||||||
|
|
||||||
|
## Standalone CLI
|
||||||
|
|
||||||
|
The easiest way to install Ollama on Windows is to use the `OllamaSetup.exe`
|
||||||
|
installer. It installs in your account without requiring Administrator rights.
|
||||||
|
We update Ollama regularly to support the latest models, and this installer will
|
||||||
|
help you keep up to date.
|
||||||
|
|
||||||
|
If you'd like to install or integrate Ollama as a service, a standalone
|
||||||
|
`ollama-windows-amd64.zip` zip file is available containing only the Ollama CLI
|
||||||
|
and GPU library dependencies for Nvidia and AMD. This allows for embedding
|
||||||
|
Ollama in existing applications, or running it as a system service via `ollama
|
||||||
|
serve` with tools such as [NSSM](https://nssm.cc/).
|
||||||
302 envconfig/config.go Normal file
@@ -0,0 +1,302 @@
|
|||||||
|
package envconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type OllamaHost struct {
|
||||||
|
Scheme string
|
||||||
|
Host string
|
||||||
|
Port string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o OllamaHost) String() string {
|
||||||
|
return fmt.Sprintf("%s://%s:%s", o.Scheme, o.Host, o.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")
|
||||||
|
|
||||||
|
var (
|
||||||
|
// Set via OLLAMA_ORIGINS in the environment
|
||||||
|
AllowOrigins []string
|
||||||
|
// Set via OLLAMA_DEBUG in the environment
|
||||||
|
Debug bool
|
||||||
|
// Experimental flash attention
|
||||||
|
FlashAttention bool
|
||||||
|
// Set via OLLAMA_HOST in the environment
|
||||||
|
Host *OllamaHost
|
||||||
|
// Set via OLLAMA_KEEP_ALIVE in the environment
|
||||||
|
KeepAlive string
|
||||||
|
// Set via OLLAMA_LLM_LIBRARY in the environment
|
||||||
|
LLMLibrary string
|
||||||
|
// Set via OLLAMA_MAX_LOADED_MODELS in the environment
|
||||||
|
MaxRunners int
|
||||||
|
// Set via OLLAMA_MAX_QUEUE in the environment
|
||||||
|
MaxQueuedRequests int
|
||||||
|
// Set via OLLAMA_MODELS in the environment
|
||||||
|
ModelsDir string
|
||||||
|
// Set via OLLAMA_MAX_VRAM in the environment
|
||||||
|
MaxVRAM uint64
|
||||||
|
// Set via OLLAMA_NOHISTORY in the environment
|
||||||
|
NoHistory bool
|
||||||
|
// Set via OLLAMA_NOPRUNE in the environment
|
||||||
|
NoPrune bool
|
||||||
|
// Set via OLLAMA_NUM_PARALLEL in the environment
|
||||||
|
NumParallel int
|
||||||
|
// Set via OLLAMA_RUNNERS_DIR in the environment
|
||||||
|
RunnersDir string
|
||||||
|
// Set via OLLAMA_TMPDIR in the environment
|
||||||
|
TmpDir string
|
||||||
|
)
|
||||||
|
|
||||||
|
type EnvVar struct {
|
||||||
|
Name string
|
||||||
|
Value any
|
||||||
|
Description string
|
||||||
|
}
|
||||||
|
|
||||||
|
func AsMap() map[string]EnvVar {
|
||||||
|
return map[string]EnvVar{
|
||||||
|
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
|
||||||
|
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
|
||||||
|
"OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
|
||||||
|
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
|
||||||
|
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
|
||||||
|
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
|
||||||
|
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
|
||||||
|
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
|
||||||
|
"OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"},
|
||||||
|
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
|
||||||
|
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
|
||||||
|
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},
|
||||||
|
"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
|
||||||
|
"OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"},
|
||||||
|
"OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir, "Location for temporary files"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Values() map[string]string {
|
||||||
|
vals := make(map[string]string)
|
||||||
|
for k, v := range AsMap() {
|
||||||
|
vals[k] = fmt.Sprintf("%v", v.Value)
|
||||||
|
}
|
||||||
|
return vals
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultAllowOrigins = []string{
|
||||||
|
"localhost",
|
||||||
|
"127.0.0.1",
|
||||||
|
"0.0.0.0",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clean quotes and spaces from the value
|
||||||
|
func clean(key string) string {
|
||||||
|
return strings.Trim(os.Getenv(key), "\"' ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// default values
|
||||||
|
NumParallel = 1
|
||||||
|
MaxRunners = 1
|
||||||
|
MaxQueuedRequests = 512
|
||||||
|
|
||||||
|
LoadConfig()
|
||||||
|
}
|
||||||
|
|
||||||
|
func LoadConfig() {
|
||||||
|
if debug := clean("OLLAMA_DEBUG"); debug != "" {
|
||||||
|
d, err := strconv.ParseBool(debug)
|
||||||
|
if err == nil {
|
||||||
|
Debug = d
|
||||||
|
} else {
|
||||||
|
Debug = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fa := clean("OLLAMA_FLASH_ATTENTION"); fa != "" {
|
||||||
|
d, err := strconv.ParseBool(fa)
|
||||||
|
if err == nil {
|
||||||
|
FlashAttention = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
RunnersDir = clean("OLLAMA_RUNNERS_DIR")
|
||||||
|
if runtime.GOOS == "windows" && RunnersDir == "" {
|
||||||
|
// On Windows we do not carry the payloads inside the main executable
|
||||||
|
appExe, err := os.Executable()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("failed to lookup executable path", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cwd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("failed to lookup working directory", "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var paths []string
|
||||||
|
for _, root := range []string{filepath.Dir(appExe), cwd} {
|
||||||
|
paths = append(paths,
|
||||||
|
root,
|
||||||
|
filepath.Join(root, "windows-"+runtime.GOARCH),
|
||||||
|
filepath.Join(root, "dist", "windows-"+runtime.GOARCH),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try a few variations to improve developer experience when building from source in the local tree
|
||||||
|
for _, p := range paths {
|
||||||
|
candidate := filepath.Join(p, "ollama_runners")
|
||||||
|
_, err := os.Stat(candidate)
|
||||||
|
if err == nil {
|
||||||
|
RunnersDir = candidate
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if RunnersDir == "" {
|
||||||
|
slog.Error("unable to locate llm runner directory. Set OLLAMA_RUNNERS_DIR to the location of 'ollama_runners'")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
TmpDir = clean("OLLAMA_TMPDIR")
|
||||||
|
|
||||||
|
userLimit := clean("OLLAMA_MAX_VRAM")
|
||||||
|
if userLimit != "" {
|
||||||
|
avail, err := strconv.ParseUint(userLimit, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting, ignoring", "OLLAMA_MAX_VRAM", userLimit, "error", err)
|
||||||
|
} else {
|
||||||
|
MaxVRAM = avail
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LLMLibrary = clean("OLLAMA_LLM_LIBRARY")
|
||||||
|
|
||||||
|
if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" {
|
||||||
|
val, err := strconv.Atoi(onp)
|
||||||
|
if err != nil || val <= 0 {
|
||||||
|
slog.Error("invalid setting must be greater than zero", "OLLAMA_NUM_PARALLEL", onp, "error", err)
|
||||||
|
} else {
|
||||||
|
NumParallel = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if nohistory := clean("OLLAMA_NOHISTORY"); nohistory != "" {
|
||||||
|
NoHistory = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if noprune := clean("OLLAMA_NOPRUNE"); noprune != "" {
|
||||||
|
NoPrune = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if origins := clean("OLLAMA_ORIGINS"); origins != "" {
|
||||||
|
AllowOrigins = strings.Split(origins, ",")
|
||||||
|
}
|
||||||
|
for _, allowOrigin := range defaultAllowOrigins {
|
||||||
|
AllowOrigins = append(AllowOrigins,
|
||||||
|
fmt.Sprintf("http://%s", allowOrigin),
|
||||||
|
fmt.Sprintf("https://%s", allowOrigin),
|
||||||
|
fmt.Sprintf("http://%s", net.JoinHostPort(allowOrigin, "*")),
|
||||||
|
fmt.Sprintf("https://%s", net.JoinHostPort(allowOrigin, "*")),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
AllowOrigins = append(AllowOrigins,
|
||||||
|
"app://*",
|
||||||
|
"file://*",
|
||||||
|
"tauri://*",
|
||||||
|
)
|
||||||
|
|
||||||
|
maxRunners := clean("OLLAMA_MAX_LOADED_MODELS")
|
||||||
|
if maxRunners != "" {
|
||||||
|
m, err := strconv.Atoi(maxRunners)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
|
||||||
|
} else {
|
||||||
|
MaxRunners = m
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" {
|
||||||
|
p, err := strconv.Atoi(onp)
|
||||||
|
if err != nil || p <= 0 {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_MAX_QUEUE", onp, "error", err)
|
||||||
|
} else {
|
||||||
|
MaxQueuedRequests = p
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
KeepAlive = clean("OLLAMA_KEEP_ALIVE")
|
||||||
|
|
||||||
|
var err error
|
||||||
|
ModelsDir, err = getModelsDir()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_MODELS", ModelsDir, "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
Host, err = getOllamaHost()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_HOST", Host, "error", err, "using default port", Host.Port)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getModelsDir() (string, error) {
|
||||||
|
if models, exists := os.LookupEnv("OLLAMA_MODELS"); exists {
|
||||||
|
return models, nil
|
||||||
|
}
|
||||||
|
home, err := os.UserHomeDir()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return filepath.Join(home, ".ollama", "models"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOllamaHost() (*OllamaHost, error) {
|
||||||
|
defaultPort := "11434"
|
||||||
|
|
||||||
|
hostVar := os.Getenv("OLLAMA_HOST")
|
||||||
|
hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))
|
||||||
|
|
||||||
|
scheme, hostport, ok := strings.Cut(hostVar, "://")
|
||||||
|
switch {
|
||||||
|
case !ok:
|
||||||
|
scheme, hostport = "http", hostVar
|
||||||
|
case scheme == "http":
|
||||||
|
defaultPort = "80"
|
||||||
|
case scheme == "https":
|
||||||
|
defaultPort = "443"
|
||||||
|
}
|
||||||
|
|
||||||
|
// trim trailing slashes
|
||||||
|
hostport = strings.TrimRight(hostport, "/")
|
||||||
|
|
||||||
|
host, port, err := net.SplitHostPort(hostport)
|
||||||
|
if err != nil {
|
||||||
|
host, port = "127.0.0.1", defaultPort
|
||||||
|
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
|
||||||
|
host = ip.String()
|
||||||
|
} else if hostport != "" {
|
||||||
|
host = hostport
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
|
||||||
|
return &OllamaHost{
|
||||||
|
Scheme: scheme,
|
||||||
|
Host: host,
|
||||||
|
Port: defaultPort,
|
||||||
|
}, ErrInvalidHostPort
|
||||||
|
}
|
||||||
|
|
||||||
|
return &OllamaHost{
|
||||||
|
Scheme: scheme,
|
||||||
|
Host: host,
|
||||||
|
Port: port,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
71 envconfig/config_test.go Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
package envconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConfig(t *testing.T) {
|
||||||
|
Debug = false // Reset whatever was loaded in init()
|
||||||
|
t.Setenv("OLLAMA_DEBUG", "")
|
||||||
|
LoadConfig()
|
||||||
|
require.False(t, Debug)
|
||||||
|
t.Setenv("OLLAMA_DEBUG", "false")
|
||||||
|
LoadConfig()
|
||||||
|
require.False(t, Debug)
|
||||||
|
t.Setenv("OLLAMA_DEBUG", "1")
|
||||||
|
LoadConfig()
|
||||||
|
require.True(t, Debug)
|
||||||
|
t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
|
||||||
|
LoadConfig()
|
||||||
|
require.True(t, FlashAttention)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClientFromEnvironment(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
value string
|
||||||
|
expect string
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
hostTestCases := map[string]*testCase{
|
||||||
|
"empty": {value: "", expect: "127.0.0.1:11434"},
|
||||||
|
"only address": {value: "1.2.3.4", expect: "1.2.3.4:11434"},
|
||||||
|
"only port": {value: ":1234", expect: ":1234"},
|
||||||
|
"address and port": {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
|
||||||
|
"hostname": {value: "example.com", expect: "example.com:11434"},
|
||||||
|
"hostname and port": {value: "example.com:1234", expect: "example.com:1234"},
|
||||||
|
"zero port": {value: ":0", expect: ":0"},
|
||||||
|
"too large port": {value: ":66000", err: ErrInvalidHostPort},
|
||||||
|
"too small port": {value: ":-1", err: ErrInvalidHostPort},
|
||||||
|
"ipv6 localhost": {value: "[::1]", expect: "[::1]:11434"},
|
||||||
|
"ipv6 world open": {value: "[::]", expect: "[::]:11434"},
|
||||||
|
"ipv6 no brackets": {value: "::1", expect: "[::1]:11434"},
|
||||||
|
"ipv6 + port": {value: "[::1]:1337", expect: "[::1]:1337"},
|
||||||
|
"extra space": {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
|
||||||
|
"extra quotes": {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
|
||||||
|
"extra space+quotes": {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
|
||||||
|
"extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range hostTestCases {
|
||||||
|
t.Run(k, func(t *testing.T) {
|
||||||
|
t.Setenv("OLLAMA_HOST", v.value)
|
||||||
|
LoadConfig()
|
||||||
|
|
||||||
|
oh, err := getOllamaHost()
|
||||||
|
if err != v.err {
|
||||||
|
t.Fatalf("expected %s, got %s", v.err, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
host := net.JoinHostPort(oh.Host, oh.Port)
|
||||||
|
assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
3 examples/.gitignore vendored
@@ -1,7 +1,10 @@
|
|||||||
node_modules
|
node_modules
|
||||||
|
bun.lockb
|
||||||
|
.vscode
|
||||||
# OSX
|
# OSX
|
||||||
.DS_STORE
|
.DS_STORE
|
||||||
|
|
||||||
|
|
||||||
# Models
|
# Models
|
||||||
models/
|
models/
|
||||||
|
|
||||||
|
|||||||
@@ -1,10 +0,0 @@
|
|||||||
# Bash Shell examples
|
|
||||||
|
|
||||||
When calling `ollama`, you can pass it a file to run all the prompts in the file, one after the other:
|
|
||||||
|
|
||||||
`ollama run llama2 < sourcequestions.txt`
|
|
||||||
|
|
||||||
This concept is used in the following example.
|
|
||||||
|
|
||||||
## Compare Models
|
|
||||||
`comparemodels.sh` is a script that runs all the questions in `sourcequestions.txt` using any 4 models you choose that you have already pulled from the Ollama library or have created locally.
|
|
||||||
@@ -1,64 +0,0 @@
|
|||||||
#! /usr/bin/env bash
|
|
||||||
# Compare multiple models by running them with the same questions
|
|
||||||
|
|
||||||
NUMBEROFCHOICES=4
|
|
||||||
SELECTIONS=()
|
|
||||||
declare -a SUMS=()
|
|
||||||
|
|
||||||
# Get the list of models
|
|
||||||
CHOICES=$(ollama list | awk '{print $1}')
|
|
||||||
|
|
||||||
# Select which models to run as a comparison
|
|
||||||
echo "Select $NUMBEROFCHOICES models to compare:"
|
|
||||||
select ITEM in $CHOICES; do
|
|
||||||
if [[ -n $ITEM ]]; then
|
|
||||||
echo "You have selected $ITEM"
|
|
||||||
SELECTIONS+=("$ITEM")
|
|
||||||
((COUNT++))
|
|
||||||
if [[ $COUNT -eq $NUMBEROFCHOICES ]]; then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "Invalid selection"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# Loop through each of the selected models
|
|
||||||
for ITEM in "${SELECTIONS[@]}"; do
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo "Loading the model $ITEM into memory"
|
|
||||||
ollama run "$ITEM" ""
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo "Running the questions through the model $ITEM"
|
|
||||||
COMMAND_OUTPUT=$(ollama run "$ITEM" --verbose < sourcequestions.txt 2>&1| tee /dev/stderr)
|
|
||||||
|
|
||||||
# eval duration is sometimes listed in seconds and sometimes in milliseconds.
|
|
||||||
# Add up the values for each model
|
|
||||||
SUM=$(echo "$COMMAND_OUTPUT" | awk '
|
|
||||||
/eval duration:/ {
|
|
||||||
value = $3
|
|
||||||
if (index(value, "ms") > 0) {
|
|
||||||
gsub("ms", "", value)
|
|
||||||
value /= 1000
|
|
||||||
} else {
|
|
||||||
gsub("s", "", value)
|
|
||||||
}
|
|
||||||
sum += value
|
|
||||||
}
|
|
||||||
END { print sum }')
|
|
||||||
|
|
||||||
|
|
||||||
SUMS+=("All questions for $ITEM completed in $SUM seconds")
|
|
||||||
done
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo -e "Sums of eval durations for each run:"
|
|
||||||
for val in "${SUMS[@]}"; do
|
|
||||||
echo "$val"
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
echo "Comparison complete. Now you can decide"
|
|
||||||
echo "which model is best."
|
|
||||||
echo "--------------------------------------------------------------"
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
Why is the sky blue
|
|
||||||
What is a black hole
|
|
||||||
Explain the big bang theory like I am 5?
|
|
||||||
What is the quickest way to win a game of Monopoly with 3 others?
|
|
||||||
Why does a vacuum bottle keep my coffee hot and my milkshake cold?
|
|
||||||
What is the difference between a meteor, a meteorite, and a meteoroid?
|
|
||||||
Create an array with 5 items and print to the console. Do this in Python, C#, Typescript, and Rust.
|
|
||||||
Some files were not shown because too many files have changed in this diff.