Compare commits

...

327 Commits

Author SHA1 Message Date
Jeffrey Morgan
177b69a211 add missing entries for 34B 2023-08-25 18:35:35 -07:00
Michael Yang
dad63f0821 Merge pull request #411 from jmorganca/mxyng/34b
patch llama.cpp for 34B
2023-08-25 11:59:05 -07:00
Michael Yang
7a378f8b66 patch llama.cpp for 34B 2023-08-25 10:06:55 -07:00
Michael Yang
de0bdd7f29 Merge pull request #405 from jmorganca/mxyng/34b
add 34b model type
2023-08-24 10:37:22 -07:00
Michael Yang
b1cececb8e add 34b model type 2023-08-24 10:35:44 -07:00
Michael Yang
e0d39fa3bf Merge pull request #398 from jmorganca/mxyng/cleanup
Mxyng/cleanup
2023-08-22 15:51:41 -07:00
Michael Yang
968ced2e71 Merge pull request #393 from jmorganca/mxyng/net-url
use url.URL
2023-08-22 15:51:33 -07:00
Michael Yang
32d1a00017 remove unused requestContextKey 2023-08-22 10:49:54 -07:00
Michael Yang
04e2128273 move upload funcs to upload.go 2023-08-22 10:49:53 -07:00
Michael Yang
2cc634689b use url.URL 2023-08-22 10:49:07 -07:00
Michael Yang
8f827641b0 Merge pull request #397 from jmorganca/mxyng/release-mode
build release mode
2023-08-22 10:48:44 -07:00
Michael Yang
95187d7e1e build release mode 2023-08-22 09:52:43 -07:00
Michael Yang
9ec7e37534 Merge pull request #392 from jmorganca/mxyng/version
add version
2023-08-22 09:50:25 -07:00
Michael Yang
2c7f956b38 add version 2023-08-22 09:40:58 -07:00
Jeffrey Morgan
a9f6c56652 fix FROM instruction erroring when referring to a file 2023-08-22 09:39:42 -07:00
Ryan Baker
0a892419ad Strip protocol from model path (#377) 2023-08-21 21:56:56 -07:00
Jeffrey Morgan
e3054fc74e add .env to .dockerignore 2023-08-21 09:32:02 -07:00
Michael Yang
23c2485044 Merge pull request #381 from jmorganca/mxyng/fix-push-chunks
retry on unauthorized chunk push
2023-08-18 13:49:25 -07:00
Michael Yang
386c66f285 Merge pull request #378 from jmorganca/mxyng/copy-metadata-from-source
copy metadata from source
2023-08-18 13:49:09 -07:00
Michael Yang
3b49315f97 retry on unauthorized chunk push
The token printed for authorized requests has a lifetime of 1h. If an
upload exceeds 1h, a chunk push will fail since the token is created on
a "start upload" request.

This replaces the Pipe with SectionReader which is simpler and
implements Seek, a requirement for makeRequestWithRetry. This is
slightly worse than using a Pipe since the progress update is directly
tied to the chunk size instead of controlled separately.
2023-08-18 11:23:47 -07:00
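The pattern described above can be sketched as follows: wrapping each chunk in an io.SectionReader gives the request body a Seek method, so an attempt that fails (for example with 401 once the token expires) can rewind and be re-sent after re-authorizing. The function name, PATCH method, chunk size, and URL below are illustrative assumptions, not the repository's actual makeRequestWithRetry code.

```
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// pushChunk uploads one chunk, rewinding the SectionReader before every retry.
// Re-authorization on 401 is elided; the point is that Seek makes retry possible.
func pushChunk(uploadURL string, chunk *io.SectionReader, attempts int) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		if _, err := chunk.Seek(0, io.SeekStart); err != nil {
			return err
		}
		req, err := http.NewRequest(http.MethodPatch, uploadURL, chunk)
		if err != nil {
			return err
		}
		req.ContentLength = chunk.Size()
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			lastErr = err
			continue
		}
		resp.Body.Close()
		if resp.StatusCode == http.StatusUnauthorized {
			lastErr = fmt.Errorf("unauthorized: refresh token and retry")
			continue // a fresh token would be fetched here
		}
		return nil
	}
	return lastErr
}

func main() {
	f, err := os.Open("layer.bin") // hypothetical layer blob
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// One 32 MiB chunk starting at offset 0; progress now tracks whole chunks.
	_ = pushChunk("http://registry.example/v2/upload", io.NewSectionReader(f, 0, 32<<20), 3)
}
```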
Michael Yang
5ca05c2e88 fix ModelType() 2023-08-18 11:23:38 -07:00
Michael Yang
7eda70f23b copy metadata from source 2023-08-17 21:55:25 -07:00
Jeffrey Morgan
3d79b414d3 app: package ggml-metal.metal from correct directory 2023-08-17 23:55:45 -04:00
Michael Yang
c84bbf1dd6 Merge pull request #376 from jmorganca/mxyng/from-map-ignore-nil
ignore nil map values
2023-08-17 15:57:12 -07:00
Michael Yang
f723bf0879 ignore nil map values 2023-08-17 15:50:46 -07:00
Michael Yang
cbf725a9ba Merge pull request #375 from jmorganca/mxyng/fix-push
fix push manifest
2023-08-17 15:33:31 -07:00
Michael Yang
086449b6c7 fmt 2023-08-17 15:32:31 -07:00
Michael Yang
3cbc6a5c01 fix push manifest 2023-08-17 15:28:12 -07:00
Jeffrey Morgan
54bb49a502 parse protocol for OLLAMA_HOST 2023-08-17 18:20:44 -04:00
Michael Yang
cabaada956 Merge pull request #372 from jmorganca/mxyng/string-types
model and file type as strings
2023-08-17 15:10:59 -07:00
Michael Yang
a894cc792d model and file type as strings 2023-08-17 12:08:04 -07:00
Bruce MacDonald
519f4d98ef add embed docs for modelfile 2023-08-17 13:37:42 -04:00
Michael Yang
b963a83559 Merge pull request #364 from jmorganca/chunked-uploads
reimplement chunked uploads
2023-08-17 09:58:51 -07:00
Michael Yang
bf6688abe6 Merge pull request #360 from jmorganca/fix-request-copies
Fix request copies
2023-08-17 09:58:42 -07:00
Bruce MacDonald
6005b157c2 retry download on network errors 2023-08-17 10:31:45 -04:00
Patrick Devine
14220d9833 set the scopes correctly (#368) 2023-08-16 21:42:02 -07:00
Michael Chiang
8ca50f24f3 fix nous-hermes model file size listing in readme (#367)
fix nous-hermes model file size listing in readme
2023-08-16 23:42:00 -04:00
Michael Chiang
c149fc3143 Update README.md 2023-08-16 22:54:55 -04:00
Michael Chiang
afbc763dac adding link to models directly available on ollama (#366)
- adding link to models directly available on ollama

- ability to push your own models to the library will come in the future
2023-08-16 22:53:27 -04:00
Michael Yang
5dfe91be8b reimplement chunked uploads 2023-08-16 14:50:24 -07:00
Michael Yang
9f944c00f1 push: retry on unauthorized 2023-08-16 11:35:33 -07:00
Michael Yang
56e87cecb1 images: remove body copies 2023-08-16 10:30:41 -07:00
Jeffrey Morgan
5ee6116420 set default OLLAMA_HOST to http://localhost:11434 2023-08-16 12:22:59 -04:00
Michael Yang
5d9a4cd251 Merge pull request #348 from jmorganca/cross-repo-mount
cross repo blob mount
2023-08-16 09:20:36 -07:00
Michael Yang
0ebec07569 Merge pull request #345 from jmorganca/exit-non-zero
set non-zero error code on error
2023-08-16 09:20:28 -07:00
Matt Williams
08265515b3 Merge pull request #303 from jmorganca/matt/dockerit
DockerIt example
2023-08-16 08:04:34 -07:00
Blake Mizerany
67e593e355 cmd: support OLLAMA_CLIENT_HOST environment variable (#262)
* cmd: support OLLAMA_HOST environment variable

This commit adds support for the OLLAMA_HOST environment
variable. This variable can be used to specify the host to which
the client should connect. This is useful when the client is
running somewhere other than the host where the server is running.

The new api.FromEnv function is used to read configure clients from the
environment. Clients wishing to use the environment variable being
consistent with the Ollama CLI can use this new function.

* Update api/client.go

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>

* Update api/client.go

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>

---------

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2023-08-16 11:03:48 -04:00
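A minimal usage sketch of the api.FromEnv function this commit introduces (its definition appears in the api client diff further down): it honors OLLAMA_HOST and falls back to localhost:11434, so a client consistent with the Ollama CLI needs no explicit host.

```
package main

import (
	"context"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// Respects OLLAMA_HOST if set, otherwise defaults to localhost:11434.
	client, err := api.FromEnv()
	if err != nil {
		log.Fatal(err)
	}
	// Heartbeat sends HEAD / to verify the server is reachable.
	if err := client.Heartbeat(context.Background()); err != nil {
		log.Fatal(err)
	}
	log.Println("ollama server is reachable")
}
```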
Jeffrey Morgan
d15c7622b9 Update orca to orca-mini in README.md 2023-08-15 21:10:28 -04:00
Bruce MacDonald
1deb35ca64 use loaded llm for generating model file embeddings 2023-08-15 16:12:02 -03:00
Bruce MacDonald
e2de886831 do not regenerate embeddings 2023-08-15 16:10:22 -03:00
Bruce MacDonald
f0d7c2f5ea retry download on network errors 2023-08-15 15:07:19 -03:00
Bruce MacDonald
12052a7624 always remove from in progress map on download 2023-08-15 13:20:32 -03:00
Bruce MacDonald
23e1da778d Add context to api docs 2023-08-15 11:43:22 -03:00
Bruce MacDonald
326de48930 use loaded llm for embeddings 2023-08-15 10:50:54 -03:00
Bruce MacDonald
18f2cb0472 dont log fatal 2023-08-15 10:39:59 -03:00
Bruce MacDonald
53bc36d207 Update modelfile.md 2023-08-15 09:23:36 -03:00
Michael Yang
4dcf5c3e0b Merge pull request #349 from jmorganca/close-files
close open files
2023-08-14 16:15:58 -07:00
Michael Yang
d1b2f532b9 Merge pull request #350 from jmorganca/update-llama-cpp
update llama.cpp
2023-08-14 16:15:51 -07:00
Michael Yang
e26085b921 close open files 2023-08-14 16:08:06 -07:00
Michael Yang
f7b613332c update llama.cpp 2023-08-14 15:47:00 -07:00
Michael Yang
f594c8eb91 cross repo mount 2023-08-14 15:07:35 -07:00
Michael Yang
76b85bc0e9 set non-zero error code on error 2023-08-14 14:09:58 -07:00
Bruce MacDonald
af98a1773f update python example 2023-08-14 16:38:44 -03:00
Bruce MacDonald
9ae9a89883 Update modelfile.md 2023-08-14 16:26:53 -03:00
Bruce MacDonald
648f0974c6 python example 2023-08-14 15:27:13 -03:00
Bruce MacDonald
fc5230dffa Add context to api docs 2023-08-14 15:23:24 -03:00
Bruce MacDonald
2ab20095b3 log embedding eval timing 2023-08-14 12:15:55 -04:00
Bruce MacDonald
f020e1d519 always remove from in progress map on download 2023-08-14 13:09:20 -03:00
Bruce MacDonald
4b2d366c37 Update llama.go 2023-08-14 12:55:50 -03:00
Bruce MacDonald
56fd4e4ef2 log embedding eval timing 2023-08-14 12:51:31 -03:00
Bruce MacDonald
2c8b680b03 use file info for embeddings cache 2023-08-14 12:11:04 -03:00
Bruce MacDonald
99b6b60085 use model bin digest for embed digest 2023-08-14 11:57:12 -03:00
Bruce MacDonald
74f00474e1 Merge pull request #340 from gusanmaz/main
Update langchainpy.md
2023-08-14 09:38:42 -04:00
Bruce MacDonald
e9a9580bdd do not regenerate embeddings
- re-use previously evaluated embeddings when possible
- change embeddings digest identifier to be based on model name and embedded file path
2023-08-14 10:34:17 -03:00
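A purely illustrative sketch of the cache-key change described above, deriving the identifier from the model name and embedded file path; the real digest format and hash inputs in the repository may differ.

```
package main

import (
	"crypto/sha256"
	"fmt"
)

// embeddingDigest is a hypothetical helper: the same model plus the same file
// path yields the same key, so previously computed embeddings can be reused.
func embeddingDigest(model, filePath string) string {
	sum := sha256.Sum256([]byte(model + ":" + filePath))
	return fmt.Sprintf("sha256:%x", sum)
}

func main() {
	fmt.Println(embeddingDigest("llama2", "docs/notes.txt"))
}
```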
Güvenç Usanmaz
4c33a9ac67 Update langchainpy.md
base_url value for Ollama object creation is corrected.
2023-08-14 12:12:56 +03:00
Jeffrey Morgan
22885aeaee update llama.cpp to f64d44a 2023-08-12 22:47:15 -04:00
Jeffrey Morgan
ed969d2a06 add LiteLLM to README.md 2023-08-12 20:47:57 -04:00
Patrick Devine
d9cf18e28d add maximum retries when pushing (#334) 2023-08-11 15:41:55 -07:00
Jeffrey Morgan
1556162c90 create .ollama directory if it doesnt exist 2023-08-11 15:35:55 -07:00
Jeffrey Morgan
148f0225c0 create .ollama directory if it doesnt exist 2023-08-11 15:33:11 -07:00
Matt Williams
4e07941b1e Merge pull request #329 from jmorganca/matt/tutorials
Add tutorials for using Langchain with ollama
2023-08-11 15:19:39 -07:00
Matt Williams
202c29c21a resolving bmacd comment
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-11 13:51:44 -07:00
Matt Williams
c1c871620a Update docs/tutorials/langchainjs.md
Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>
2023-08-11 13:48:46 -07:00
Matt Williams
a21a8bef56 Update docs/tutorials/langchainjs.md
Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>
2023-08-11 13:48:35 -07:00
Matt Williams
522726228a Update docs/tutorials.md
Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>
2023-08-11 13:48:16 -07:00
Patrick Devine
9770e3b325 Generate private/public keypair for use w/ auth (#324) 2023-08-11 10:58:23 -07:00
Michael Yang
d617823355 Merge pull request #333 from jmorganca/off-by-one
ggml: fix off by one error
2023-08-11 10:51:06 -07:00
Michael Yang
6ed991c8e2 ggml: fix off by one error
remove used Unknown FileType
2023-08-11 10:45:22 -07:00
Michael Chiang
e41576e768 Merge branch 'new-syntax' of https://github.com/jmorganca/ollama into new-syntax 2023-08-11 09:00:43 -07:00
Michael Chiang
155c1640f1 add demo video 2023-08-11 08:58:57 -07:00
Jeffrey Morgan
f7d4947573 update header note for privategpt example 2023-08-11 08:52:26 -07:00
Jeffrey Morgan
0d7a133b15 Update README.md for privategpt 2023-08-11 08:29:19 -07:00
Jeffrey Morgan
e863066144 clean up privategpt example 2023-08-11 00:34:52 -07:00
Jeffrey Morgan
89a92477ad fix README.md for privategpt example 2023-08-11 00:26:33 -07:00
Jeffrey Morgan
5cda9cdd13 add instructions to privategpt example to try another model 2023-08-11 00:23:31 -07:00
Jeffrey Morgan
e5914eb320 add venv instructions to privategpt example 2023-08-11 00:20:22 -07:00
Jeffrey Morgan
ab78f48ff8 more setup instructions for privategpt example 2023-08-11 00:19:25 -07:00
Jeffrey Morgan
b1c88eb978 add privategpt example 2023-08-11 00:18:13 -07:00
Jeffrey Morgan
efae43f932 update langchain examples 2023-08-10 23:35:19 -07:00
Matt Williams
d3ee1329e9 Add tutorials for using Langchain with ollama
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-10 21:27:37 -07:00
Jeffrey Morgan
700c719422 remove document example for now 2023-08-10 20:25:01 -07:00
Jeffrey Morgan
55aa4aaf0f add langchain examples 2023-08-10 20:23:50 -07:00
Jeffrey Morgan
820f95c4c4 add example 2023-08-10 20:13:47 -07:00
Michael Yang
3a05d3def7 Merge pull request #326 from asarturas/document-num-gqa-parameter
Document num_gqa parameter
2023-08-10 18:18:38 -07:00
Michael Yang
edac9c2446 Merge pull request #325 from jmorganca/mxyng/typo
s/parmeter/parameter/
2023-08-10 17:30:02 -07:00
Arturas Smorgun
d9c2687fd0 document default num_gqa to 1, as it's applicable to most models
Co-authored-by: Michael Yang <mxyng@pm.me>
2023-08-11 01:29:40 +01:00
Michael Yang
6517bcc53c Merge pull request #290 from jmorganca/add-adapter-layers
implement loading ggml lora adapters through the modelfile
2023-08-10 17:23:01 -07:00
Michael Yang
4f54f25b66 Merge pull request #272 from jmorganca/decode-ggml-2
Decode ggml 2: Use decoded values
2023-08-10 17:22:48 -07:00
Michael Yang
6a6828bddf Merge pull request #167 from jmorganca/decode-ggml
partial decode ggml bin for more info
2023-08-10 17:22:40 -07:00
Arturas Smorgun
c0e7a3b90e Document num_gqa parameter
It is required to be adjusted for some models, see https://github.com/jmorganca/ollama/issues/320 for more context
2023-08-11 00:58:09 +01:00
Michael Yang
f27bc261cf s/parmeter/parameter/ 2023-08-10 16:26:06 -07:00
Michael Yang
21e6197c0b Merge pull request #322 from jmorganca/no-comment-warning
no warning on comments
2023-08-10 16:24:41 -07:00
Michael Yang
75d7d681c9 Merge pull request #323 from jmorganca/fix-convert-int
fix could not convert int
2023-08-10 16:24:33 -07:00
Michael Yang
81d8d7b73f fix could not convert int 2023-08-10 16:24:17 -07:00
Michael Yang
5c0de09a07 Merge pull request #321 from jmorganca/fix-parameters
length check for parameters
2023-08-10 16:23:10 -07:00
Michael Yang
20bf000e55 no warning on comments 2023-08-10 16:22:38 -07:00
Michael Yang
40d0c4a1dc length check for parameters 2023-08-10 16:09:02 -07:00
Jeffrey Morgan
be889b2f81 add docs for /api/embeddings 2023-08-10 15:56:59 -07:00
Jeffrey Morgan
7e26a8df31 cmd: use environment variables for server options 2023-08-10 14:17:53 -07:00
Jeffrey Morgan
4ab1da38ba guard around id() 2023-08-10 14:11:54 -07:00
Patrick Devine
be989d89d1 Token auth (#314) 2023-08-10 11:34:25 -07:00
Soroush Javadi
bea683e3bf cmd: check GetBlobsPath error (#317)
The error returned by `server.GetBlobsPath` in `showLayer` was never
checked. Check the error and return if not nil. Also, make newlines at
the end of error messages consistent and fix a typo.
2023-08-10 09:57:49 -07:00
Jeffrey Morgan
178237d37f tweak README.md 2023-08-10 09:54:03 -07:00
Jeffrey Morgan
76a678af34 app: dont always show installer window on top now that it lives in the dock 2023-08-10 09:53:46 -07:00
Jeffrey Morgan
f65169b13e clean up cli flags 2023-08-10 09:28:56 -07:00
Jeffrey Morgan
040a5b9750 clean up cli flags 2023-08-10 09:27:03 -07:00
Michael Yang
37c9a8eea9 add lora docs 2023-08-10 09:23:40 -07:00
Michael Yang
6de5d032e1 implement loading ggml lora adapters through the modelfile 2023-08-10 09:23:39 -07:00
Michael Yang
d791df75dd check memory requirements before loading 2023-08-10 09:23:11 -07:00
Michael Yang
020a3b3530 disable gpu for q5_0, q5_1, q8_0 quants 2023-08-10 09:23:11 -07:00
Michael Yang
fccf8d179f partial decode ggml bin for more info 2023-08-10 09:23:10 -07:00
Bruce MacDonald
5b5cc9c9f1 embeddings endpoint 2023-08-10 11:49:55 -04:00
Bruce MacDonald
4b3507f036 embeddings endpoint
Co-Authored-By: Jeffrey Morgan <jmorganca@gmail.com>
2023-08-10 11:45:57 -04:00
Jun Tian
5ebce03c77 Add an example on multiline input (#311) 2023-08-10 08:22:28 -07:00
Bruce MacDonald
5e25f801ed fix a typo in the tweetwriter example Modelfile 2023-08-10 10:19:53 -04:00
Bruce MacDonald
8e1234b758 fix embeddings invalid values 2023-08-10 10:17:00 -04:00
Soroush Javadi
10885986b8 fix a typo in the tweetwriter example Modelfile 2023-08-10 15:12:48 +03:30
Bruce MacDonald
984c9c628c fix embeddings invalid values 2023-08-09 16:50:53 -04:00
Bruce MacDonald
43c40c500e add embed docs for modelfile 2023-08-09 16:14:58 -04:00
Bruce MacDonald
c4861360ec remove embed docs 2023-08-09 16:14:19 -04:00
Bruce MacDonald
9738ef85db allow for concurrent pulls of the same files 2023-08-09 11:35:24 -04:00
Bruce MacDonald
ac971c56d1 Update images.go 2023-08-09 11:31:54 -04:00
Bruce MacDonald
8228d166ce pr comments 2023-08-09 11:31:54 -04:00
Bruce MacDonald
907e6c56b3 unlock downloadu in case or requestDownload err 2023-08-09 11:31:54 -04:00
Bruce MacDonald
868e3b31c7 allow for concurrent pulls of the same files 2023-08-09 11:31:54 -04:00
Bruce MacDonald
09d8bf6730 fix build errors 2023-08-09 10:45:57 -04:00
Bruce MacDonald
7a5f3616fd embed text document in modelfile 2023-08-09 10:26:19 -04:00
Jeffrey Morgan
cff002b824 use content type application/x-ndjson for streaming responses 2023-08-08 21:38:10 -07:00
Jeffrey Morgan
55cf5021f0 update langchain example to include python 2023-08-08 21:03:10 -07:00
Jeffrey Morgan
f58caa5ab5 update README.md 2023-08-08 15:50:23 -07:00
Jeffrey Morgan
82df473ec9 use note syntax in README.md 2023-08-08 15:49:50 -07:00
Jeffrey Morgan
e184c1d035 Link to api.md in README.md 2023-08-08 15:48:47 -07:00
Jeffrey Morgan
371d4e5df3 docs: fix invalid json in api.md 2023-08-08 15:46:05 -07:00
Jeffrey Morgan
1f78e409b4 docs: format with prettier 2023-08-08 15:41:48 -07:00
Jeffrey Morgan
34a88cd776 docs: update api.md formatting 2023-08-08 15:41:19 -07:00
Bruce MacDonald
1bee2347be pr feedback
- defer closing llm on embedding
- do not override licenses
- remove debugging print line
- reformat model file docs
2023-08-08 17:01:37 -04:00
Jeffrey Morgan
a027a7dd65 add 0.0.0.0 as an allowed origin by default
Fixes #282
2023-08-08 13:39:50 -07:00
Jeffrey Morgan
22986ccb38 add llama2:70b to the model library list 2023-08-08 13:08:05 -07:00
Bruce MacDonald
884d78ceb3 allow embedding from model binary 2023-08-08 14:38:57 -04:00
Bruce MacDonald
3ceac05108 Add embedding docs 2023-08-08 14:04:11 -04:00
Bruce MacDonald
21ddcaa1f1 pr comments
- default to embeddings enabled
- move embedding logic for loaded model to request
- allow embedding full directory
- close llm on reload
2023-08-08 13:49:37 -04:00
Michael Yang
f2074ed4c0 Merge pull request #306 from jmorganca/default-keep-system
automatically set num_keep if num_keep < 0
2023-08-08 09:25:34 -07:00
Bruce MacDonald
a6f6d18f83 embed text document in modelfile 2023-08-08 11:27:17 -04:00
Bruce MacDonald
34a13a9d05 pass flags to serve to allow setting allowed-origins + host and port 2023-08-08 10:41:42 -04:00
Jeffrey Morgan
8713ac23a8 allow overriding template and system in /api/generate
Fixes #297
Fixes #296
2023-08-08 00:55:34 -04:00
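The override works through the System and Template fields added to api.GenerateRequest (visible in the api package diff further down). A minimal sketch, assuming the usual Go-template placeholders:

```
package main

import "github.com/jmorganca/ollama/api"

func overrideRequest() *api.GenerateRequest {
	return &api.GenerateRequest{
		Model:    "llama2",
		Prompt:   "Why is the sky blue?",
		System:   "You are a concise assistant.", // replaces the model's SYSTEM layer
		Template: "{{ .System }} {{ .Prompt }}",  // replaces the model's TEMPLATE layer
	}
}

func main() {
	_ = overrideRequest()
}
```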
Jeffrey Morgan
5eb712f962 trim whitespace before checking stop conditions
Fixes #295
2023-08-08 00:29:19 -04:00
Michael Yang
4dc5b117dd automatically set num_keep if num_keep < 0
num_keep defines how many tokens to keep in the context when truncating
inputs. if left to its default value of -1, the server will calculate
num_keep to be the left of the system instructions
2023-08-07 16:19:12 -07:00
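Because GenerateRequest now carries options as a generic map (see the api package diff further down), num_keep can be overridden per request by its JSON tag; a sketch under that assumption:

```
package main

import "github.com/jmorganca/ollama/api"

func requestWithNumKeep(prompt string) *api.GenerateRequest {
	return &api.GenerateRequest{
		Model:  "llama2",
		Prompt: prompt,
		Options: map[string]interface{}{
			// -1 (the default) lets the server calculate num_keep itself;
			// a positive value pins that many leading tokens during truncation.
			"num_keep": 24,
		},
	}
}

func main() {
	_ = requestWithNumKeep("Why is the sky blue?")
}
```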
Matt Williams
931a5f3cb9 Merge pull request #304 from jmorganca/matt/docs
missed a backtick
2023-08-07 15:14:06 -07:00
Jeffrey Morgan
639288bf2b make ollama binary executable on build 2023-08-07 18:10:37 -04:00
Jeffrey Morgan
d112c15d58 remove old library and web directories 2023-08-07 18:09:24 -04:00
Matt Williams
1267895e44 missed a backtick
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-07 13:53:49 -07:00
Matt Williams
089d03bc8d Merge pull request #289 from jmorganca/docs
First draft of API Docs
2023-08-07 13:46:22 -07:00
Matt Williams
e37f4c4f42 DockerIt example
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-07 13:45:22 -07:00
Michael Yang
ab3ced9d32 Merge pull request #276 from jmorganca/rope-freq
configurable rope frequency parameters
2023-08-07 13:39:38 -07:00
Matt Williams
0c52b4509b get rid of namespace and site
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-07 13:27:58 -07:00
Matt Williams
13aace3d34 clarify some more
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-07 13:21:54 -07:00
Matt Williams
2b3bb41598 model name format added
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-07 13:17:16 -07:00
cmiller01
93492f1e18 correct precedence of serve params (args over env over default) 2023-08-07 19:55:20 +00:00
Michael Chiang
54ba3e2ceb langchain JS integration (#302)
langchain JS integration
2023-08-07 12:21:36 -04:00
Matt Williams
4904cd8bcd update simpler code samples
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-07 07:40:38 -07:00
Matt Williams
8a45359ec6 Update docs/api.md
Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2023-08-07 07:33:05 -07:00
cmiller01
fb593b7bfc pass flags to serve to allow setting allowed-origins + host and port
* resolves: https://github.com/jmorganca/ollama/issues/300 and
https://github.com/jmorganca/ollama/issues/282

* example usage:
```
ollama serve --port 9999 --allowed-origins "http://foo.example.com,http://192.0.0.1"
```
2023-08-07 03:34:37 +00:00
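The precedence fixed a few commits above (arguments over environment over defaults) resolves roughly as in the sketch below; the flag name and default address are illustrative, not the project's exact wiring.

```
package main

import (
	"flag"
	"fmt"
	"os"
)

func resolveHost() string {
	host := flag.String("host", "", "address for ollama serve to bind")
	flag.Parse()
	if *host != "" { // 1. explicit argument wins
		return *host
	}
	if env := os.Getenv("OLLAMA_HOST"); env != "" { // 2. then the environment
		return env
	}
	return "127.0.0.1:11434" // 3. finally the built-in default
}

func main() {
	fmt.Println("serving on", resolveHost())
}
```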
Matt Williams
2544b8afa1 update as per Mike's comments
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 17:42:24 -07:00
Matt Williams
ac1b04f271 Update docs/api.md
Co-authored-by: Michael Yang <mxyng@pm.me>
2023-08-04 17:40:52 -07:00
Matt Williams
123fdeb919 Update docs/api.md
Co-authored-by: Michael Yang <mxyng@pm.me>
2023-08-04 17:38:52 -07:00
Matt Williams
5c82bf95d1 Update docs/api.md
Co-authored-by: Michael Yang <mxyng@pm.me>
2023-08-04 17:12:24 -07:00
Matt Williams
38a9b1618c missed some quotes
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 16:09:07 -07:00
Matt Williams
c18be72a3b complete 1st draft of api docs
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 16:08:11 -07:00
Matt Williams
a101fe51a7 clean up
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 12:56:41 -07:00
Bruce MacDonald
06fc48ad66 Update README.md (#285)
Ollama now supports Intel Macs
2023-08-04 15:45:55 -04:00
Matt Williams
d93e2f9210 fleshing out response
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 12:38:58 -07:00
Matt Williams
31edc829fc continuing
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 12:30:23 -07:00
Matt Williams
b31104768c filling out generate
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 12:27:47 -07:00
Matt Williams
b662d9fd8c starting to build out some docs
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 11:55:00 -07:00
Michael Yang
b9f4d67554 configurable rope frequency parameters 2023-08-03 22:11:58 -07:00
Jeffrey Morgan
e3fb1fd3f1 server: compare options correctly 2023-08-03 15:55:40 -04:00
Michael Yang
29b897f525 Merge pull request #253 from jmorganca/upload
use a pipe to push to registry with progress
2023-08-03 12:11:23 -07:00
Michael Yang
85aeb42869 Merge pull request #270 from jmorganca/update-llama-cpp
update llama.cpp
2023-08-03 12:09:00 -07:00
Michael Yang
c5bcf32823 update llama.cpp 2023-08-03 11:50:24 -07:00
Michael Yang
a71ff3f6a2 use a pipe to push to registry with progress
switch to a monolithic upload instead of a chunk upload through a pipe
to report progress
2023-08-03 10:37:13 -07:00
Michael Chiang
f0b365a478 Merge pull request #268 from jmorganca/mchiang0610-patch-2
Update README.md
2023-08-03 11:23:31 -04:00
Michael Chiang
df8048fecd Update README.md 2023-08-03 11:22:57 -04:00
Michael Yang
da2459d519 Update README.md (#265) 2023-08-02 22:38:32 -04:00
Bruce MacDonald
bd6d741d87 tell users to check the server error logs 2023-08-02 17:08:11 -04:00
Bruce MacDonald
8b1e791820 allow specifying zero values in modelfile 2023-08-02 17:07:53 -04:00
Jeffrey Morgan
03cff3a225 server: reset digest at end of generate 2023-08-02 16:15:44 -04:00
Michael Yang
cc509a994e Merge pull request #260 from jmorganca/embed-ggml-metal
override ggml-metal if the file is different
2023-08-02 13:01:46 -07:00
Michael Yang
0e79e52ddd override ggml-metal if the file is different 2023-08-02 12:50:30 -07:00
Jeffrey Morgan
6fbb380076 hide dock icon if window closes 2023-08-02 11:05:34 -04:00
Bruce MacDonald
8f8b6288ac check server is running before running command 2023-08-02 10:51:23 -04:00
Michael Yang
b98096389d Merge pull request #255 from jmorganca/update-llama-cpp
Update llama cpp
2023-08-01 17:18:33 -07:00
Michael Yang
74a5f7e698 no gpu for 70B model 2023-08-01 17:12:50 -07:00
Michael Yang
7a1c3e62dc update llama.cpp 2023-08-01 16:54:01 -07:00
Jeffrey Morgan
da52f5bfdd run npm install on build 2023-08-01 17:41:25 -04:00
Bruce MacDonald
50e87c6691 read from os executable 2023-08-01 16:01:55 -04:00
Gerd
e4a970ece1 Add model update to README.md (#252) 2023-08-01 15:06:33 -04:00
Jeffrey Morgan
4ca43a694c remove newlines between list items in README.md 2023-08-01 15:05:39 -04:00
Bruce MacDonald
765994362c use head to check heartbeat 2023-08-01 14:50:38 -04:00
Bruce MacDonald
40a25bf8c3 pr comments 2023-08-01 13:48:48 -04:00
Bruce MacDonald
1c5a8770ee read runner parameter options from map
- read runner options from map to see what was specified explicitly and overwrite zero values
2023-08-01 13:38:19 -04:00
Bruce MacDonald
daa0d1de7a allow specifying zero values in modelfile 2023-08-01 13:37:50 -04:00
Jeffrey Morgan
58daeb962a add llama2-uncensored to model list 2023-08-01 11:25:01 -04:00
Jeffrey Morgan
528bafa585 cache loaded model 2023-08-01 11:24:18 -04:00
Michael Chiang
81f75696e2 Merge pull request #251 from jmorganca/mchiang0610-patch-2
add examples of projects using Ollama
2023-08-01 11:16:14 -04:00
Michael Chiang
8bdcf894bd Update README.md
add examples of projects using Ollama
2023-08-01 11:14:54 -04:00
Michael Chiang
fe530423a5 Merge pull request #249 from sestinj/main
Add "Awesome projects built with Ollama" section to README, including Continue
2023-08-01 08:07:50 -07:00
Michael Yang
05e390205b Merge pull request #250 from jmorganca/fixes
Fixes
2023-07-31 21:47:42 -07:00
Michael Yang
872011630a fix license 2023-07-31 21:46:48 -07:00
Michael Yang
203fdbc4b8 check err 2023-07-31 21:46:48 -07:00
Michael Yang
70e0ab6b3d remove unnecessary fmt.Sprintf 2023-07-31 21:46:47 -07:00
Michael Yang
319f078dd9 remove -Werror
there are compile warnings on Linux which -Werror elevates to errors,
preventing compile
2023-07-31 21:45:56 -07:00
Jeffrey Morgan
9968153729 fix Go warnings 2023-07-31 21:37:40 -04:00
Jeffrey Morgan
7da249fcc1 only build metal for darwin,arm target 2023-07-31 21:35:23 -04:00
Bruce MacDonald
f529626c6c log prediction failures 2023-07-31 17:39:20 -04:00
Bruce MacDonald
36d6081ed1 find symlink of mac app 2023-07-31 17:38:10 -04:00
Nate Sesti
aadedda486 Update README.md 2023-07-31 13:59:39 -07:00
Bruce MacDonald
671eec6da9 log prediction failures 2023-07-31 16:46:37 -04:00
Bruce MacDonald
e72fe7945f check server is running before running command 2023-07-31 16:25:57 -04:00
Bruce MacDonald
d1c098b038 tell users to check the server error logs 2023-07-31 11:49:33 -04:00
Jeffrey Morgan
90ba0b80c7 fix build_darwin.sh 2023-07-29 22:36:59 -04:00
Patrick Devine
39bb25d5f6 allow multiline text using three double-quotes (#239) 2023-07-29 13:35:23 -07:00
Michael Yang
eadee46840 Merge pull request #236 from jmorganca/check-os-walk
check os.Walk err
2023-07-28 14:14:21 -07:00
Jeffrey Morgan
2e2e624d21 app: use notarytool for notarizing 2023-07-28 12:23:56 -07:00
Jeffrey Morgan
ed832ce3b7 darwin build script 2023-07-28 12:23:27 -07:00
Michael Yang
227da16909 Merge pull request #235 from jmorganca/rm-ioutil
remove io/ioutil import
2023-07-28 12:19:06 -07:00
Michael Yang
bd58528fbd check os.Walk err 2023-07-28 12:15:31 -07:00
Michael Yang
c5e447a359 remove io/ioutil import
ioutil is deprecated
2023-07-28 12:06:03 -07:00
Michael Yang
fc40a4f166 Merge pull request #234 from jmorganca/fix-parse-license
use max scan token size to hold large objects
2023-07-28 12:03:51 -07:00
Michael Yang
9c7f30d31c use max scan token size to hold large objects 2023-07-28 11:43:31 -07:00
Bruce MacDonald
6ed3ec0cb3 Allow specifying stop conditions in Modelfile 2023-07-28 12:31:08 -04:00
Bruce MacDonald
47bda0b860 add stop to docs 2023-07-28 12:30:27 -04:00
Jeffrey Morgan
c75cafdb58 build for universal architecture on macos 2023-07-28 12:18:11 -04:00
Bruce MacDonald
f5cbcb08e6 specify stop params separately 2023-07-28 11:29:00 -04:00
Jeffrey Morgan
67b6f8ba86 add ggml-metal.metal to .gitignore 2023-07-28 11:04:21 -04:00
Bruce MacDonald
184ad8f057 allow specifying stop conditions in modelfile 2023-07-28 11:02:04 -04:00
Jeffrey Morgan
822a0e36eb lower batch size to 512 2023-07-28 10:56:21 -04:00
Jeffrey Morgan
18b6b601ad app: cleanup README.md 2023-07-28 10:51:41 -04:00
Bruce MacDonald
0345070dfa update model file docs 2023-07-28 10:33:52 -04:00
Jeffrey Morgan
dffc8b6e09 update llama.cpp to d91f3f0 2023-07-28 08:07:48 -04:00
Jeffrey Morgan
0871083776 app: fix tray icon color scheme in dark mode 2023-07-28 07:03:46 -04:00
Michael Yang
e5b26c3aa2 Merge pull request #221 from jmorganca/embed-metal
embed ggml-metal.metal
2023-07-27 17:24:41 -07:00
Michael Yang
3549676678 embed ggml-metal.metal 2023-07-27 17:23:29 -07:00
Michael Yang
8fa477fadb Merge pull request #225 from jmorganca/stop-conditions
add stop conditions
2023-07-27 17:20:56 -07:00
Michael Yang
fadf75f99d add stop conditions 2023-07-27 17:00:47 -07:00
Patrick Devine
01d155c969 show system/template/license layers from cmd prompt (#223) 2023-07-27 16:58:40 -07:00
Michael Yang
5685c16d4e Merge pull request #211 from jmorganca/update-llama-cpp
update llama.cpp
2023-07-27 16:57:03 -07:00
Michael Yang
db77dfe01f Merge pull request #102 from jmorganca/session-id
Session
2023-07-27 16:46:29 -07:00
Michael Yang
ad3a7d0e2c add NumGQA 2023-07-27 14:05:11 -07:00
Michael Yang
18ffeeec45 update llama.cpp 2023-07-27 14:05:11 -07:00
Jeffrey Morgan
688661ab9b increase default batch size to 1024 2023-07-27 16:51:01 -04:00
Michael Chiang
36ad90e8e3 Merge pull request #231 from jmorganca/mchiang0610-discord
Update discord invite link
2023-07-27 15:43:52 -04:00
Michael Chiang
6fff59c637 Update discord invite link
Update discord invite link
2023-07-27 15:43:15 -04:00
Bruce MacDonald
fee7687cf3 Update modelfile.md 2023-07-27 15:15:10 -04:00
Bruce MacDonald
d3bfb4889c Update README.md 2023-07-27 15:13:50 -04:00
Bruce MacDonald
1ac38ec89c improve modelfile docs 2023-07-27 15:13:04 -04:00
Michael Yang
1ad8266473 Merge pull request #226 from jmorganca/fix-modelfile-quotes
refactor scan multiline for reuse
2023-07-27 11:45:41 -07:00
Michael Yang
f5ac8ddfb4 refactor scan multiline for reuse 2023-07-27 11:30:51 -07:00
Michael Yang
cca61181cb sample metrics 2023-07-27 09:31:44 -07:00
Michael Yang
c490416189 lock on llm.lock(); decrease batch size 2023-07-27 09:31:44 -07:00
Michael Yang
f62a882760 add session expiration 2023-07-27 09:31:44 -07:00
Michael Yang
3003fc03fc update predict code 2023-07-27 09:31:44 -07:00
Michael Yang
32aec66e6a add load duration 2023-07-27 09:31:44 -07:00
Michael Yang
35af37a2cb session id 2023-07-27 09:31:44 -07:00
Jeffrey Morgan
dbb3174cbc app: fix #218 and keep dock open on install 2023-07-27 10:53:38 -04:00
Jeffrey Morgan
31673d26d0 app: quit other instance when starting 2023-07-27 00:57:25 -04:00
Jeffrey Morgan
8ba0f328af clobber release artifacts 2023-07-26 18:58:28 -04:00
Jeffrey Morgan
d0e934b497 app: tray cleanup 2023-07-26 14:24:56 -04:00
Jeffrey Morgan
e751e47d70 app: remove dialog, icons for updates 2023-07-26 14:04:36 -04:00
Jeffrey Morgan
19d0f2b4cc publish as pre-release first 2023-07-26 10:48:49 -04:00
Jeffrey Morgan
c48f07f821 app: dont advance on error 2023-07-26 10:46:43 -04:00
Jeffrey Morgan
dc642aa07d web: skip pre-releases 2023-07-25 17:11:57 -04:00
Bruce MacDonald
f1ff892fdd pull model on make if not present locally 2023-07-25 16:53:01 -04:00
Jeffrey Morgan
3f2a100465 app: log app errors to console 2023-07-25 15:42:04 -04:00
Michael Yang
95397416f3 Merge pull request #212 from jmorganca/fix-multiline-parsing
fix multiline string
2023-07-25 11:53:51 -07:00
Michael Yang
8a86aae019 Merge pull request #209 from jmorganca/k-quants
enable k quants
2023-07-25 11:53:29 -07:00
Michael Yang
24c2c77057 fix multiline string
the data needs to remove the multiline quotes but include the command:

e.g.

TEMPLATE """
my template values
"""

should be

TEMPLATE
my template values

after scanning
2023-07-25 11:51:43 -07:00
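A simplified sketch of the scanning behaviour described above: the surrounding triple quotes of a multiline value are stripped while the command itself is kept. The repository's real scanner is more involved; this only mirrors the before/after example.

```
package main

import (
	"fmt"
	"strings"
)

// stripMultiline removes the surrounding """ delimiters from a multiline
// value but keeps the command, matching the TEMPLATE example above.
func stripMultiline(command, raw string) (string, string) {
	value := strings.TrimSpace(raw)
	value = strings.TrimPrefix(value, `"""`)
	value = strings.TrimSuffix(value, `"""`)
	return command, strings.TrimSpace(value)
}

func main() {
	cmd, value := stripMultiline("TEMPLATE", "\"\"\"\nmy template values\n\"\"\"")
	fmt.Println(cmd)
	fmt.Println(value)
	// Output:
	// TEMPLATE
	// my template values
}
```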
Michael Yang
5614984f06 Merge pull request #189 from Mohit-Gaur/main
Improve command parsing and multiline string handling
2023-07-25 11:28:10 -07:00
Bruce MacDonald
4c1caa3733 download models when creating from modelfile 2023-07-25 14:25:13 -04:00
Bruce MacDonald
12ab8f8f5f Revert "pull model on make if not present locally"
This reverts commit 360a10ace391a674de60aa7b9b8cb65e8074027c.
2023-07-25 14:18:46 -04:00
Bruce MacDonald
8ebbd12f21 pull model on make if not present locally 2023-07-25 14:18:46 -04:00
Eva Ho
07971759fa fix typo 2023-07-25 13:30:52 -04:00
Mohit Gaur
f5f79049c2 Incorporate code review improvements 2023-07-25 22:52:23 +05:30
Michael Yang
726bc647b2 enable k quants 2023-07-25 08:39:58 -07:00
Bruce MacDonald
af9039a167 better error message when model not found on pull 2023-07-25 10:30:48 -04:00
Bruce MacDonald
07ed69bc37 remove reduandant err var 2023-07-25 10:30:14 -04:00
Michael Yang
0deb3767fc Merge pull request #205 from jmorganca/accelerate
enable accelerate
2023-07-24 20:06:05 -07:00
Michael Yang
cb55fa9270 enable accelerate 2023-07-24 17:14:45 -07:00
Michael Yang
93bc9f17a1 Merge pull request #192 from jmorganca/update-development.md
update development.md
2023-07-24 16:13:22 -07:00
Bruce MacDonald
536028c35a better error message when model not found on pull 2023-07-24 17:48:17 -04:00
Michael Chiang
aedf3d1f38 Merge pull request #196 from isbkch/main
add devops-engineer example
2023-07-24 17:10:22 -04:00
iLyas Bakouch
91d927abc5 Update Modelfile 2023-07-24 16:43:11 -04:00
iLyas Bakouch
ba8df10a43 Update examples/devops-engineer/Modelfile
Co-authored-by: Jeffrey Morgan <251292+jmorganca@users.noreply.github.com>
2023-07-24 16:42:08 -04:00
Bruce MacDonald
abf614804b remove file on digest mismatch 2023-07-24 21:59:12 +02:00
Bruce MacDonald
a0dbbb23c4 truncate file size on resume 2023-07-24 21:58:32 +02:00
Bruce MacDonald
0fd6278446 do not panic server if file cannot be opened 2023-07-24 15:24:34 -04:00
Bruce MacDonald
29fe07f0cc make response errors unique for error trace 2023-07-24 21:21:18 +02:00
Bruce MacDonald
abfc73d31e make response errors unique for error trace 2023-07-24 15:04:21 -04:00
Bruce MacDonald
5a5ca8e7ff remove file on digest mismatch 2023-07-24 14:53:01 -04:00
Ilyas Bakouch
f24a6f5988 add devops-engineer example 2023-07-24 14:44:44 -04:00
Bruce MacDonald
fdbef6c95e truncate file size on resume 2023-07-24 14:36:19 -04:00
Michael Yang
24e43e3212 update development.md 2023-07-24 09:43:57 -07:00
Patrick Devine
4cb42ca55e add copy command (#191) 2023-07-24 11:27:28 -04:00
Michael Yang
ec5e22ac85 Merge pull request #174 from jmorganca/tokenize
allocate a large enough tokens slice
2023-07-24 08:22:51 -07:00
Mohit Gaur
ed89da92b4 Improve command parsing and multiline string handling 2023-07-24 18:11:13 +05:30
Jeffrey Morgan
a3297fed41 add /api/create docs to readme 2023-07-23 18:01:05 -04:00
Patrick Devine
88c55199f8 change push to chunked uploads from monolithic (#179) 2023-07-22 17:31:26 -07:00
Michael Yang
b71c67b6ba allocate a large enough tokens slice 2023-07-21 23:05:15 -07:00
132 changed files with 18047 additions and 9011 deletions

View File

@@ -4,4 +4,5 @@ llama/build
.vscode
ollama
app
web
web
.env

1
.gitignore vendored
View File

@@ -5,3 +5,4 @@
.swp
dist
ollama
/ggml-metal.metal

View File

@@ -9,13 +9,13 @@
[![Discord](https://dcbadge.vercel.app/api/server/ollama?style=flat&compact=true)](https://discord.gg/ollama)
> Note: Ollama is in early preview. Please report any issues you find.
Run, create, and share large language models (LLMs).
> Note: Ollama is in early preview. Please report any issues you find.
## Download
- [Download](https://ollama.ai/download) for macOS on Apple Silicon (Intel coming soon)
- [Download](https://ollama.ai/download) for macOS
- Download for Windows and Linux (coming soon)
- Build [from source](#building)
@@ -29,16 +29,21 @@ ollama run llama2
## Model library
`ollama` includes a library of open-source models:
Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library "ollama model library")
| Model | Parameters | Size | Download |
| ------------------------ | ---------- | ----- | --------------------------- |
| Llama2 | 7B | 3.8GB | `ollama pull llama2` |
| Llama2 13B | 13B | 7.3GB | `ollama pull llama2:13b` |
| Orca Mini | 3B | 1.9GB | `ollama pull orca` |
| Vicuna | 7B | 3.8GB | `ollama pull vicuna` |
| Nous-Hermes | 13B | 7.3GB | `ollama pull nous-hermes` |
| Wizard Vicuna Uncensored | 13B | 7.3GB | `ollama pull wizard-vicuna` |
Here are some example open-source models that can be downloaded:
| Model | Parameters | Size | Download |
| ------------------------ | ---------- | ----- | ------------------------------- |
| Llama2 | 7B | 3.8GB | `ollama pull llama2` |
| Llama2 13B | 13B | 7.3GB | `ollama pull llama2:13b` |
| Llama2 70B | 70B | 39GB | `ollama pull llama2:70b` |
| Llama2 Uncensored | 7B | 3.8GB | `ollama pull llama2-uncensored` |
| Orca Mini | 3B | 1.9GB | `ollama pull orca-mini` |
| Vicuna | 7B | 3.8GB | `ollama pull vicuna` |
| Nous-Hermes | 7B | 3.8GB | `ollama pull nous-hermes` |
| Nous-Hermes 13B | 13B | 7.3GB | `ollama pull nous-hermes:13b` |
| Wizard Vicuna Uncensored | 13B | 7.3GB | `ollama pull wizard-vicuna` |
> Note: You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.
@@ -52,6 +57,15 @@ ollama run llama2
Hello! How can I help you today?
```
For multiline input, you can wrap text with `"""`:
```
>>> """Hello,
... world!
... """
I'm a basic program that prints the famous "Hello, world!" message to the console.
```
### Create a custom model
Pull a base model:
@@ -60,6 +74,8 @@ Pull a base model:
ollama pull llama2
```
> To update a model to the latest version, run `ollama pull llama2` again. The model will be updated (if necessary).
Create a `Modelfile`:
```
@@ -83,7 +99,7 @@ ollama run mario
Hello! It's your friend Mario.
```
For more examples, see the [examples](./examples) directory.
For more examples, see the [examples](./examples) directory. For more information on creating a Modelfile, see the [Modelfile](./docs/modelfile.md) documentation.
### Pull a model from the registry
@@ -128,10 +144,23 @@ Finally, run a model!
## REST API
### `POST /api/generate`
> See the [API documentation](./docs/api.md) for all endpoints.
Generate text from a model.
Ollama has an API for running and managing models. For example to generate text from a model:
```
curl -X POST http://localhost:11434/api/generate -d '{"model": "llama2", "prompt":"Why is the sky blue?"}'
curl -X POST http://localhost:11434/api/generate -d '{
"model": "llama2",
"prompt":"Why is the sky blue?"
}'
```
## Tools using Ollama
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with a question-answering [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa).
- [Continue](https://github.com/continuedev/continue) - embeds Ollama inside Visual Studio Code. The extension lets you highlight code to add to the prompt, ask questions in the sidebar, and generate code inline.
- [LiteLLM](https://github.com/BerriAI/litellm) a lightweight python package to simplify LLM API calls
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) - interact with Ollama as a chatbot on Discord.
- [Raycast Ollama](https://github.com/MassimilianoPasquini97/raycast_ollama) - Raycast extension to use Ollama for local llama inference on Raycast.
- [Simple HTML UI for Ollama](https://github.com/rtcfirefly/ollama-ui)
- [Emacs client](https://github.com/zweifisch/ollama) for Ollama

View File

@@ -9,10 +9,21 @@ import (
"io"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"github.com/jmorganca/ollama/version"
)
const DefaultHost = "localhost:11434"
var (
envHost = os.Getenv("OLLAMA_HOST")
)
type Client struct {
base url.URL
Base url.URL
HTTP http.Client
Headers http.Header
}
@@ -33,16 +44,34 @@ func checkError(resp *http.Response, body []byte) error {
return apiError
}
func NewClient(hosts ...string) *Client {
host := "127.0.0.1:11434"
if len(hosts) > 0 {
host = hosts[0]
// Host returns the default host to use for the client. It is determined in the following order:
// 1. The OLLAMA_HOST environment variable
// 2. The default host (localhost:11434)
func Host() string {
if envHost != "" {
return envHost
}
return DefaultHost
}
// FromEnv creates a new client using Host() as the host. An error is returns
// if the host is invalid.
func FromEnv() (*Client, error) {
h := Host()
if !strings.HasPrefix(h, "http://") && !strings.HasPrefix(h, "https://") {
h = "http://" + h
}
return &Client{
base: url.URL{Scheme: "http", Host: host},
HTTP: http.Client{},
u, err := url.Parse(h)
if err != nil {
return nil, fmt.Errorf("could not parse host: %w", err)
}
if u.Port() == "" {
u.Host += ":11434"
}
return &Client{Base: *u, HTTP: http.Client{}}, nil
}
func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
@@ -57,21 +86,21 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
reqBody = bytes.NewReader(data)
}
url := c.base.JoinPath(path).String()
req, err := http.NewRequestWithContext(ctx, method, url, reqBody)
requestURL := c.Base.JoinPath(path)
request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), reqBody)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
request.Header.Set("Content-Type", "application/json")
request.Header.Set("Accept", "application/json")
request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
for k, v := range c.Headers {
req.Header[k] = v
request.Header[k] = v
}
respObj, err := c.HTTP.Do(req)
respObj, err := c.HTTP.Do(request)
if err != nil {
return err
}
@@ -105,13 +134,15 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
buf = bytes.NewBuffer(bts)
}
request, err := http.NewRequestWithContext(ctx, method, c.base.JoinPath(path).String(), buf)
requestURL := c.Base.JoinPath(path)
request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), buf)
if err != nil {
return err
}
request.Header.Set("Content-Type", "application/json")
request.Header.Set("Accept", "application/json")
request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
response, err := http.DefaultClient.Do(request)
if err != nil {
@@ -131,7 +162,7 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
}
if errorResponse.Error != "" {
return fmt.Errorf("stream: %s", errorResponse.Error)
return fmt.Errorf(errorResponse.Error)
}
if response.StatusCode >= 400 {
@@ -189,11 +220,11 @@ func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc
})
}
type CreateProgressFunc func(CreateProgress) error
type CreateProgressFunc func(ProgressResponse) error
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
var resp CreateProgress
var resp ProgressResponse
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
@@ -210,9 +241,23 @@ func (c *Client) List(ctx context.Context) (*ListResponse, error) {
return &lr, nil
}
func (c *Client) Copy(ctx context.Context, req *CopyRequest) error {
if err := c.do(ctx, http.MethodPost, "/api/copy", req, nil); err != nil {
return err
}
return nil
}
func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
if err := c.do(ctx, http.MethodDelete, "/api/delete", req, nil); err != nil {
return err
}
return nil
}
func (c *Client) Heartbeat(ctx context.Context) error {
if err := c.do(ctx, http.MethodHead, "/", nil, nil); err != nil {
return err
}
return nil
}

View File

@@ -1,9 +1,14 @@
package api
import (
"encoding/json"
"fmt"
"log"
"math"
"os"
"reflect"
"runtime"
"strings"
"time"
)
@@ -28,11 +33,24 @@ func (e StatusError) Error() string {
}
type GenerateRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
Context []int `json:"context,omitempty"`
Model string `json:"model"`
Prompt string `json:"prompt"`
System string `json:"system"`
Template string `json:"template"`
Context []int `json:"context,omitempty"`
Options `json:"options"`
Options map[string]interface{} `json:"options"`
}
type EmbeddingRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
Options map[string]interface{} `json:"options"`
}
type EmbeddingResponse struct {
Embedding []float64 `json:"embedding"`
}
type CreateRequest struct {
@@ -40,14 +58,15 @@ type CreateRequest struct {
Path string `json:"path"`
}
type CreateProgress struct {
Status string `json:"status"`
}
type DeleteRequest struct {
Name string `json:"name"`
}
type CopyRequest struct {
Source string `json:"source"`
Destination string `json:"destination"`
}
type PullRequest struct {
Name string `json:"name"`
Insecure bool `json:"insecure,omitempty"`
@@ -79,6 +98,10 @@ type ListResponseModel struct {
Size int `json:"size"`
}
type TokenResponse struct {
Token string `json:"token"`
}
type GenerateResponse struct {
Model string `json:"model"`
CreatedAt time.Time `json:"created_at"`
@@ -88,6 +111,9 @@ type GenerateResponse struct {
Context []int `json:"context,omitempty"`
TotalDuration time.Duration `json:"total_duration,omitempty"`
LoadDuration time.Duration `json:"load_duration,omitempty"`
SampleCount int `json:"sample_count,omitempty"`
SampleDuration time.Duration `json:"sample_duration,omitempty"`
PromptEvalCount int `json:"prompt_eval_count,omitempty"`
PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"`
EvalCount int `json:"eval_count,omitempty"`
@@ -99,6 +125,19 @@ func (r *GenerateResponse) Summary() {
fmt.Fprintf(os.Stderr, "total duration: %v\n", r.TotalDuration)
}
if r.LoadDuration > 0 {
fmt.Fprintf(os.Stderr, "load duration: %v\n", r.LoadDuration)
}
if r.SampleCount > 0 {
fmt.Fprintf(os.Stderr, "sample count: %d token(s)\n", r.SampleCount)
}
if r.SampleDuration > 0 {
fmt.Fprintf(os.Stderr, "sample duration: %s\n", r.SampleDuration)
fmt.Fprintf(os.Stderr, "sample rate: %.2f tokens/s\n", float64(r.SampleCount)/r.SampleDuration.Seconds())
}
if r.PromptEvalCount > 0 {
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
}
@@ -125,50 +164,142 @@ type Options struct {
UseNUMA bool `json:"numa,omitempty"`
// Model options
NumCtx int `json:"num_ctx,omitempty"`
NumBatch int `json:"num_batch,omitempty"`
NumGPU int `json:"num_gpu,omitempty"`
MainGPU int `json:"main_gpu,omitempty"`
LowVRAM bool `json:"low_vram,omitempty"`
F16KV bool `json:"f16_kv,omitempty"`
LogitsAll bool `json:"logits_all,omitempty"`
VocabOnly bool `json:"vocab_only,omitempty"`
UseMMap bool `json:"use_mmap,omitempty"`
UseMLock bool `json:"use_mlock,omitempty"`
EmbeddingOnly bool `json:"embedding_only,omitempty"`
NumCtx int `json:"num_ctx,omitempty"`
NumKeep int `json:"num_keep,omitempty"`
NumBatch int `json:"num_batch,omitempty"`
NumGQA int `json:"num_gqa,omitempty"`
NumGPU int `json:"num_gpu,omitempty"`
MainGPU int `json:"main_gpu,omitempty"`
LowVRAM bool `json:"low_vram,omitempty"`
F16KV bool `json:"f16_kv,omitempty"`
LogitsAll bool `json:"logits_all,omitempty"`
VocabOnly bool `json:"vocab_only,omitempty"`
UseMMap bool `json:"use_mmap,omitempty"`
UseMLock bool `json:"use_mlock,omitempty"`
EmbeddingOnly bool `json:"embedding_only,omitempty"`
RopeFrequencyBase float32 `json:"rope_frequency_base,omitempty"`
RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
// Predict options
RepeatLastN int `json:"repeat_last_n,omitempty"`
RepeatPenalty float32 `json:"repeat_penalty,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
TopK int `json:"top_k,omitempty"`
TopP float32 `json:"top_p,omitempty"`
TFSZ float32 `json:"tfs_z,omitempty"`
TypicalP float32 `json:"typical_p,omitempty"`
Mirostat int `json:"mirostat,omitempty"`
MirostatTau float32 `json:"mirostat_tau,omitempty"`
MirostatEta float32 `json:"mirostat_eta,omitempty"`
RepeatLastN int `json:"repeat_last_n,omitempty"`
RepeatPenalty float32 `json:"repeat_penalty,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
TopK int `json:"top_k,omitempty"`
TopP float32 `json:"top_p,omitempty"`
TFSZ float32 `json:"tfs_z,omitempty"`
TypicalP float32 `json:"typical_p,omitempty"`
Mirostat int `json:"mirostat,omitempty"`
MirostatTau float32 `json:"mirostat_tau,omitempty"`
MirostatEta float32 `json:"mirostat_eta,omitempty"`
PenalizeNewline bool `json:"penalize_newline,omitempty"`
Stop []string `json:"stop,omitempty"`
NumThread int `json:"num_thread,omitempty"`
}
func (opts *Options) FromMap(m map[string]interface{}) error {
valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
typeOpts := reflect.TypeOf(opts).Elem() // types of the fields in the options struct
// build map of json struct tags to their types
jsonOpts := make(map[string]reflect.StructField)
for _, field := range reflect.VisibleFields(typeOpts) {
jsonTag := strings.Split(field.Tag.Get("json"), ",")[0]
if jsonTag != "" {
jsonOpts[jsonTag] = field
}
}
for key, val := range m {
if opt, ok := jsonOpts[key]; ok {
field := valueOpts.FieldByName(opt.Name)
if field.IsValid() && field.CanSet() {
if val == nil {
continue
}
switch field.Kind() {
case reflect.Int:
switch t := val.(type) {
case int64:
field.SetInt(t)
case float64:
// when JSON unmarshals numbers, it uses float64, not int
field.SetInt(int64(t))
default:
log.Printf("could not convert model parameter %v to int, skipped", key)
}
case reflect.Bool:
val, ok := val.(bool)
if !ok {
log.Printf("could not convert model parameter %v to bool, skipped", key)
continue
}
field.SetBool(val)
case reflect.Float32:
// JSON unmarshals to float64
val, ok := val.(float64)
if !ok {
log.Printf("could not convert model parameter %v to float32, skipped", key)
continue
}
field.SetFloat(val)
case reflect.String:
val, ok := val.(string)
if !ok {
log.Printf("could not convert model parameter %v to string, skipped", key)
continue
}
field.SetString(val)
case reflect.Slice:
// JSON unmarshals to []interface{}, not []string
val, ok := val.([]interface{})
if !ok {
log.Printf("could not convert model parameter %v to slice, skipped", key)
continue
}
// convert []interface{} to []string
slice := make([]string, len(val))
for i, item := range val {
str, ok := item.(string)
if !ok {
log.Printf("could not convert model parameter %v to slice of strings, skipped", key)
continue
}
slice[i] = str
}
field.Set(reflect.ValueOf(slice))
default:
return fmt.Errorf("unknown type loading config params: %v", field.Kind())
}
}
}
}
return nil
}
func DefaultOptions() Options {
return Options{
Seed: -1,
UseNUMA: false,
NumCtx: 2048,
NumBatch: 512,
NumGPU: 1,
LowVRAM: false,
F16KV: true,
UseMMap: true,
UseMLock: false,
NumCtx: 2048,
NumKeep: -1,
NumBatch: 512,
NumGPU: 1,
NumGQA: 1,
LowVRAM: false,
F16KV: true,
UseMMap: true,
UseMLock: false,
RopeFrequencyBase: 10000.0,
RopeFrequencyScale: 1.0,
EmbeddingOnly: true,
RepeatLastN: 512,
RepeatLastN: 64,
RepeatPenalty: 1.1,
FrequencyPenalty: 0.0,
PresencePenalty: 0.0,
@@ -180,7 +311,37 @@ func DefaultOptions() Options {
Mirostat: 0,
MirostatTau: 5.0,
MirostatEta: 0.1,
PenalizeNewline: true,
NumThread: runtime.NumCPU(),
}
}
type Duration struct {
time.Duration
}
func (d *Duration) UnmarshalJSON(b []byte) (err error) {
var v any
if err := json.Unmarshal(b, &v); err != nil {
return err
}
d.Duration = 5 * time.Minute
switch t := v.(type) {
case float64:
if t < 0 {
t = math.MaxFloat64
}
d.Duration = time.Duration(t)
case string:
d.Duration, err = time.ParseDuration(t)
if err != nil {
return err
}
}
return nil
}
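The FromMap reflection helper and the Duration unmarshaller shown in this diff would be used roughly as follows; a sketch that assumes only the exported names visible above, with the surrounding server plumbing omitted:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// Options arrive from the client as a generic JSON object...
	raw := []byte(`{"temperature": 0.2, "num_ctx": 4096, "stop": ["User:"]}`)
	var m map[string]interface{}
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	// ...and are layered over the defaults by matching JSON tags via reflection.
	opts := api.DefaultOptions()
	if err := opts.FromMap(m); err != nil {
		panic(err)
	}
	fmt.Println(opts.Temperature, opts.NumCtx, opts.Stop)

	// api.Duration accepts either a number (nanoseconds) or a Go duration string.
	var d api.Duration
	if err := json.Unmarshal([]byte(`"10m"`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration)
}
```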

View File

@@ -1,7 +1,5 @@
# Desktop
_Note: the Ollama desktop app is a work in progress and is not ready yet for general use._
This app builds upon Ollama to provide a desktop experience for running models.
## Developing
@@ -9,19 +7,15 @@ This app builds upon Ollama to provide a desktop experience for running models.
First, build the `ollama` binary:
```
make -C ..
cd ..
go build .
```
Then run the desktop app with `npm start`:
```
cd app
npm install
npm start
```
## Coming soon
- Browse the latest available models on Hugging Face and other sources
- Keep track of previous conversations with models
- Switch quickly between models
- Connect to remote Ollama servers to run models

Binary image changes: several icon assets under app/assets were added, removed, or modified in this compare, including the new app/assets/iconTemplate.png. Previews not shown.

View File

@@ -18,12 +18,16 @@ const config: ForgeConfig = {
asar: true,
icon: './assets/icon.icns',
extraResource: [
'../ollama',
path.join(__dirname, './assets/ollama_icon_16x16Template.png'),
path.join(__dirname, './assets/ollama_icon_16x16Template@2x.png'),
path.join(__dirname, './assets/ollama_outline_icon_16x16Template.png'),
path.join(__dirname, './assets/ollama_outline_icon_16x16Template@2x.png'),
...(process.platform === 'darwin' ? ['../llama/ggml-metal.metal'] : []),
'../dist/ollama',
path.join(__dirname, './assets/iconTemplate.png'),
path.join(__dirname, './assets/iconTemplate@2x.png'),
path.join(__dirname, './assets/iconUpdateTemplate.png'),
path.join(__dirname, './assets/iconUpdateTemplate@2x.png'),
path.join(__dirname, './assets/iconDarkTemplate.png'),
path.join(__dirname, './assets/iconDarkTemplate@2x.png'),
path.join(__dirname, './assets/iconDarkUpdateTemplate.png'),
path.join(__dirname, './assets/iconDarkUpdateTemplate@2x.png'),
...(process.platform === 'darwin' ? ['../llm/ggml-metal.metal'] : []),
],
...(process.env.SIGN
? {
@@ -38,6 +42,9 @@ const config: ForgeConfig = {
},
}
: {}),
osxUniversal: {
x64ArchFiles: '**/ollama',
},
},
rebuildConfig: {},
makers: [new MakerSquirrel({}), new MakerZIP({}, ['darwin'])],

7
app/package-lock.json generated
View File

@@ -32,6 +32,7 @@
"@electron-forge/plugin-auto-unpack-natives": "^6.2.1",
"@electron-forge/plugin-webpack": "^6.2.1",
"@electron-forge/publisher-github": "^6.2.1",
"@electron/universal": "^1.4.1",
"@svgr/webpack": "^8.0.1",
"@types/chmodr": "^1.0.0",
"@types/node": "^20.4.0",
@@ -3328,9 +3329,9 @@
}
},
"node_modules/@electron/universal": {
"version": "1.3.4",
"resolved": "https://registry.npmjs.org/@electron/universal/-/universal-1.3.4.tgz",
"integrity": "sha512-BdhBgm2ZBnYyYRLRgOjM5VHkyFItsbggJ0MHycOjKWdFGYwK97ZFXH54dTvUWEfha81vfvwr5On6XBjt99uDcg==",
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/@electron/universal/-/universal-1.4.1.tgz",
"integrity": "sha512-lE/U3UNw1YHuowNbTmKNs9UlS3En3cPgwM5MI+agIgr/B1hSze9NdOP0qn7boZaI9Lph8IDv3/24g9IxnJP7aQ==",
"dev": true,
"dependencies": {
"@electron/asar": "^3.2.1",

View File

@@ -6,10 +6,10 @@
"main": ".webpack/main",
"scripts": {
"start": "electron-forge start",
"package": "electron-forge package",
"package:sign": "SIGN=1 electron-forge package",
"make": "electron-forge make",
"make:sign": "SIGN=1 electron-forge make",
"package": "electron-forge package --arch universal",
"package:sign": "SIGN=1 electron-forge package --arch universal",
"make": "electron-forge make --arch universal",
"make:sign": "SIGN=1 electron-forge make --arch universal",
"publish": "SIGN=1 electron-forge publish",
"lint": "eslint --ext .ts,.tsx .",
"format": "prettier --check . --ignore-path .gitignore",
@@ -32,6 +32,7 @@
"@electron-forge/plugin-auto-unpack-natives": "^6.2.1",
"@electron-forge/plugin-webpack": "^6.2.1",
"@electron-forge/publisher-github": "^6.2.1",
"@electron/universal": "^1.4.1",
"@svgr/webpack": "^8.0.1",
"@types/chmodr": "^1.0.0",
"@types/node": "^20.4.0",

View File

@@ -2,7 +2,7 @@ import { useState } from 'react'
import copy from 'copy-to-clipboard'
import { CheckIcon, DocumentDuplicateIcon } from '@heroicons/react/24/outline'
import Store from 'electron-store'
import { getCurrentWindow } from '@electron/remote'
import { getCurrentWindow, app } from '@electron/remote'
import { install } from './install'
import OllamaIcon from './ollama.svg'
@@ -51,10 +51,15 @@ export default function () {
<div className='mx-auto'>
<button
onClick={async () => {
await install()
getCurrentWindow().show()
getCurrentWindow().focus()
setStep(Step.FINISH)
try {
await install()
setStep(Step.FINISH)
} catch (e) {
console.error('could not install: ', e)
} finally {
getCurrentWindow().show()
getCurrentWindow().focus()
}
}}
className='no-drag rounded-dm mx-auto w-[60%] rounded-md bg-black px-4 py-2 text-sm text-white hover:brightness-110'
>


@@ -1,5 +1,5 @@
import { spawn } from 'child_process'
import { app, autoUpdater, dialog, Tray, Menu, BrowserWindow, nativeTheme } from 'electron'
import { spawn, ChildProcess } from 'child_process'
import { app, autoUpdater, dialog, Tray, Menu, BrowserWindow, MenuItemConstructorOptions, nativeTheme } from 'electron'
import Store from 'electron-store'
import winston from 'winston'
import 'winston-daily-rotate-file'
@@ -10,8 +10,12 @@ import { installed } from './install'
require('@electron/remote/main').initialize()
if (require('electron-squirrel-startup')) {
app.quit()
}
const store = new Store()
let tray: Tray | null = null
let welcomeWindow: BrowserWindow | null = null
declare const MAIN_WINDOW_WEBPACK_ENTRY: string
@@ -28,10 +32,30 @@ const logger = winston.createLogger({
format: winston.format.printf(info => info.message),
})
const SingleInstanceLock = app.requestSingleInstanceLock()
if (!SingleInstanceLock) {
app.quit()
}
app.on('ready', () => {
const gotTheLock = app.requestSingleInstanceLock()
if (!gotTheLock) {
app.exit(0)
return
}
app.on('second-instance', () => {
if (app.hasSingleInstanceLock()) {
app.releaseSingleInstanceLock()
}
if (proc) {
proc.off('exit', restart)
proc.kill()
}
app.exit(0)
})
app.focus({ steal: true })
init()
})
function firstRunWindow() {
// Create the browser window.
@@ -47,65 +71,74 @@ function firstRunWindow() {
nodeIntegration: true,
contextIsolation: false,
},
alwaysOnTop: true,
})
require('@electron/remote/main').enable(welcomeWindow.webContents)
// and load the index.html of the app.
welcomeWindow.loadURL(MAIN_WINDOW_WEBPACK_ENTRY)
welcomeWindow.on('ready-to-show', () => welcomeWindow.show())
// for debugging
// welcomeWindow.webContents.openDevTools()
if (process.platform === 'darwin') {
app.dock.hide()
}
}
function createSystemtray() {
let iconPath = nativeTheme.shouldUseDarkColors
? path.join(__dirname, '..', '..', 'assets', 'ollama_icon_16x16Template.png')
: path.join(__dirname, '..', '..', 'assets', 'ollama_outline_icon_16x16Template.png')
if (app.isPackaged) {
iconPath = nativeTheme.shouldUseDarkColors
? path.join(process.resourcesPath, 'ollama_icon_16x16Template.png')
: path.join(process.resourcesPath, 'ollama_outline_icon_16x16Template.png')
}
tray = new Tray(iconPath)
nativeTheme.on('updated', function theThemeHasChanged() {
if (nativeTheme.shouldUseDarkColors) {
app.isPackaged
? tray.setImage(path.join(process.resourcesPath, 'ollama_icon_16x16Template.png'))
: tray.setImage(path.join(__dirname, '..', '..', 'assets', 'ollama_icon_16x16Template.png'))
} else {
app.isPackaged
? tray.setImage(path.join(process.resourcesPath, 'ollama_outline_icon_16x16Template.png'))
: tray.setImage(path.join(__dirname, '..', '..', 'assets', 'ollama_outline_icon_16x16Template.png'))
welcomeWindow.on('closed', () => {
if (process.platform === 'darwin') {
app.dock.hide()
}
})
const contextMenu = Menu.buildFromTemplate([{ role: 'quit', label: 'Quit Ollama', accelerator: 'Command+Q' }])
tray.setContextMenu(contextMenu)
tray.setToolTip('Ollama')
}
if (require('electron-squirrel-startup')) {
app.quit()
let tray: Tray | null = null
let updateAvailable = false
const assetPath = app.isPackaged ? process.resourcesPath : path.join(__dirname, '..', '..', 'assets')
function trayIconPath() {
return nativeTheme.shouldUseDarkColors
? updateAvailable
? path.join(assetPath, 'iconDarkUpdateTemplate.png')
: path.join(assetPath, 'iconDarkTemplate.png')
: updateAvailable
? path.join(assetPath, 'iconUpdateTemplate.png')
: path.join(assetPath, 'iconTemplate.png')
}
function updateTrayIcon() {
if (tray) {
tray.setImage(trayIconPath())
}
}
function updateTray() {
const updateItems: MenuItemConstructorOptions[] = [
{ label: 'An update is available', enabled: false },
{
label: 'Restart to update',
click: () => autoUpdater.quitAndInstall(),
},
{ type: 'separator' },
]
const menu = Menu.buildFromTemplate([
...(updateAvailable ? updateItems : []),
{ role: 'quit', label: 'Quit Ollama', accelerator: 'Command+Q' },
])
if (!tray) {
tray = new Tray(trayIconPath())
}
tray.setToolTip(updateAvailable ? 'An update is available' : 'Ollama')
tray.setContextMenu(menu)
tray.setImage(trayIconPath())
nativeTheme.off('updated', updateTrayIcon)
nativeTheme.on('updated', updateTrayIcon)
}
let proc: ChildProcess = null
function server() {
const binary = app.isPackaged
? path.join(process.resourcesPath, 'ollama')
: path.resolve(process.cwd(), '..', 'ollama')
const proc = spawn(binary, ['serve'])
proc = spawn(binary, ['serve'])
proc.stdout.on('data', data => {
logger.info(data.toString().trim())
@@ -115,23 +148,32 @@ function server() {
logger.error(data.toString().trim())
})
function restart() {
setTimeout(server, 3000)
}
proc.on('exit', restart)
}
app.on('before-quit', () => {
function restart() {
setTimeout(server, 1000)
}
app.on('before-quit', () => {
if (proc) {
proc.off('exit', restart)
proc.kill()
})
}
}
})
if (process.platform === 'darwin') {
app.dock.hide()
}
function init() {
if (app.isPackaged) {
heartbeat()
autoUpdater.checkForUpdates()
setInterval(() => {
heartbeat()
autoUpdater.checkForUpdates()
}, 60 * 60 * 1000)
}
updateTray()
app.on('ready', () => {
if (process.platform === 'darwin') {
if (app.isPackaged) {
if (!app.isInApplicationsFolder()) {
@@ -167,10 +209,13 @@ app.on('ready', () => {
}
}
createSystemtray()
server()
if (store.get('first-time-run') && installed()) {
if (process.platform === 'darwin') {
app.dock.hide()
}
app.setLoginItemSettings({ openAtLogin: app.getLoginItemSettings().openAtLogin })
return
}
@@ -178,7 +223,7 @@ app.on('ready', () => {
// This is the first run or the CLI is no longer installed
app.setLoginItemSettings({ openAtLogin: true })
firstRunWindow()
})
}
// Quit when all windows are closed, except on macOS. There, it's common
// for applications and their menu bar to stay active until the user quits
@@ -191,13 +236,18 @@ app.on('window-all-closed', () => {
// In this file you can include the rest of your app's specific main process
// code. You can also put them in separate files and import them here.
let aid = ''
try {
aid = id()
} catch (e) {}
autoUpdater.setFeedURL({
url: `https://ollama.ai/api/update?os=${process.platform}&arch=${process.arch}&version=${app.getVersion()}`,
url: `https://ollama.ai/api/update?os=${process.platform}&arch=${process.arch}&version=${app.getVersion()}&id=${aid}`,
})
async function heartbeat() {
analytics.track({
anonymousId: id(),
anonymousId: aid,
event: 'heartbeat',
properties: {
version: app.getVersion(),
@@ -205,29 +255,11 @@ async function heartbeat() {
})
}
if (app.isPackaged) {
heartbeat()
autoUpdater.checkForUpdates()
setInterval(() => {
heartbeat()
autoUpdater.checkForUpdates()
}, 60 * 60 * 1000)
}
autoUpdater.on('error', e => {
logger.error(`update check failed - ${e.message}`)
console.error(`update check failed - ${e.message}`)
})
autoUpdater.on('update-downloaded', (event, releaseNotes, releaseName) => {
dialog
.showMessageBox({
type: 'info',
buttons: ['Restart Now', 'Later'],
title: 'New update available',
message: process.platform === 'win32' ? releaseNotes : releaseName,
detail: 'A new version of Ollama is available. Restart to apply the update.',
})
.then(returnValue => {
if (returnValue.response === 0) autoUpdater.quitAndInstall()
})
autoUpdater.on('update-downloaded', () => {
updateAvailable = true
updateTray()
})


@@ -15,12 +15,7 @@ export function installed() {
export async function install() {
const command = `do shell script "mkdir -p ${path.dirname(
symlinkPath
)} && ln -F -s ${ollama} ${symlinkPath}" with administrator privileges`
)} && ln -F -s \\"${ollama}\\" \\"${symlinkPath}\\"" with administrator privileges`
try {
await exec(`osascript -e '${command}'`)
} catch (error) {
console.error(`cli: failed to install cli: ${error.message}`)
return
}
await exec(`osascript -e '${command}'`)
}


@@ -3,6 +3,9 @@ package cmd
import (
"bufio"
"context"
"crypto/ed25519"
"crypto/rand"
"encoding/pem"
"errors"
"fmt"
"io"
@@ -10,7 +13,10 @@ import (
"net"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"time"
@@ -18,11 +24,13 @@ import (
"github.com/dustin/go-humanize"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/format"
"github.com/jmorganca/ollama/progressbar"
"github.com/jmorganca/ollama/server"
"github.com/jmorganca/ollama/version"
)
func CreateHandler(cmd *cobra.Command, args []string) error {
@@ -32,18 +40,45 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
return err
}
client := api.NewClient()
client, err := api.FromEnv()
if err != nil {
return err
}
var spinner *Spinner
request := api.CreateRequest{Name: args[0], Path: filename}
fn := func(resp api.CreateProgress) error {
if spinner != nil {
spinner.Stop()
}
var currentDigest string
var bar *progressbar.ProgressBar
spinner = NewSpinner(resp.Status)
go spinner.Spin(100 * time.Millisecond)
request := api.CreateRequest{Name: args[0], Path: filename}
fn := func(resp api.ProgressResponse) error {
if resp.Digest != currentDigest && resp.Digest != "" {
if spinner != nil {
spinner.Stop()
}
currentDigest = resp.Digest
switch {
case strings.Contains(resp.Status, "embeddings"):
bar = progressbar.Default(int64(resp.Total), resp.Status)
bar.Set(resp.Completed)
default:
// pulling
bar = progressbar.DefaultBytes(
int64(resp.Total),
resp.Status,
)
bar.Set(resp.Completed)
}
} else if resp.Digest == currentDigest && resp.Digest != "" {
bar.Set(resp.Completed)
} else {
currentDigest = ""
if spinner != nil {
spinner.Stop()
}
spinner = NewSpinner(resp.Status)
go spinner.Spin(100 * time.Millisecond)
}
return nil
}
@@ -54,13 +89,29 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
if spinner != nil {
spinner.Stop()
if spinner.description != "success" {
return errors.New("unexpected end to create model")
}
}
return nil
}
func RunHandler(cmd *cobra.Command, args []string) error {
insecure, err := cmd.Flags().GetBool("insecure")
if err != nil {
return err
}
mp := server.ParseModelPath(args[0])
if err != nil {
return err
}
if mp.ProtocolScheme == "http" && !insecure {
return fmt.Errorf("insecure protocol http")
}
fp, err := mp.GetManifestPath(false)
if err != nil {
return err
@@ -69,7 +120,7 @@ func RunHandler(cmd *cobra.Command, args []string) error {
_, err = os.Stat(fp)
switch {
case errors.Is(err, os.ErrNotExist):
if err := pull(args[0], false); err != nil {
if err := pull(args[0], insecure); err != nil {
var apiStatusError api.StatusError
if !errors.As(err, &apiStatusError) {
return err
@@ -87,27 +138,54 @@ func RunHandler(cmd *cobra.Command, args []string) error {
}
func PushHandler(cmd *cobra.Command, args []string) error {
client := api.NewClient()
client, err := api.FromEnv()
if err != nil {
return err
}
insecure, err := cmd.Flags().GetBool("insecure")
if err != nil {
return err
}
var currentDigest string
var bar *progressbar.ProgressBar
request := api.PushRequest{Name: args[0], Insecure: insecure}
fn := func(resp api.ProgressResponse) error {
fmt.Println(resp.Status)
if resp.Digest != currentDigest && resp.Digest != "" {
currentDigest = resp.Digest
bar = progressbar.DefaultBytes(
int64(resp.Total),
fmt.Sprintf("pushing %s...", resp.Digest[7:19]),
)
bar.Set(resp.Completed)
} else if resp.Digest == currentDigest && resp.Digest != "" {
bar.Set(resp.Completed)
} else {
currentDigest = ""
fmt.Println(resp.Status)
}
return nil
}
if err := client.Push(context.Background(), &request, fn); err != nil {
return err
}
if bar != nil && !bar.IsFinished() {
return errors.New("unexpected end to push model")
}
return nil
}
func ListHandler(cmd *cobra.Command, args []string) error {
client := api.NewClient()
client, err := api.FromEnv()
if err != nil {
return err
}
models, err := client.List(context.Background())
if err != nil {
@@ -137,16 +215,33 @@ func ListHandler(cmd *cobra.Command, args []string) error {
}
func DeleteHandler(cmd *cobra.Command, args []string) error {
client := api.NewClient()
client, err := api.FromEnv()
if err != nil {
return err
}
request := api.DeleteRequest{Name: args[0]}
if err := client.Delete(context.Background(), &request); err != nil {
req := api.DeleteRequest{Name: args[0]}
if err := client.Delete(context.Background(), &req); err != nil {
return err
}
fmt.Printf("deleted '%s'\n", args[0])
return nil
}
func CopyHandler(cmd *cobra.Command, args []string) error {
client, err := api.FromEnv()
if err != nil {
return err
}
req := api.CopyRequest{Source: args[0], Destination: args[1]}
if err := client.Copy(context.Background(), &req); err != nil {
return err
}
fmt.Printf("copied '%s' to '%s'\n", args[0], args[1])
return nil
}
func PullHandler(cmd *cobra.Command, args []string) error {
insecure, err := cmd.Flags().GetBool("insecure")
if err != nil {
@@ -157,7 +252,10 @@ func PullHandler(cmd *cobra.Command, args []string) error {
}
func pull(model string, insecure bool) error {
client := api.NewClient()
client, err := api.FromEnv()
if err != nil {
return err
}
var currentDigest string
var bar *progressbar.ProgressBar
@@ -178,12 +276,18 @@ func pull(model string, insecure bool) error {
currentDigest = ""
fmt.Println(resp.Status)
}
return nil
}
if err := client.Pull(context.Background(), &request, fn); err != nil {
return err
}
if bar != nil && !bar.IsFinished() {
return errors.New("unexpected end to pull model")
}
return nil
}
@@ -200,43 +304,60 @@ func RunGenerate(cmd *cobra.Command, args []string) error {
return generateBatch(cmd, args[0])
}
var generateContextKey struct{}
type generateContextKey string
func generate(cmd *cobra.Command, model, prompt string) error {
if len(strings.TrimSpace(prompt)) > 0 {
client := api.NewClient()
client, err := api.FromEnv()
if err != nil {
return err
}
spinner := NewSpinner("")
go spinner.Spin(60 * time.Millisecond)
var latest api.GenerateResponse
generateContext, ok := cmd.Context().Value(generateContextKey).([]int)
generateContext, ok := cmd.Context().Value(generateContextKey("context")).([]int)
if !ok {
generateContext = []int{}
}
request := api.GenerateRequest{Model: model, Prompt: prompt, Context: generateContext}
fn := func(resp api.GenerateResponse) error {
fn := func(response api.GenerateResponse) error {
if !spinner.IsFinished() {
spinner.Finish()
}
latest = resp
latest = response
fmt.Print(resp.Response)
cmd.SetContext(context.WithValue(cmd.Context(), generateContextKey, resp.Context))
fmt.Print(response.Response)
return nil
}
if err := client.Generate(context.Background(), &request, fn); err != nil {
if strings.Contains(err.Error(), "failed to load model") {
// tell the user to check the server log, if it exists locally
home, nestedErr := os.UserHomeDir()
if nestedErr != nil {
// return the original error
return err
}
logPath := filepath.Join(home, ".ollama", "logs", "server.log")
if _, nestedErr := os.Stat(logPath); nestedErr == nil {
err = fmt.Errorf("%w\nFor more details, check the error logs at %s", err, logPath)
}
}
return err
}
fmt.Println()
fmt.Println()
if !latest.Done {
return errors.New("unexpected end of response")
}
verbose, err := cmd.Flags().GetBool("verbose")
if err != nil {
return err
@@ -245,11 +366,29 @@ func generate(cmd *cobra.Command, model, prompt string) error {
if verbose {
latest.Summary()
}
ctx := cmd.Context()
ctx = context.WithValue(ctx, generateContextKey("context"), latest.Context)
cmd.SetContext(ctx)
}
return nil
}
func showLayer(l *server.Layer) {
filename, err := server.GetBlobsPath(l.Digest)
if err != nil {
fmt.Println("Couldn't get layer's path")
return
}
bts, err := os.ReadFile(filename)
if err != nil {
fmt.Println("Couldn't read layer")
return
}
fmt.Println(string(bts))
}
func generateInteractive(cmd *cobra.Command, model string) error {
home, err := os.UserHomeDir()
if err != nil {
@@ -270,6 +409,11 @@ func generateInteractive(cmd *cobra.Command, model string) error {
readline.PcItem("default"),
),
),
readline.PcItem("/show",
readline.PcItem("license"),
readline.PcItem("system"),
readline.PcItem("template"),
),
readline.PcItem("/exit"),
readline.PcItem("/bye"),
)
@@ -291,6 +435,9 @@ func generateInteractive(cmd *cobra.Command, model string) error {
}
defer scanner.Close()
var multiLineBuffer string
var isMultiLine bool
for {
line, err := scanner.Readline()
switch {
@@ -309,6 +456,22 @@ func generateInteractive(cmd *cobra.Command, model string) error {
line = strings.TrimSpace(line)
switch {
case isMultiLine:
if strings.HasSuffix(line, `"""`) {
isMultiLine = false
multiLineBuffer += strings.TrimSuffix(line, `"""`)
line = multiLineBuffer
multiLineBuffer = ""
scanner.SetPrompt(">>> ")
} else {
multiLineBuffer += line + " "
continue
}
case strings.HasPrefix(line, `"""`):
isMultiLine = true
multiLineBuffer = strings.TrimPrefix(line, `"""`) + " "
scanner.SetPrompt("... ")
continue
case strings.HasPrefix(line, "/list"):
args := strings.Fields(line)
if err := ListHandler(cmd, args[1:]); err != nil {
@@ -341,9 +504,61 @@ func generateInteractive(cmd *cobra.Command, model string) error {
case "emacs", "default":
scanner.SetVimMode(false)
continue
default:
usage()
continue
}
} else {
usage()
continue
}
}
} else {
usage()
continue
}
case strings.HasPrefix(line, "/show"):
args := strings.Fields(line)
if len(args) > 1 {
mp := server.ParseModelPath(model)
if err != nil {
return err
}
manifest, err := server.GetManifest(mp)
if err != nil {
fmt.Println("error: couldn't get a manifest for this model")
continue
}
switch args[1] {
case "license":
for _, l := range manifest.Layers {
if l.MediaType == "application/vnd.ollama.image.license" {
showLayer(l)
}
}
continue
case "system":
for _, l := range manifest.Layers {
if l.MediaType == "application/vnd.ollama.image.system" {
showLayer(l)
}
}
continue
case "template":
for _, l := range manifest.Layers {
if l.MediaType == "application/vnd.ollama.image.template" {
showLayer(l)
}
}
continue
default:
usage()
continue
}
} else {
usage()
continue
}
case line == "/help", line == "/?":
usage()
@@ -371,15 +586,26 @@ func generateBatch(cmd *cobra.Command, model string) error {
return nil
}
func RunServer(_ *cobra.Command, _ []string) error {
host := os.Getenv("OLLAMA_HOST")
if host == "" {
host = "127.0.0.1"
func RunServer(cmd *cobra.Command, _ []string) error {
host, port := "127.0.0.1", "11434"
parts := strings.Split(os.Getenv("OLLAMA_HOST"), ":")
if ip := net.ParseIP(parts[0]); ip != nil {
host = ip.String()
}
port := os.Getenv("OLLAMA_PORT")
if port == "" {
port = "11434"
if len(parts) > 1 {
port = parts[1]
}
// deprecated: include port in OLLAMA_HOST
if p := os.Getenv("OLLAMA_PORT"); p != "" {
port = p
}
err := initializeKeypair()
if err != nil {
return err
}
ln, err := net.Listen("tcp", fmt.Sprintf("%s:%s", host, port))
@@ -387,40 +613,150 @@ func RunServer(_ *cobra.Command, _ []string) error {
return err
}
return server.Serve(ln)
var origins []string
if o := os.Getenv("OLLAMA_ORIGINS"); o != "" {
origins = strings.Split(o, ",")
}
return server.Serve(ln, origins)
}
func initializeKeypair() error {
home, err := os.UserHomeDir()
if err != nil {
return err
}
privKeyPath := filepath.Join(home, ".ollama", "id_ed25519")
pubKeyPath := filepath.Join(home, ".ollama", "id_ed25519.pub")
_, err = os.Stat(privKeyPath)
if os.IsNotExist(err) {
fmt.Printf("Couldn't find '%s'. Generating new private key.\n", privKeyPath)
_, privKey, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return err
}
privKeyBytes, err := format.OpenSSHPrivateKey(privKey, "")
if err != nil {
return err
}
err = os.MkdirAll(path.Dir(privKeyPath), 0o700)
if err != nil {
return fmt.Errorf("could not create directory %w", err)
}
err = os.WriteFile(privKeyPath, pem.EncodeToMemory(privKeyBytes), 0o600)
if err != nil {
return err
}
sshPrivateKey, err := ssh.NewSignerFromKey(privKey)
if err != nil {
return err
}
pubKeyData := ssh.MarshalAuthorizedKey(sshPrivateKey.PublicKey())
err = os.WriteFile(pubKeyPath, pubKeyData, 0o644)
if err != nil {
return err
}
fmt.Printf("Your new public key is: \n\n%s\n", string(pubKeyData))
}
return nil
}
func startMacApp(client *api.Client) error {
exe, err := os.Executable()
if err != nil {
return err
}
link, err := os.Readlink(exe)
if err != nil {
return err
}
if !strings.Contains(link, "Ollama.app") {
return fmt.Errorf("could not find ollama app")
}
path := strings.Split(link, "Ollama.app")
if err := exec.Command("/usr/bin/open", "-a", path[0]+"Ollama.app").Run(); err != nil {
return err
}
// wait for the server to start
timeout := time.After(5 * time.Second)
tick := time.Tick(500 * time.Millisecond)
for {
select {
case <-timeout:
return errors.New("timed out waiting for server to start")
case <-tick:
if err := client.Heartbeat(context.Background()); err == nil {
return nil // server has started
}
}
}
}
func checkServerHeartbeat(_ *cobra.Command, _ []string) error {
client, err := api.FromEnv()
if err != nil {
return err
}
if err := client.Heartbeat(context.Background()); err != nil {
if !strings.Contains(err.Error(), "connection refused") {
return err
}
if runtime.GOOS == "darwin" {
if err := startMacApp(client); err != nil {
return fmt.Errorf("could not connect to ollama app, is it running?")
}
} else {
return fmt.Errorf("could not connect to ollama server, run 'ollama serve' to start it")
}
}
return nil
}
func NewCLI() *cobra.Command {
log.SetFlags(log.LstdFlags | log.Lshortfile)
rootCmd := &cobra.Command{
Use: "ollama",
Short: "Large language model runner",
SilenceUsage: true,
Use: "ollama",
Short: "Large language model runner",
SilenceUsage: true,
SilenceErrors: true,
CompletionOptions: cobra.CompletionOptions{
DisableDefaultCmd: true,
},
Version: version.Version,
}
cobra.EnableCommandSorting = false
createCmd := &cobra.Command{
Use: "create MODEL",
Short: "Create a model from a Modelfile",
Args: cobra.MinimumNArgs(1),
RunE: CreateHandler,
Use: "create MODEL",
Short: "Create a model from a Modelfile",
Args: cobra.MinimumNArgs(1),
PreRunE: checkServerHeartbeat,
RunE: CreateHandler,
}
createCmd.Flags().StringP("file", "f", "Modelfile", "Name of the Modelfile (default \"Modelfile\")")
runCmd := &cobra.Command{
Use: "run MODEL [PROMPT]",
Short: "Run a model",
Args: cobra.MinimumNArgs(1),
RunE: RunHandler,
Use: "run MODEL [PROMPT]",
Short: "Run a model",
Args: cobra.MinimumNArgs(1),
PreRunE: checkServerHeartbeat,
RunE: RunHandler,
}
runCmd.Flags().Bool("verbose", false, "Show timings for response")
runCmd.Flags().Bool("insecure", false, "Use an insecure registry")
serveCmd := &cobra.Command{
Use: "serve",
@@ -430,19 +766,21 @@ func NewCLI() *cobra.Command {
}
pullCmd := &cobra.Command{
Use: "pull MODEL",
Short: "Pull a model from a registry",
Args: cobra.MinimumNArgs(1),
RunE: PullHandler,
Use: "pull MODEL",
Short: "Pull a model from a registry",
Args: cobra.MinimumNArgs(1),
PreRunE: checkServerHeartbeat,
RunE: PullHandler,
}
pullCmd.Flags().Bool("insecure", false, "Use an insecure registry")
pushCmd := &cobra.Command{
Use: "push MODEL",
Short: "Push a model to a registry",
Args: cobra.MinimumNArgs(1),
RunE: PushHandler,
Use: "push MODEL",
Short: "Push a model to a registry",
Args: cobra.MinimumNArgs(1),
PreRunE: checkServerHeartbeat,
RunE: PushHandler,
}
pushCmd.Flags().Bool("insecure", false, "Use an insecure registry")
@@ -451,14 +789,24 @@ func NewCLI() *cobra.Command {
Use: "list",
Aliases: []string{"ls"},
Short: "List models",
PreRunE: checkServerHeartbeat,
RunE: ListHandler,
}
copyCmd := &cobra.Command{
Use: "cp",
Short: "Copy a model",
Args: cobra.MinimumNArgs(2),
PreRunE: checkServerHeartbeat,
RunE: CopyHandler,
}
deleteCmd := &cobra.Command{
Use: "rm",
Short: "Remove a model",
Args: cobra.MinimumNArgs(1),
RunE: DeleteHandler,
Use: "rm",
Short: "Remove a model",
Args: cobra.MinimumNArgs(1),
PreRunE: checkServerHeartbeat,
RunE: DeleteHandler,
}
rootCmd.AddCommand(
@@ -468,6 +816,7 @@ func NewCLI() *cobra.Command {
pullCmd,
pushCmd,
listCmd,
copyCmd,
deleteCmd,
)

docs/README.md

@@ -0,0 +1,6 @@
# Documentation
- [Modelfile](./modelfile.md)
- [How to develop Ollama](./development.md)
- [API](./api.md)
- [Tutorials](./tutorials.md)

docs/api.md

@@ -0,0 +1,259 @@
# API
## Endpoints
- [Generate a completion](#generate-a-completion)
- [Create a model](#create-a-model)
- [List local models](#list-local-models)
- [Copy a model](#copy-a-model)
- [Delete a model](#delete-a-model)
- [Pull a model](#pull-a-model)
- [Generate embeddings](#generate-embeddings)
## Conventions
### Model names
Model names follow a `model:tag` format. Some examples are `orca:3b-q4_1` and `llama2:70b`. The tag is optional and if not provided will default to `latest`. The tag is used to identify a specific version.
### Durations
All durations are returned in nanoseconds.
## Generate a completion
```
POST /api/generate
```
Generate a response for a given prompt with a provided model. This is a streaming endpoint, so there will be a series of responses. The final response object will include statistics and additional data from the request.
### Parameters
- `model`: (required) the [model name](#model-names)
- `prompt`: the prompt to generate a response for
Advanced parameters:
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
- `system`: system prompt to use (overrides what is defined in the `Modelfile`)
- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
- `context`: the context parameter returned from a previous request to `/generate`, this can be used to keep a short conversational memory
### Request
```
curl -X POST http://localhost:11434/api/generate -d '{
"model": "llama2:7b",
"prompt": "Why is the sky blue?"
}'
```
### Response
A stream of JSON objects:
```json
{
"model": "llama2:7b",
"created_at": "2023-08-04T08:52:19.385406455-07:00",
"response": "The",
"done": false
}
```
The final response in the stream also includes additional data about the generation:
- `total_duration`: time spent generating the response
- `load_duration`: time spent in nanoseconds loading the model
- `sample_count`: number of samples generated
- `sample_duration`: time spent generating samples
- `prompt_eval_count`: number of tokens in the prompt
- `prompt_eval_duration`: time spent in nanoseconds evaluating the prompt
- `eval_count`: number of tokens in the response
- `eval_duration`: time in nanoseconds spent generating the response
- `context`: an encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory
To calculate how fast the response is generated in tokens per second (tokens/s), divide `eval_count` by `eval_duration` and multiply by 10^9, since durations are reported in nanoseconds (see the sketch after the example below).
```json
{
"model": "llama2:7b",
"created_at": "2023-08-04T19:22:45.499127Z",
"context": [1, 2, 3],
"done": true,
"total_duration": 5589157167,
"load_duration": 3013701500,
"sample_count": 114,
"sample_duration": 81442000,
"prompt_eval_count": 46,
"prompt_eval_duration": 1160282000,
"eval_count": 113,
"eval_duration": 1325948000
}
```
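As a rough illustration (not part of the endpoint reference), a minimal Python sketch that consumes the stream shown above and derives tokens per second from the final object might look like the following. It assumes a server running locally on the default port and uses only the fields documented in this section.

```python
import json

import requests

# Stream a completion from a local Ollama server (assumed to be at the default address).
with requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "llama2:7b", "prompt": "Why is the sky blue?"},
    stream=True,
) as r:
    final = None
    for line in r.iter_lines():
        if not line:
            continue
        chunk = json.loads(line)
        # Each object carries a piece of the response until "done" is true.
        print(chunk.get("response", ""), end="", flush=True)
        if chunk.get("done"):
            final = chunk

if final:
    # Durations are in nanoseconds, so multiply by 10^9 to get tokens per second.
    print(f"\n{final['eval_count'] / final['eval_duration'] * 1e9:.1f} tokens/s")
    # final["context"] can be sent with the next request to keep a short conversational memory.
```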
## Create a Model
```
POST /api/create
```
Create a model from a [`Modelfile`](./modelfile.md)
### Parameters
- `name`: name of the model to create
- `path`: path to the Modelfile
### Request
```
curl -X POST http://localhost:11434/api/create -d '{
"name": "mario",
"path": "~/Modelfile"
}'
```
### Response
A stream of JSON objects. When finished, `status` is `success`.
```json
{
"status": "parsing modelfile"
}
```
## List Local Models
```
GET /api/tags
```
List models that are available locally.
### Request
```
curl http://localhost:11434/api/tags
```
### Response
```json
{
"models": [
{
"name": "llama2:7b",
"modified_at": "2023-08-02T17:02:23.713454393-07:00",
"size": 3791730596
},
{
"name": "llama2:13b",
"modified_at": "2023-08-08T12:08:38.093596297-07:00",
"size": 7323310500
}
]
}
```
## Copy a Model
```
POST /api/copy
```
Copy a model. Creates a model with another name from an existing model.
### Request
```
curl http://localhost:11434/api/copy -d '{
"source": "llama2:7b",
"destination": "llama2-backup"
}'
```
## Delete a Model
```
DELETE /api/delete
```
Delete a model and its data.
### Parameters
- `model`: model name to delete
### Request
```
curl -X DELETE http://localhost:11434/api/delete -d '{
"name": "llama2:13b"
}'
```
## Pull a Model
```
POST /api/pull
```
Download a model from the model registry. Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.
### Parameters
- `name`: name of the model to pull
### Request
```
curl -X POST http://localhost:11434/api/pull -d '{
"name": "llama2:7b"
}'
```
### Response
```json
{
"status": "downloading digestname",
"digest": "digestname",
"total": 2142590208
}
```
## Generate Embeddings
```
POST /api/embeddings
```
Generate embeddings from a model
### Parameters
- `model`: name of model to generate embeddings from
- `prompt`: text to generate embeddings for
### Request
```
curl -X POST http://localhost:11434/api/embeddings -d '{
"model": "llama2:7b",
"prompt": "Here is an article about llamas..."
}'
```
### Response
```json
{
"embeddings": [
0.5670403838157654, 0.009260174818336964, 0.23178744316101074, -0.2916173040866852, -0.8924556970596313,
0.8785552978515625, -0.34576427936553955, 0.5742510557174683, -0.04222835972905159, -0.137906014919281
]
}
```


@@ -12,6 +12,8 @@ Enable CGO:
export CGO_ENABLED=1
```
You will also need a C/C++ compiler such as GCC for MacOS and Linux or Mingw-w64 GCC for Windows.
Then build ollama:
```
@@ -28,19 +30,15 @@ Now you can run `ollama`:
To release a new version of Ollama you'll need to set some environment variables:
* `GITHUB_TOKEN`: your GitHub token
* `APPLE_IDENTITY`: the Apple signing identity (macOS only)
* `APPLE_ID`: your Apple ID
* `APPLE_PASSWORD`: your Apple ID app-specific password
* `APPLE_TEAM_ID`: the Apple team ID for the signing identity
* `TELEMETRY_WRITE_KEY`: segment write key for telemetry
- `GITHUB_TOKEN`: your GitHub token
- `APPLE_IDENTITY`: the Apple signing identity (macOS only)
- `APPLE_ID`: your Apple ID
- `APPLE_PASSWORD`: your Apple ID app-specific password
- `APPLE_TEAM_ID`: the Apple team ID for the signing identity
- `TELEMETRY_WRITE_KEY`: segment write key for telemetry
Then run the publish script with the target version:
```
VERSION=0.0.2 ./scripts/publish.sh
```

docs/faq.md

@@ -0,0 +1,17 @@
# FAQ
## How can I expose the Ollama server?
```
OLLAMA_HOST=0.0.0.0:11435 ollama serve
```
By default, Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0`. To support more origins, you can use the `OLLAMA_ORIGINS` environment variable:
```
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
```
## Where are models stored?
Raw model data is stored under `~/.ollama/models`.


@@ -4,6 +4,24 @@
A model file is the blueprint to create and share models with Ollama.
## Table of Contents
- [Format](#format)
- [Examples](#examples)
- [Instructions](#instructions)
- [FROM (Required)](#from-required)
- [Build from llama2](#build-from-llama2)
- [Build from a bin file](#build-from-a-bin-file)
- [EMBED](#embed)
- [PARAMETER](#parameter)
- [Valid Parameters and Values](#valid-parameters-and-values)
- [TEMPLATE](#template)
- [Template Variables](#template-variables)
- [SYSTEM](#system)
- [ADAPTER](#adapter)
- [LICENSE](#license)
- [Notes](#notes)
## Format
The format of the Modelfile:
@@ -13,13 +31,14 @@ The format of the Modelfile:
INSTRUCTION arguments
```
| Instruction | Description |
| ----------------- | ----------------------------------------------------- |
| `FROM` (required) | Defines the base model to use |
| `PARAMETER` | Sets the parameters for how Ollama will run the model |
| `SYSTEM` | Specifies the system prompt that will set the context |
| `TEMPLATE` | The full prompt template to be sent to the model |
| `LICENSE` | Specifies the legal license |
| Instruction | Description |
| ----------------------------------- | ------------------------------------------------------------- |
| [`FROM`](#from-required) (required) | Defines the base model to use. |
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
| [`SYSTEM`](#system) | Specifies the system prompt that will be set in the template. |
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
| [`LICENSE`](#license) | Specifies the legal license. |
## Examples
@@ -28,11 +47,11 @@ An example of a model file creating a mario blueprint:
```
FROM llama2
# sets the temperature to 1 [higher is more creative, lower is more coherent]
# sets the context size to 4096
PARAMETER temperature 1
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
PARAMETER num_ctx 4096
# Overriding the system prompt
# sets a custom system prompt to specify the behavior of the chat assistant
SYSTEM You are Mario from super mario bros, acting as an assistant.
```
@@ -43,7 +62,11 @@ To use this:
3. `ollama run NAME`
4. Start using the model!
## FROM (Required)
More examples are available in the [examples directory](../examples).
## Instructions
### FROM (Required)
The FROM instruction defines the base model to use when creating a model.
@@ -51,7 +74,7 @@ The FROM instruction defines the base model to use when creating a model.
FROM <model name>:<tag>
```
### Build from llama2
#### Build from llama2
```
FROM llama2
@@ -60,13 +83,25 @@ FROM llama2
A list of available base models:
<https://github.com/jmorganca/ollama#model-library>
### Build from a bin file
#### Build from a bin file
```
FROM ./ollama-model.bin
```
## PARAMETER (Optional)
This bin file location should be specified as an absolute path or relative to the Modelfile location.
### EMBED
The EMBED instruction is used to add embeddings of files to a model. This is useful for adding custom data that the model can reference when generating an answer. Note that currently only text files are supported, formatted with each line as one embedding.
```
FROM <model name>:<tag>
EMBED <file path>.txt
EMBED <different file path>.txt
EMBED <path to directory>/*.txt
```
### PARAMETER
The `PARAMETER` instruction defines a parameter that can be set when the model is run.
@@ -76,28 +111,76 @@ PARAMETER <parameter> <parametervalue>
### Valid Parameters and Values
| Parameter | Description | Value Type | Example Usage |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | ------------------ |
| num_ctx | Sets the size of the prompt context size length model. (Default: 2048) | int | num_ctx 4096 |
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
| top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 |
| top_p | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) | float | top_p 0.9 |
| num_gpu | The number of GPUs to use. On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 1 |
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = ctx-size) | int | repeat_last_n 64 |
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 |
| mirostat | Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) | int | mirostat 0 |
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
| num_thread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | num_thread 8 |
| Parameter | Description | Value Type | Example Usage |
| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------- | -------------------- |
| mirostat | Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) | int | mirostat 0 |
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
| num_gqa | The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b | int | num_gqa 1 |
| num_gpu | The number of GPUs to use. On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 1 |
| num_thread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | num_thread 8 |
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
| stop | Sets the stop tokens to use. | string | stop "AI assistant:" |
| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 |
| top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 |
| top_p | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) | float | top_p 0.9 |
## Prompt
### TEMPLATE
When building on top of the base models supplied by Ollama, it comes with the prompt template predefined. To override the supplied system prompt, simply add `SYSTEM insert system prompt` to change the system prompt.
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific.
### Prompt Template
#### Template Variables
`TEMPLATE` the full prompt template to be passed into the model. It may include (optionally) a system prompt, user prompt, and assistant prompt. This is used to create a full custom prompt, and syntax may be model specific.
| Variable | Description |
| --------------- | ------------------------------------------------------------------------------------------------------------ |
| `{{ .System }}` | The system prompt used to specify custom behavior, this must also be set in the Modelfile as an instruction. |
| `{{ .Prompt }}` | The incoming prompt, this is not specified in the model file and will be set based on input. |
| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |
```
TEMPLATE """
{{- if .First }}
### System:
{{ .System }}
{{- end }}
### User:
{{ .Prompt }}
### Response:
"""
SYSTEM """<system message>"""
```
### SYSTEM
The `SYSTEM` instruction specifies the system prompt to be used in the template, if applicable.
```
SYSTEM """<system message>"""
```
### ADAPTER
The `ADAPTER` instruction specifies the LoRA adapter to apply to the base model. The value of this instruction should be an absolute path or a path relative to the Modelfile and the file must be in a GGML file format. The adapter should be tuned from the base model otherwise the behaviour is undefined.
```
ADAPTER ./ollama-lora.bin
```
### LICENSE
The `LICENSE` instruction allows you to specify the legal license under which the model used with this Modelfile is shared or distributed.
```
LICENSE """
<license text>
"""
```
## Notes

docs/tutorials.md

@@ -0,0 +1,8 @@
# Tutorials
Here is a list of ways you can use Ollama with other tools to build interesting applications.
- [Using LangChain with Ollama in JavaScript](./tutorials/langchainjs.md)
- [Using LangChain with Ollama in Python](./tutorials/langchainpy.md)
Also be sure to check out the [examples](../examples) directory for more ways to use Ollama.


@@ -0,0 +1,73 @@
# Using LangChain with Ollama using JavaScript
In this tutorial, we are going to use JavaScript with LangChain and Ollama to learn about something just a touch more recent. In August 2023, there was a series of wildfires on Maui. There is no way an LLM trained before that time can know about this, since its training data would not include anything as recent as that. So we can find the [Wikipedia article about the fires](https://en.wikipedia.org/wiki/2023_Hawaii_wildfires) and ask questions about the contents.
To get started, let's just use **LangChain** to ask a simple question to a model. To do this with JavaScript, we need to install **LangChain**:
```bash
npm install langchain
```
Now we can start building out our JavaScript:
```javascript
import { Ollama } from "langchain/llms/ollama";
const ollama = new Ollama({
baseUrl: "http://localhost:11434",
model: "llama2",
});
const answer = await ollama.call(`why is the sky blue?`);
console.log(answer);
```
That will get us the same thing as if we ran `ollama run llama2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's build that part of the app.
```javascript
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";
const loader = new CheerioWebBaseLoader("https://en.wikipedia.org/wiki/2023_Hawaii_wildfires");
const data = loader.load();
```
That will load the document. Although this page is smaller than the Odyssey, it is certainly bigger than the context size for most LLMs. So we are going to need to split it into smaller pieces, and then select just the pieces relevant to our question. This is a great use for a vector datastore. In this example, we will use the **MemoryVectorStore** that is part of **LangChain**. But there is one more thing we need to get the content into the datastore. We have to run an embeddings process that converts the tokens in the text into a series of vectors. And for that, we are going to use **Tensorflow**. There is a lot of stuff going on in this one. First, install the **Tensorflow** components that we need.
```bash
npm install @tensorflow/tfjs-core@3.6.0 @tensorflow/tfjs-converter@3.6.0 @tensorflow-models/universal-sentence-encoder@1.3.3 @tensorflow/tfjs-node@4.10.0
```
If you just install those components without the version numbers, it will install the latest versions, but there are conflicts within **Tensorflow**, so you need to install the compatible versions.
```javascript
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import "@tensorflow/tfjs-node";
import { TensorFlowEmbeddings } from "langchain/embeddings/tensorflow";
// Split the text into 500 character chunks. And overlap each chunk by 20 characters
const textSplitter = new RecursiveCharacterTextSplitter({
chunkSize: 500,
chunkOverlap: 20
});
const splitDocs = await textSplitter.splitDocuments(data);
// Then use the TensorFlow Embedding to store these chunks in the datastore
const vectorStore = await MemoryVectorStore.fromDocuments(splitDocs, new TensorFlowEmbeddings());
```
To connect the datastore to a question asked to an LLM, we need to use the concept at the heart of **LangChain**: the chain. Chains are a way to connect a number of activities together to accomplish a particular task. There are a number of chain types available, but for this tutorial we are using the **RetrievalQAChain**.
```javascript
import { RetrievalQAChain } from "langchain/chains";
const retriever = vectorStore.asRetriever();
const chain = RetrievalQAChain.fromLLM(ollama, retriever);
const result = await chain.call({query: "When was Hawaii's request for a major disaster declaration approved?"});
console.log(result.text)
```
So we created a retriever, which is a way to return the chunks that match a query from a datastore. We then connect the retriever and the model via a chain. Finally, we send a query to the chain, which results in an answer using our document as a source. The answer it returned was correct: August 10, 2023.
And that is a simple introduction to what you can do with **LangChain** and **Ollama.**


@@ -0,0 +1,81 @@
# Using LangChain with Ollama in Python
Let's imagine we are studying the classics, such as **the Odyssey** by **Homer**. We might have a question about Neleus and his family. If you ask llama2 for that info, you may get something like:
> I apologize, but I'm a large language model, I cannot provide information on individuals or families that do not exist in reality. Neleus is not a real person or character, and therefore does not have a family or any other personal details. My apologies for any confusion. Is there anything else I can help you with?
This sounds like a typical censored response, but even llama2-uncensored gives a mediocre answer:
> Neleus was a legendary king of Pylos and the father of Nestor, one of the Argonauts. His mother was Clymene, a sea nymph, while his father was Neptune, the god of the sea.
So let's figure out how we can use **LangChain** with Ollama to ask our question to the actual document, the Odyssey by Homer, using Python.
Let's start by asking a simple question that we can get an answer to from the **Llama2** model using **Ollama**. First, we need to install the **LangChain** package:
`pip install langchain`
Then we can create a model and ask the question:
```python
from langchain.llms import Ollama
ollama = Ollama(base_url='http://localhost:11434',
model="llama2")
print(ollama("why is the sky blue"))
```
Notice that we are defining the model and the base URL for Ollama.
Now let's load a document to ask questions against. I'll load up the Odyssey by Homer, which you can find at Project Gutenberg. We will need **WebBaseLoader** which is part of **LangChain** and loads text from any webpage. On my machine, I also needed to install **bs4** to get that to work, so run `pip install bs4`.
```python
from langchain.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://www.gutenberg.org/files/1727/1727-h/1727-h.htm")
data = loader.load()
```
This file is pretty big. Just the preface is 3000 tokens, which means the full document won't fit into the context for the model. So we need to split it up into smaller pieces.
```python
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
```
It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We don't have embeddings built into Ollama yet, though we will be adding that soon, so for now we can use the GPT4All library for that. We will use ChromaDB in this example for a vector database. `pip install GPT4All chromadb`
```python
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
```
Now let's ask a question from the document. **Who was Neleus, and who is in his family?** Neleus is a character in the Odyssey, and the answer can be found in our text.
```python
question="Who is Neleus and who is in Neleus' family?"
docs = vectorstore.similarity_search(question)
len(docs)
```
This will output the number of matches for chunks of data similar to the search.
The next thing is to send the question and the relevant parts of the docs to the model to see if we can get a good answer. But we are stitching two parts of the process together, and that is called a chain. This means we need to define a chain:
```python
from langchain.chains import RetrievalQA
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
qachain({"query": question})
```
The answer received from this chain was:
> Neleus is a character in Homer's "Odyssey" and is mentioned in the context of Penelope's suitors. Neleus is the father of Chloris, who is married to Neleus and bears him several children, including Nestor, Chromius, Periclymenus, and Pero. Amphinomus, the son of Nisus, is also mentioned as a suitor of Penelope and is known for his good natural disposition and agreeable conversation.
It's not a perfect answer, as it implies Neleus married his daughter when actually Chloris "was the youngest daughter to Amphion son of Iasus and king of Minyan Orchomenus, and was Queen in Pylos".
I updated the chunk_overlap for the text splitter to 20 and tried again and got a much better answer:
> Neleus is a character in Homer's epic poem "The Odyssey." He is the husband of Chloris, who is the youngest daughter of Amphion son of Iasus and king of Minyan Orchomenus. Neleus has several children with Chloris, including Nestor, Chromius, Periclymenus, and Pero.
And that is a much better answer.
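For reference, that improvement comes from a single argument change on the splitter defined earlier. A minimal sketch, reusing the `data` loaded above:

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Same splitter as before, but overlapping each 500-character chunk by 20 characters.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
all_splits = text_splitter.split_documents(data)
```

The overlap keeps sentences that straddle a chunk boundary visible in both neighboring chunks, which gives the retriever a better chance of returning the full passage about Neleus and Chloris.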


@@ -1,6 +1,6 @@
# Examples
This directory contains examples that can be created and run with `ollama`.
This directory contains different examples of using Ollama.
To create a model:


@@ -0,0 +1,8 @@
# Modelfile for creating a devops engineer assistant
# Run `ollama create devops-engineer -f ./Modelfile` and then `ollama run devops-engineer` and enter a topic
FROM llama2:13b
PARAMETER temperature 1
SYSTEM """
You are a senior devops engineer, acting as an assistant. You offer help with cloud technologies like: Terraform, AWS, kubernetes, python. You answer with code examples when possible
"""


@@ -0,0 +1,20 @@
FROM llama2
SYSTEM """
You are an experienced Devops engineer focused on docker. When given specifications for a particular need or application you know the best way to host that within a docker container. For instance if someone tells you they want an nginx server to host files located at /web you will answer as follows
---start
FROM nginx:alpine
COPY /myweb /usr/share/nginx/html
EXPOSE 80
---end
Notice that the answer you should give is just the contents of the dockerfile with no explanation and there are three dashes and the word start at the beginning and 3 dashes and the word end. The full output can be piped into a file and run as is. Here is another example. The user will ask to launch a Postgres server with a password of abc123. And the response should be
---start
FROM postgres:latest
ENV POSTGRES_PASSWORD=abc123
EXPOSE 5432
---end
Again it's just the contents of the dockerfile and nothing else.
"""


@@ -0,0 +1,15 @@
# DockerIt
DockerIt is a tool to help you build and run your application in a Docker container. It consists of a model that defines the system prompt and model weights to use, along with a python script to then build the container and run the image automatically.
## Caveats
This is a simple example. It assumes the generated Dockerfile content is going to work. In many cases, even with simple web servers, it fails when trying to copy files that don't exist. It's simply an example of what you could possibly do.
## Example Usage
```bash
> python3 ./dockerit.py "simple postgres server with admin password set to 123"
Enter the name of the image: matttest
Container named happy_keller started with id: 7c201bb6c30f02b356ddbc8e2a5af9d7d7d7b8c228519c9a501d15c0bd9d6b3e
```


@@ -0,0 +1,17 @@
import requests, json, docker, io, sys
inputDescription = " ".join(sys.argv[1:])
imageName = input("Enter the name of the image: ")
client = docker.from_env()
s = requests.Session()
output=""
with s.post('http://localhost:11434/api/generate', json={'model': 'dockerit', 'prompt': inputDescription}, stream=True) as r:
for line in r.iter_lines():
if line:
j = json.loads(line)
if "response" in j:
output = output +j["response"]
output = output[output.find("---start")+9:output.find("---end")-1]
f = io.BytesIO(bytes(output, 'utf-8'))
client.images.build(fileobj=f, tag=imageName)
container = client.containers.run(imageName, detach=True)
print("Container named", container.name, " started with id: ",container.id)


@@ -0,0 +1 @@
docker


@@ -0,0 +1,21 @@
# LangChain Document QA
This example provides an interface for asking questions to a PDF document.
## Setup
```
pip install -r requirements.txt
```
## Run
```
python main.py
```
A prompt will appear, where questions may be asked:
```
Query: How many locations does WeWork have?
```


@@ -0,0 +1,61 @@
from langchain.document_loaders import OnlinePDFLoader
from langchain.vectorstores import Chroma
from langchain.embeddings import GPT4AllEmbeddings
from langchain import PromptTemplate
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import RetrievalQA
import sys
import os

class SuppressStdout:
    def __enter__(self):
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        sys.stdout = open(os.devnull, 'w')
        sys.stderr = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr

# load the pdf and split it into chunks
loader = OnlinePDFLoader("https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf")
data = loader.load()

from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)

with SuppressStdout():
    vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())

while True:
    query = input("\nQuery: ")
    if query == "exit":
        break
    if query.strip() == "":
        continue

    # Prompt
    template = """Use the following pieces of context to answer the question at the end.
    If you don't know the answer, just say that you don't know, don't try to make up an answer.
    Use three sentences maximum and keep the answer as concise as possible.
    {context}
    Question: {question}
    Helpful Answer:"""
    QA_CHAIN_PROMPT = PromptTemplate(
        input_variables=["context", "question"],
        template=template,
    )

    llm = Ollama(model="llama2:13b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=vectorstore.as_retriever(),
        chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
    )

    result = qa_chain({"query": query})

View File

@@ -0,0 +1,109 @@
absl-py==1.4.0
aiohttp==3.8.5
aiosignal==1.3.1
anyio==3.7.1
astunparse==1.6.3
async-timeout==4.0.3
attrs==23.1.0
backoff==2.2.1
beautifulsoup4==4.12.2
bs4==0.0.1
cachetools==5.3.1
certifi==2023.7.22
cffi==1.15.1
chardet==5.2.0
charset-normalizer==3.2.0
Chroma==0.2.0
chroma-hnswlib==0.7.2
chromadb==0.4.5
click==8.1.6
coloredlogs==15.0.1
cryptography==41.0.3
dataclasses-json==0.5.14
fastapi==0.99.1
filetype==1.2.0
flatbuffers==23.5.26
frozenlist==1.4.0
gast==0.4.0
google-auth==2.22.0
google-auth-oauthlib==1.0.0
google-pasta==0.2.0
gpt4all==1.0.8
grpcio==1.57.0
h11==0.14.0
h5py==3.9.0
httptools==0.6.0
humanfriendly==10.0
idna==3.4
importlib-resources==6.0.1
joblib==1.3.2
keras==2.13.1
langchain==0.0.261
langsmith==0.0.21
libclang==16.0.6
lxml==4.9.3
Markdown==3.4.4
MarkupSafe==2.1.3
marshmallow==3.20.1
monotonic==1.6
mpmath==1.3.0
multidict==6.0.4
mypy-extensions==1.0.0
nltk==3.8.1
numexpr==2.8.5
numpy==1.24.3
oauthlib==3.2.2
onnxruntime==1.15.1
openapi-schema-pydantic==1.2.4
opt-einsum==3.3.0
overrides==7.4.0
packaging==23.1
pdf2image==1.16.3
pdfminer==20191125
pdfminer.six==20221105
Pillow==10.0.0
posthog==3.0.1
protobuf==4.24.0
pulsar-client==3.2.0
pyasn1==0.5.0
pyasn1-modules==0.3.0
pycparser==2.21
pycryptodome==3.18.0
pydantic==1.10.12
PyPika==0.48.9
python-dateutil==2.8.2
python-dotenv==1.0.0
python-magic==0.4.27
PyYAML==6.0.1
regex==2023.8.8
requests==2.31.0
requests-oauthlib==1.3.1
rsa==4.9
six==1.16.0
sniffio==1.3.0
soupsieve==2.4.1
SQLAlchemy==2.0.19
starlette==0.27.0
sympy==1.12
tabulate==0.9.0
tenacity==8.2.2
tensorboard==2.13.0
tensorboard-data-server==0.7.1
tensorflow==2.13.0
tensorflow-estimator==2.13.0
tensorflow-hub==0.14.0
tensorflow-macos==2.13.0
termcolor==2.3.0
tokenizers==0.13.3
tqdm==4.66.1
typing-inspect==0.9.0
typing_extensions==4.5.0
unstructured==0.9.2
urllib3==1.26.16
uvicorn==0.23.2
uvloop==0.17.0
watchfiles==0.19.0
websockets==11.0.3
Werkzeug==2.3.6
wrapt==1.15.0
yarl==1.9.2

View File

@@ -0,0 +1,15 @@
# LangChain Web Summarization
This example summarizes the content of a website.
## Setup
```
pip install -r requirements.txt
```
## Run
```
python main.py
```
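The script below uses the `stuff` chain type, which passes the whole page to the model in a single prompt; for longer pages, a `map_reduce` variation (a sketch, not part of this example) splits the text first:
```python
from langchain.llms import Ollama
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain

loader = WebBaseLoader("https://ollama.ai/blog/run-llama2-uncensored-locally")
splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=0)
docs = splitter.split_documents(loader.load())

# summarize each chunk, then summarize the summaries
chain = load_summarize_chain(Ollama(model="llama2"), chain_type="map_reduce")
print(chain.run(docs))
```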

View File

@@ -0,0 +1,12 @@
from langchain.llms import Ollama
from langchain.document_loaders import WebBaseLoader
from langchain.chains.summarize import load_summarize_chain
loader = WebBaseLoader("https://ollama.ai/blog/run-llama2-uncensored-locally")
docs = loader.load()
llm = Ollama(model="llama2")
chain = load_summarize_chain(llm, chain_type="stuff")
result = chain.run(docs)
print(result)

View File

@@ -0,0 +1,2 @@
langchain==0.0.259
bs4==0.0.1

View File

@@ -0,0 +1,21 @@
# LangChain
This example is a basic "hello world" of using LangChain with Ollama.
## Setup
```
pip install -r requirements.txt
```
## Run
```
python main.py
```
Running this example will print the response for "hello":
```
Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat?
```
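The `main.py` below returns the whole reply at once; to stream tokens to the terminal as they are generated (the same callback pattern the document QA example uses), a minimal sketch is:
```python
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

llm = Ollama(model="llama2",
             callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
llm.predict("hello")  # tokens are printed by the callback as they arrive
```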

View File

@@ -0,0 +1,4 @@
from langchain.llms import Ollama
llm = Ollama(model="llama2")
res = llm.predict("hello")
print(res)

View File

@@ -0,0 +1 @@
langchain==0.0.259

170
examples/privategpt/.gitignore vendored Normal file
View File

@@ -0,0 +1,170 @@
# OSX
.DS_STORE
# Models
models/
# Local Chroma db
.chroma/
db/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

201
examples/privategpt/LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,91 @@
# PrivateGPT with Llama 2 uncensored
https://github.com/jmorganca/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b
> Note: this example is a slightly modified version of PrivateGPT, using models such as Llama 2 Uncensored. All credit for PrivateGPT goes to its creator, Iván Martínez; you can find his GitHub repo [here](https://github.com/imartinez/privateGPT).
### Setup
Set up a virtual environment (optional):
```
python3 -m venv .venv
source .venv/bin/activate
```
Install the Python dependencies:
```shell
pip install -r requirements.txt
```
Pull the model you'd like to use:
```
ollama pull llama2-uncensored
```
### Getting WeWork's latest quarterly earnings report (10-Q)
```
mkdir source_documents
curl https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf -o source_documents/wework.pdf
```
### Ingesting files
```shell
python ingest.py
```
Output should look like this:
```shell
Creating new vectorstore
Loading documents from source_documents
Loading new documents: 100%|██████████████████████| 1/1 [00:01<00:00, 1.73s/it]
Loaded 1 new documents from source_documents
Split into 90 chunks of text (max. 500 tokens each)
Creating embeddings. May take some minutes...
Using embedded DuckDB with persistence: data will be stored in: db
Ingestion complete! You can now run privateGPT.py to query your documents
```
### Ask questions
```shell
python privateGPT.py
Enter a query: How many locations does WeWork have?
> Answer (took 17.7 s.):
As of June 2023, WeWork has 777 locations worldwide, including 610 Consolidated Locations (as defined in the section entitled Key Performance Indicators).
```
### Try a different model:
```
ollama pull llama2:13b
MODEL=llama2:13b python privateGPT.py
```
## Adding more files
Put any and all of your files into the `source_documents` directory.
The supported extensions are:
- `.csv`: CSV,
- `.docx`: Word Document,
- `.doc`: Word Document,
- `.enex`: EverNote,
- `.eml`: Email,
- `.epub`: EPub,
- `.html`: HTML File,
- `.md`: Markdown,
- `.msg`: Outlook Message,
- `.odt`: Open Document Text,
- `.pdf`: Portable Document Format (PDF),
- `.pptx` : PowerPoint Document,
- `.ppt` : PowerPoint Document,
- `.txt`: Text file (UTF-8),

View File

@@ -0,0 +1,12 @@
import os
from chromadb.config import Settings

# Define the folder for storing database
PERSIST_DIRECTORY = os.environ.get('PERSIST_DIRECTORY', 'db')

# Define the Chroma settings
CHROMA_SETTINGS = Settings(
    chroma_db_impl='duckdb+parquet',
    persist_directory=PERSIST_DIRECTORY,
    anonymized_telemetry=False
)

161
examples/privategpt/ingest.py Executable file
View File

@@ -0,0 +1,161 @@
#!/usr/bin/env python3
import os
import glob
from typing import List
from multiprocessing import Pool
from tqdm import tqdm

from langchain.document_loaders import (
    CSVLoader,
    EverNoteLoader,
    PyMuPDFLoader,
    TextLoader,
    UnstructuredEmailLoader,
    UnstructuredEPubLoader,
    UnstructuredHTMLLoader,
    UnstructuredMarkdownLoader,
    UnstructuredODTLoader,
    UnstructuredPowerPointLoader,
    UnstructuredWordDocumentLoader,
)

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document
from constants import CHROMA_SETTINGS

# Load environment variables
persist_directory = os.environ.get('PERSIST_DIRECTORY', 'db')
source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents')
embeddings_model_name = os.environ.get('EMBEDDINGS_MODEL_NAME', 'all-MiniLM-L6-v2')
chunk_size = 500
chunk_overlap = 50

# Custom document loaders
class MyElmLoader(UnstructuredEmailLoader):
    """Wrapper to fallback to text/plain when default does not work"""

    def load(self) -> List[Document]:
        """Wrapper adding fallback for elm without html"""
        try:
            try:
                doc = UnstructuredEmailLoader.load(self)
            except ValueError as e:
                if 'text/html content not found in email' in str(e):
                    # Try plain text
                    self.unstructured_kwargs["content_source"] = "text/plain"
                    doc = UnstructuredEmailLoader.load(self)
                else:
                    raise
        except Exception as e:
            # Add file_path to exception message
            raise type(e)(f"{self.file_path}: {e}") from e
        return doc

# Map file extensions to document loaders and their arguments
LOADER_MAPPING = {
    ".csv": (CSVLoader, {}),
    # ".docx": (Docx2txtLoader, {}),
    ".doc": (UnstructuredWordDocumentLoader, {}),
    ".docx": (UnstructuredWordDocumentLoader, {}),
    ".enex": (EverNoteLoader, {}),
    ".eml": (MyElmLoader, {}),
    ".epub": (UnstructuredEPubLoader, {}),
    ".html": (UnstructuredHTMLLoader, {}),
    ".md": (UnstructuredMarkdownLoader, {}),
    ".odt": (UnstructuredODTLoader, {}),
    ".pdf": (PyMuPDFLoader, {}),
    ".ppt": (UnstructuredPowerPointLoader, {}),
    ".pptx": (UnstructuredPowerPointLoader, {}),
    ".txt": (TextLoader, {"encoding": "utf8"}),
    # Add more mappings for other file extensions and loaders as needed
}

def load_single_document(file_path: str) -> List[Document]:
    ext = "." + file_path.rsplit(".", 1)[-1]
    if ext in LOADER_MAPPING:
        loader_class, loader_args = LOADER_MAPPING[ext]
        loader = loader_class(file_path, **loader_args)
        return loader.load()

    raise ValueError(f"Unsupported file extension '{ext}'")

def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
    """
    Loads all documents from the source documents directory, ignoring specified files
    """
    all_files = []
    for ext in LOADER_MAPPING:
        all_files.extend(
            glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
        )
    filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]

    with Pool(processes=os.cpu_count()) as pool:
        results = []
        with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
            for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
                results.extend(docs)
                pbar.update()

    return results

def process_documents(ignored_files: List[str] = []) -> List[Document]:
    """
    Load documents and split in chunks
    """
    print(f"Loading documents from {source_directory}")
    documents = load_documents(source_directory, ignored_files)
    if not documents:
        print("No new documents to load")
        exit(0)
    print(f"Loaded {len(documents)} new documents from {source_directory}")
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    texts = text_splitter.split_documents(documents)
    print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
    return texts

def does_vectorstore_exist(persist_directory: str) -> bool:
    """
    Checks if vectorstore exists
    """
    if os.path.exists(os.path.join(persist_directory, 'index')):
        if os.path.exists(os.path.join(persist_directory, 'chroma-collections.parquet')) and os.path.exists(os.path.join(persist_directory, 'chroma-embeddings.parquet')):
            list_index_files = glob.glob(os.path.join(persist_directory, 'index/*.bin'))
            list_index_files += glob.glob(os.path.join(persist_directory, 'index/*.pkl'))
            # At least 3 documents are needed in a working vectorstore
            if len(list_index_files) > 3:
                return True
    return False

def main():
    # Create embeddings
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)

    if does_vectorstore_exist(persist_directory):
        # Update and store locally vectorstore
        print(f"Appending to existing vectorstore at {persist_directory}")
        db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
        collection = db.get()
        texts = process_documents([metadata['source'] for metadata in collection['metadatas']])
        print(f"Creating embeddings. May take some minutes...")
        db.add_documents(texts)
    else:
        # Create and store locally vectorstore
        print("Creating new vectorstore")
        texts = process_documents()
        print(f"Creating embeddings. May take some minutes...")
        db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)
    db.persist()
    db = None

    print(f"Ingestion complete! You can now run privateGPT.py to query your documents")

if __name__ == "__main__":
    main()
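A quick way to check that a single file will load cleanly before running a full ingest (a usage sketch, not part of ingest.py; it assumes you run it from the `examples/privategpt` directory so `ingest` is importable):
```python
from ingest import load_single_document

docs = load_single_document("source_documents/wework.pdf")
print(f"loaded {len(docs)} document section(s)")
```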

3833
examples/privategpt/poetry.lock generated Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,71 @@
#!/usr/bin/env python3
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import Ollama
import os
import argparse
import time

model = os.environ.get("MODEL", "llama2-uncensored")
# For embeddings model, the example uses a sentence-transformers model
# https://www.sbert.net/docs/pretrained_models.html
# "The all-mpnet-base-v2 model provides the best quality, while all-MiniLM-L6-v2 is 5 times faster and still offers good quality."
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME", "all-MiniLM-L6-v2")
persist_directory = os.environ.get("PERSIST_DIRECTORY", "db")
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS', 4))

from constants import CHROMA_SETTINGS

def main():
    # Parse the command line arguments
    args = parse_arguments()
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
    db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
    retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
    # activate/deactivate the streaming StdOut callback for LLMs
    callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]

    llm = Ollama(model=model, callbacks=callbacks)

    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=not args.hide_source)
    # Interactive questions and answers
    while True:
        query = input("\nEnter a query: ")
        if query == "exit":
            break
        if query.strip() == "":
            continue

        # Get the answer from the chain
        start = time.time()
        res = qa(query)
        answer, docs = res['result'], [] if args.hide_source else res['source_documents']
        end = time.time()

        # Print the result
        print("\n\n> Question:")
        print(query)
        print(answer)

        # Print the relevant sources used for the answer
        for document in docs:
            print("\n> " + document.metadata["source"] + ":")
            print(document.page_content)

def parse_arguments():
    parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
                                                 'using the power of LLMs.')
    parser.add_argument("--hide-source", "-S", action='store_true',
                        help='Use this flag to disable printing of source documents used for answers.')

    parser.add_argument("--mute-stream", "-M",
                        action='store_true',
                        help='Use this flag to disable the streaming StdOut callback for LLMs.')

    return parser.parse_args()

if __name__ == "__main__":
    main()
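For a single, non-interactive question, a sketch assembled from the same components above (it assumes `ingest.py` has already populated the `db` directory) could look like:
```python
import os
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import Ollama
from constants import CHROMA_SETTINGS

embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma(persist_directory="db", embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
qa = RetrievalQA.from_chain_type(
    llm=Ollama(model=os.environ.get("MODEL", "llama2-uncensored")),
    chain_type="stuff",
    retriever=db.as_retriever(search_kwargs={"k": 4}),
)
print(qa("How many locations does WeWork have?")["result"])
```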

View File

@@ -0,0 +1,26 @@
[tool.poetry]
name = "privategpt"
version = "0.1.0"
description = ""
authors = ["Ivan Martinez <ivanmartit@gmail.com>"]
license = "Apache Version 2.0"
readme = "README.md"
[tool.poetry.dependencies]
python = "^3.10"
langchain = "0.0.261"
gpt4all = "^1.0.3"
chromadb = "^0.3.26"
PyMuPDF = "^1.22.5"
python-dotenv = "^1.0.0"
unstructured = "^0.8.0"
extract-msg = "^0.41.5"
tabulate = "^0.9.0"
pandoc = "^2.3"
pypandoc = "^1.11"
tqdm = "^4.65.0"
sentence-transformers = "^2.2.2"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

File diff suppressed because it is too large

38
examples/python/client.py Normal file
View File

@@ -0,0 +1,38 @@
import json
import requests

# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
model = 'llama2'  # TODO: update this for whatever model you wish to use

def generate(prompt, context):
    r = requests.post('http://localhost:11434/api/generate',
                      json={
                          'model': model,
                          'prompt': prompt,
                          'context': context,
                      },
                      stream=True)
    r.raise_for_status()

    for line in r.iter_lines():
        body = json.loads(line)
        response_part = body.get('response', '')
        # the response streams one token at a time, print that as we receive it
        print(response_part, end='', flush=True)

        if 'error' in body:
            raise Exception(body['error'])

        if body.get('done', False):
            return body['context']

def main():
    context = []  # the context stores a conversation history, you can use this to make the model more context aware
    while True:
        user_input = input("Enter a prompt: ")
        print()
        context = generate(user_input, context)
        print()

if __name__ == "__main__":
    main()
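If you'd rather collect the whole reply instead of printing tokens as they stream, a variation on `generate` above (a sketch, not part of client.py) is:
```python
import json
import requests

def generate_full(prompt, context, model='llama2'):
    # same endpoint and payload as generate(), but accumulate the streamed tokens
    r = requests.post('http://localhost:11434/api/generate',
                      json={'model': model, 'prompt': prompt, 'context': context},
                      stream=True)
    r.raise_for_status()
    full_response = ''
    for line in r.iter_lines():
        body = json.loads(line)
        if 'error' in body:
            raise Exception(body['error'])
        full_response += body.get('response', '')
        if body.get('done', False):
            return full_response, body['context']
```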

View File

@@ -3,5 +3,5 @@
FROM nous-hermes
SYSTEM """
You are a content marketer who needs to come up with a short but succinct tweet. Make sure to include the appropriate hashtags and links. Sometimes when appropriate, describe a meme that can be includes as well. All answers should be in the form of a tweet which has a max size of 280 characters. Every instruction will be the topic to create a tweet about.
You are a content marketer who needs to come up with a short but succinct tweet. Make sure to include the appropriate hashtags and links. Sometimes when appropriate, describe a meme that can be included as well. All answers should be in the form of a tweet which has a max size of 280 characters. Every instruction will be the topic to create a tweet about.
"""

183
format/openssh.go Normal file
View File

@@ -0,0 +1,183 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code originally from https://go-review.googlesource.com/c/crypto/+/218620
// TODO: replace with upstream once the above change is merged and released.
package format
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"encoding/binary"
"encoding/pem"
"fmt"
"math/big"
"golang.org/x/crypto/ssh"
)
const privateKeyAuthMagic = "openssh-key-v1\x00"
type openSSHEncryptedPrivateKey struct {
CipherName string
KDFName string
KDFOptions string
KeysCount uint32
PubKey []byte
KeyBlocks []byte
}
type openSSHPrivateKey struct {
Check1 uint32
Check2 uint32
Keytype string
Rest []byte `ssh:"rest"`
}
type openSSHRSAPrivateKey struct {
N *big.Int
E *big.Int
D *big.Int
Iqmp *big.Int
P *big.Int
Q *big.Int
Comment string
Pad []byte `ssh:"rest"`
}
type openSSHECDSAPrivateKey struct {
Curve string
Pub []byte
D *big.Int
Comment string
Pad []byte `ssh:"rest"`
}
type openSSHEd25519PrivateKey struct {
Pub []byte
Priv []byte
Comment string
Pad []byte `ssh:"rest"`
}
func OpenSSHPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) {
var check uint32
if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil {
return nil, err
}
var pk1 openSSHPrivateKey
pk1.Check1 = check
pk1.Check2 = check
var w openSSHEncryptedPrivateKey
w.KeysCount = 1
if k, ok := key.(*ed25519.PrivateKey); ok {
key = *k
}
switch k := key.(type) {
case *rsa.PrivateKey:
e := new(big.Int).SetInt64(int64(k.E))
key := openSSHRSAPrivateKey{
N: k.N,
E: e,
D: k.D,
Iqmp: k.Precomputed.Qinv,
P: k.Primes[0],
Q: k.Primes[1],
Comment: comment,
}
pk1.Keytype = ssh.KeyAlgoRSA
pk1.Rest = ssh.Marshal(key)
w.PubKey = ssh.Marshal(struct {
KeyType string
E *big.Int
N *big.Int
}{
ssh.KeyAlgoRSA, e, k.N,
})
case *ecdsa.PrivateKey:
var curve, keytype string
switch name := k.Curve.Params().Name; name {
case "P-256":
curve = "nistp256"
keytype = ssh.KeyAlgoECDSA256
case "P-384":
curve = "nistp384"
keytype = ssh.KeyAlgoECDSA384
case "P-521":
curve = "nistp521"
keytype = ssh.KeyAlgoECDSA521
default:
return nil, fmt.Errorf("ssh: unknown curve %q", name)
}
pub := elliptic.Marshal(k.Curve, k.X, k.Y)
key := openSSHECDSAPrivateKey{
Curve: curve,
Pub: pub,
D: k.D,
Comment: comment,
}
pk1.Keytype = keytype
pk1.Rest = ssh.Marshal(key)
w.PubKey = ssh.Marshal(struct {
KeyType string
Curve string
Pub []byte
}{
keytype, curve, pub,
})
case ed25519.PrivateKey:
pub, priv := k[32:], k
key := openSSHEd25519PrivateKey{
Pub: pub,
Priv: priv,
Comment: comment,
}
pk1.Keytype = ssh.KeyAlgoED25519
pk1.Rest = ssh.Marshal(key)
w.PubKey = ssh.Marshal(struct {
KeyType string
Pub []byte
}{
ssh.KeyAlgoED25519, pub,
})
default:
return nil, fmt.Errorf("ssh: unknown key type %T", k)
}
w.KeyBlocks = openSSHPadding(ssh.Marshal(pk1), 8)
w.CipherName, w.KDFName, w.KDFOptions = "none", "none", ""
return &pem.Block{
Type: "OPENSSH PRIVATE KEY",
Bytes: append([]byte(privateKeyAuthMagic), ssh.Marshal(w)...),
}, nil
}
func openSSHPadding(block []byte, blocksize int) []byte {
for i, j := 0, len(block); (j+i)%blocksize != 0; i++ {
block = append(block, byte(i+1))
}
return block
}

View File

@@ -1 +0,0 @@
llama/ggml-metal.metal

3
go.mod
View File

@@ -14,7 +14,6 @@ require (
require github.com/rivo/uniseg v0.2.0 // indirect
require (
dario.cat/mergo v1.0.0
github.com/bytedance/sonic v1.9.1 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/chzyer/readline v1.5.1
@@ -33,6 +32,7 @@ require (
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
@@ -43,6 +43,7 @@ require (
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0
golang.org/x/text v0.10.0 // indirect
gonum.org/v1/gonum v0.13.0
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

6
go.sum
View File

@@ -1,5 +1,3 @@
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
@@ -80,6 +78,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
@@ -141,6 +141,8 @@ golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=

1
library/.gitignore vendored
View File

@@ -1 +0,0 @@
models

View File

@@ -1,7 +0,0 @@
https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_0.bin e84705205f71dd55be7b24a778f248f0eda9999a125d313358c087e092d83148
https://huggingface.co/TheBloke/Nous-Hermes-13B-GGML/resolve/main/nous-hermes-13b.ggmlv3.q4_0.bin d1735b93e1dc503f1045ccd6c8bd73277b18ba892befd1dc29e9b9a7822ed998
https://huggingface.co/TheBloke/vicuna-7B-v1.3-GGML/resolve/main/vicuna-7b-v1.3.ggmlv3.q4_0.bin 23ce5ed290b56a19305178b9ada2c3d96036bd69a6c18304b6158eb6672d6c0f
https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GGML/resolve/main/Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin 1f08b147a5bce41cfcbb3fd5d51ba765dea1786e15b5655ab69ba3a337a893b7
https://huggingface.co/TheBloke/Llama-2-7B-GGML/resolve/main/llama-2-7b.ggmlv3.q4_0.bin bfa26d855e44629c4cf919985e90bd7fa03b77eea1676791519e39a4d45fd4d5
https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q4_0.bin 8daa9615cce30c259a9555b1cc250d461d1bc69980a274b44d7eda0be78076d8
https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin f79142715bc9539a2edbb4b253548db8b34fac22736593eeaa28555874476e30

View File

@@ -1,147 +0,0 @@
FROM ../models/llama-2-7b-chat.ggmlv3.q4_0.bin
TEMPLATE """
{{- if .First }}
<<SYS>>
{{ .System }}
<</SYS>>
{{- end }}
[INST] {{ .Prompt }} [/INST]
"""
SYSTEM """
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
"""
LICENSE """
Llama 2 Community License Agreement
Llama 2 Version Release Date: July 18, 2023
“Agreement” means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.
“Documentation” means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.
“Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entitys behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
“Llama 2” means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.
“Llama Materials” means, collectively, Metas proprietary Llama 2 and Documentation (and any portion thereof) made available under this Agreement.
“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).
By clicking “I Accept” below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.
1. License Rights and Redistribution.
a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Metas intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.
b. Redistribution and Use.
i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party.
ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.
iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a “Notice” text file distributed as a part of such copies: “Llama 2 is licensed under the LLAMA 2 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.”
iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement.
v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof).
2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensees affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.
3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
5. Intellectual Property.
a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials.
b. Subject to Metas ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.
c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.
6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.
7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
"""
LICENSE """
Llama 2 Acceptable Use Policy
Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at ai.meta.com/llama/use-policy.
Prohibited Uses
We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
1. Violate the law or others rights, including to:
a. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
i. Violence or terrorism
ii. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
b. Human trafficking, exploitation, and sexual violence
iii. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
iv. Sexual solicitation
vi. Any other criminal activity
c. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
d. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
e. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
f. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
g. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
h. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
a. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
b. Guns and illegal weapons (including weapon development)
c. Illegal drugs and regulated/controlled substances
d. Operation of critical infrastructure, transportation technologies, or heavy machinery
e. Self-harm or harm to others, including suicide, cutting, and eating disorders
f. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
a. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
b. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
c. Generating, promoting, or further distributing spam
d. Impersonating another individual without consent, authorization, or legal right
e. Representing that the use of Llama 2 or outputs are human-generated
f. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
4. Fail to appropriately disclose to end users any known dangers of your AI system
Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
Reporting issues with the model: github.com/facebookresearch/llama
Reporting risky content generated by the model: developers.facebook.com/llama_output_feedback
Reporting bugs and security concerns: facebook.com/whitehat/info
Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@meta.com
"""

View File

@@ -1,147 +0,0 @@
FROM ../models/llama-2-13b-chat.ggmlv3.q4_0.bin
TEMPLATE """
{{- if .First }}
<<SYS>>
{{ .System }}
<</SYS>>
{{- end }}
[INST] {{ .Prompt }} [/INST]
"""
SYSTEM """
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
"""
LICENSE """
Llama 2 Community License Agreement
Llama 2 Version Release Date: July 18, 2023
“Agreement” means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.
“Documentation” means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.
“Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entitys behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
“Llama 2” means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.
“Llama Materials” means, collectively, Metas proprietary Llama 2 and Documentation (and any portion thereof) made available under this Agreement.
“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).
By clicking “I Accept” below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.
1. License Rights and Redistribution.
a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Metas intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.
b. Redistribution and Use.
i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party.
ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.
iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a “Notice” text file distributed as a part of such copies: “Llama 2 is licensed under the LLAMA 2 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.”
iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement.
v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof).
2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensees affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.
3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
5. Intellectual Property.
a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials.
b. Subject to Metas ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.
c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.
6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.
7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
"""
LICENSE """
Llama 2 Acceptable Use Policy
Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at ai.meta.com/llama/use-policy.
Prohibited Uses
We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
1. Violate the law or others' rights, including to:
a. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
i. Violence or terrorism
ii. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
iii. Human trafficking, exploitation, and sexual violence
iv. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
v. Sexual solicitation
vi. Any other criminal activity
b. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
c. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
d. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
e. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
f. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
g. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
a. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
b. Guns and illegal weapons (including weapon development)
c. Illegal drugs and regulated/controlled substances
d. Operation of critical infrastructure, transportation technologies, or heavy machinery
e. Self-harm or harm to others, including suicide, cutting, and eating disorders
f. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
a. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
b. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
c. Generating, promoting, or further distributing spam
d. Impersonating another individual without consent, authorization, or legal right
e. Representing that the use of Llama 2 or outputs are human-generated
f. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
4. Fail to appropriately disclose to end users any known dangers of your AI system
Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
Reporting issues with the model: github.com/facebookresearch/llama
Reporting risky content generated by the model: developers.facebook.com/llama_output_feedback
Reporting bugs and security concerns: facebook.com/whitehat/info
Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@meta.com
"""

View File

@@ -1,147 +0,0 @@
FROM ../models/llama-2-7b-chat.ggmlv3.q4_0.bin
TEMPLATE """
{{- if .First }}
<<SYS>>
{{ .System }}
<</SYS>>
{{- end }}
[INST] {{ .Prompt }} [/INST]
"""
SYSTEM """
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
"""
LICENSE """
Llama 2 Community License Agreement
Llama 2 Version Release Date: July 18, 2023
“Agreement” means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.
“Documentation” means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.
“Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.
“Llama 2” means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/.
“Llama Materials” means, collectively, Meta's proprietary Llama 2 and Documentation (and any portion thereof) made available under this Agreement.
“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).
By clicking “I Accept” below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement.
1. License Rights and Redistribution.
a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.
b. Redistribution and Use.
i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party.
ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.
iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a “Notice” text file distributed as a part of such copies: “Llama 2 is licensed under the LLAMA 2 Community License, Copyright © Meta Platforms, Inc. All Rights Reserved.”
iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement.
v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof).
2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.
3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.
4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.
5. Intellectual Property.
a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials.
b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.
c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.
6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.
7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.
"""
LICENSE """
Llama 2 Acceptable Use Policy
Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at ai.meta.com/llama/use-policy.
Prohibited Uses
We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to:
1. Violate the law or others' rights, including to:
a. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:
i. Violence or terrorism
ii. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material
iii. Human trafficking, exploitation, and sexual violence
iv. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.
v. Sexual solicitation
vi. Any other criminal activity
b. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals
c. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services
d. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices
e. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws
f. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials
g. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system
2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following:
a. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State
b. Guns and illegal weapons (including weapon development)
c. Illegal drugs and regulated/controlled substances
d. Operation of critical infrastructure, transportation technologies, or heavy machinery
e. Self-harm or harm to others, including suicide, cutting, and eating disorders
f. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual
3. Intentionally deceive or mislead others, including use of Llama 2 related to the following:
a. Generating, promoting, or furthering fraud or the creation or promotion of disinformation
b. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content
c. Generating, promoting, or further distributing spam
d. Impersonating another individual without consent, authorization, or legal right
e. Representing that the use of Llama 2 or outputs are human-generated
f. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement
4. Fail to appropriately disclose to end users any known dangers of your AI system
Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means:
Reporting issues with the model: github.com/facebookresearch/llama
Reporting risky content generated by the model: developers.facebook.com/llama_output_feedback
Reporting bugs and security concerns: facebook.com/whitehat/info
Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@meta.com
"""

View File

@@ -1,7 +0,0 @@
FROM ../models/nous-hermes-13b.ggmlv3.q4_0.bin
TEMPLATE """
### Instruction:
{{ .Prompt }}
### Response:
"""

View File

@@ -1,14 +0,0 @@
FROM ../models/orca-mini-3b.ggmlv3.q4_0.bin
TEMPLATE """
{{- if .First }}
### System:
{{ .System }}
{{- end }}
### User:
{{ .Prompt }}
### Response:
"""
SYSTEM """You are an AI assistant that follows instruction extremely well. Help as much as you can."""

View File

@@ -1,11 +0,0 @@
FROM ../models/vicuna-7b-v1.3.ggmlv3.q4_0.bin
TEMPLATE """
{{ if .First }}
{{ .System }}
{{- end }}
USER: {{ .Prompt }}
ASSISTANT:
"""
SYSTEM """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."""

View File

@@ -1,5 +0,0 @@
FROM ../models/Wizard-Vicuna-13B-Uncensored.ggmlv3.q4_0.bin
TEMPLATE """
USER: {{ .Prompt }}
ASSISTANT:
"""

View File

@@ -1,52 +0,0 @@
#!/bin/bash
mkdir -p models
# download binaries
function process_line {
local url=$1
local checksum=$2
# Get the filename from the URL
local filename=models/$(basename $url)
echo "verifying $filename..."
# If the file exists, compute its checksum
if [ -f $filename ]; then
local existing_checksum=$(shasum -a 256 $filename | cut -d ' ' -f1)
fi
# If the file does not exist, or its checksum does not match, download it
if [ ! -f $filename ] || [ $existing_checksum != $checksum ]; then
echo "downloading $filename..."
# Download the file
curl -L $url -o $filename
# Compute the SHA256 hash of the downloaded file
local computed_checksum=$(shasum -a 256 $filename | cut -d ' ' -f1)
# Verify the checksum
if [ $computed_checksum != $checksum ]; then
echo "Checksum verification failed for $filename"
exit 1
fi
fi
}
while IFS=' ' read -r url checksum
do
process_line $url $checksum
done < "downloads"
# create and publish the models
for file in modelfiles/*; do
if [ -f "$file" ]; then
filename=$(basename "$file")
echo $filename
ollama create "library/${filename}" -f "$file"
ollama push "${filename}"
fi
done
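For reference, the verification step of the script above (hash a local file with SHA-256 and compare it to the digest listed in the downloads manifest) translates roughly to the following Go sketch; the function name and placeholder values are illustrative assumptions, not part of the repository:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"os"
)

// verifyChecksum reports whether the file at path exists and its SHA-256
// digest matches the expected hex string (the Go analogue of the
// shasum -a 256 check in the script above).
func verifyChecksum(path, expected string) (bool, error) {
	f, err := os.Open(path)
	if os.IsNotExist(err) {
		return false, nil // missing file: it simply needs to be downloaded
	} else if err != nil {
		return false, err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return false, err
	}
	return hex.EncodeToString(h.Sum(nil)) == expected, nil
}

func main() {
	// placeholder path and digest, for illustration only
	ok, err := verifyChecksum("models/llama-2-7b-chat.ggmlv3.q4_0.bin", "<expected sha256 hex digest>")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("checksum ok:", ok)
}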

View File

@@ -1,282 +0,0 @@
package llama
/*
#cgo CPPFLAGS: -O3 -DNDEBUG=1
#cgo CXXFLAGS: -std=c++11
#cgo darwin CPPFLAGS: -DGGML_USE_METAL=1 -DGGML_METAL_NDEBUG=1
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#include <stdlib.h>
#include "llama.h"
struct llama_sample_options
{
float repeat_penalty;
float frequency_penalty;
float presence_penalty;
float temperature;
int32_t top_k;
float top_p;
float tfs_z;
float typical_p;
int mirostat;
float mirostat_tau;
float mirostat_eta;
};
llama_token llama_sample(
struct llama_context *ctx,
struct llama_token_data *candidates,
size_t n_candidates,
const llama_token *last_tokens,
size_t n_last_tokens,
struct llama_sample_options *opts)
{
llama_token_data_array candidates_p = {
candidates,
n_candidates,
false,
};
llama_sample_repetition_penalty(
ctx, &candidates_p,
last_tokens, n_last_tokens,
opts->repeat_penalty);
llama_sample_frequency_and_presence_penalties(
ctx, &candidates_p,
last_tokens, n_last_tokens,
opts->frequency_penalty, opts->presence_penalty);
if (opts->temperature <= 0) {
return llama_sample_token_greedy(ctx, &candidates_p);
}
if (opts->mirostat == 1) {
int mirostat_m = 100;
float mirostat_mu = 2.0f * opts->mirostat_tau;
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token_mirostat(
ctx, &candidates_p,
opts->mirostat_tau, opts->mirostat_eta,
mirostat_m, &mirostat_mu);
} else if (opts->mirostat == 2) {
float mirostat_mu = 2.0f * opts->mirostat_tau;
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token_mirostat_v2(
ctx, &candidates_p,
opts->mirostat_tau, opts->mirostat_eta,
&mirostat_mu);
} else {
llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token(ctx, &candidates_p);
}
}
*/
import "C"
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"unicode/utf8"
"unsafe"
"github.com/jmorganca/ollama/api"
)
type llama struct {
params *C.struct_llama_context_params
model *C.struct_llama_model
ctx *C.struct_llama_context
api.Options
}
func New(model string, opts api.Options) (*llama, error) {
if _, err := os.Stat(model); err != nil {
return nil, err
}
llm := llama{Options: opts}
C.llama_backend_init(C.bool(llm.UseNUMA))
params := C.llama_context_default_params()
params.seed = C.uint(llm.Seed)
params.n_ctx = C.int(llm.NumCtx)
params.n_batch = C.int(llm.NumBatch)
params.n_gpu_layers = C.int(llm.NumGPU)
params.main_gpu = C.int(llm.MainGPU)
params.low_vram = C.bool(llm.LowVRAM)
params.f16_kv = C.bool(llm.F16KV)
params.logits_all = C.bool(llm.LogitsAll)
params.vocab_only = C.bool(llm.VocabOnly)
params.use_mmap = C.bool(llm.UseMMap)
params.use_mlock = C.bool(llm.UseMLock)
params.embedding = C.bool(llm.EmbeddingOnly)
llm.params = &params
cModel := C.CString(model)
defer C.free(unsafe.Pointer(cModel))
llm.model = C.llama_load_model_from_file(cModel, params)
if llm.model == nil {
return nil, errors.New("failed to load model")
}
llm.ctx = C.llama_new_context_with_model(llm.model, params)
if llm.ctx == nil {
return nil, errors.New("failed to create context")
}
// warm up the model
bos := []C.llama_token{C.llama_token_bos()}
C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
C.llama_reset_timings(llm.ctx)
return &llm, nil
}
func (llm *llama) Close() {
defer C.llama_free_model(llm.model)
defer C.llama_free(llm.ctx)
C.llama_print_timings(llm.ctx)
}
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
if input := llm.tokenize(prompt); input != nil {
embd := make([]C.llama_token, len(ctx))
for i := range ctx {
embd[i] = C.llama_token(ctx[i])
}
return llm.generate(append(embd, input...), fn)
}
return errors.New("llama: tokenize")
}
func (llm *llama) tokenize(prompt string) []C.llama_token {
cPrompt := C.CString(prompt)
defer C.free(unsafe.Pointer(cPrompt))
tokens := make([]C.llama_token, llm.NumCtx)
if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(tokens), C.int(len(tokens)), true); n > 0 {
return tokens[:n]
}
return nil
}
func (llm *llama) detokenize(tokens ...C.llama_token) string {
var sb strings.Builder
for _, token := range tokens {
sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, token)))
}
return sb.String()
}
func (llm *llama) generate(input []C.llama_token, fn func(api.GenerateResponse)) error {
var opts C.struct_llama_sample_options
opts.repeat_penalty = C.float(llm.RepeatPenalty)
opts.frequency_penalty = C.float(llm.FrequencyPenalty)
opts.presence_penalty = C.float(llm.PresencePenalty)
opts.temperature = C.float(llm.Temperature)
opts.top_k = C.int(llm.TopK)
opts.top_p = C.float(llm.TopP)
opts.tfs_z = C.float(llm.TFSZ)
opts.typical_p = C.float(llm.TypicalP)
opts.mirostat = C.int(llm.Mirostat)
opts.mirostat_tau = C.float(llm.MirostatTau)
opts.mirostat_eta = C.float(llm.MirostatEta)
output := deque[C.llama_token]{capacity: llm.NumCtx}
context := deque[int]{capacity: llm.NumCtx / 2}
for _, in := range input {
context.PushLeft(int(in))
}
var b bytes.Buffer
for C.llama_get_kv_cache_token_count(llm.ctx) < C.int(llm.NumCtx) {
if retval := C.llama_eval(llm.ctx, unsafe.SliceData(input), C.int(len(input)), C.llama_get_kv_cache_token_count(llm.ctx), C.int(llm.NumThread)); retval != 0 {
return errors.New("llama: eval")
}
token, err := llm.sample(output, &opts)
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
b.WriteString(llm.detokenize(token))
if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
// call the callback
fn(api.GenerateResponse{
Response: b.String(),
})
output.PushLeft(token)
context.PushLeft(int(token))
b.Reset()
}
input = []C.llama_token{token}
}
dur := func(ms float64) time.Duration {
d, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
if err != nil {
panic(err)
}
return d
}
timings := C.llama_get_timings(llm.ctx)
fn(api.GenerateResponse{
Done: true,
Context: context.Data(),
PromptEvalCount: int(timings.n_p_eval),
PromptEvalDuration: dur(float64(timings.t_p_eval_ms)),
EvalCount: int(timings.n_eval),
EvalDuration: dur(float64(timings.t_eval_ms)),
})
return nil
}
func (llm *llama) sample(output deque[C.llama_token], opts *C.struct_llama_sample_options) (C.llama_token, error) {
numVocab := int(C.llama_n_vocab(llm.ctx))
logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)
candidates := deque[C.struct_llama_token_data]{capacity: numVocab}
for i := 0; i < candidates.Cap(); i++ {
candidates.PushLeft(C.struct_llama_token_data{
id: C.int(i),
logit: logits[i],
p: 0,
})
}
token := C.llama_sample(
llm.ctx,
unsafe.SliceData(candidates.Data()), C.size_t(candidates.Len()),
unsafe.SliceData(output.Data()), C.size_t(output.Len()),
opts)
if token != C.llama_token_eos() {
return token, nil
}
return 0, io.EOF
}
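A minimal usage sketch of the binding above, assuming the package is imported from its pre-move path github.com/jmorganca/ollama/llama and that api.DefaultOptions() is available; the model path and prompt are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llama"
)

func main() {
	// hypothetical local model path; api.DefaultOptions is assumed to exist
	llm, err := llama.New("models/llama-2-7b-chat.ggmlv3.q4_0.bin", api.DefaultOptions())
	if err != nil {
		log.Fatal(err)
	}
	defer llm.Close()

	// Predict streams partial responses through the callback; passing nil as
	// the context starts a fresh conversation.
	err = llm.Predict(nil, "[INST] Why is the sky blue? [/INST]", func(r api.GenerateResponse) {
		fmt.Print(r.Response)
	})
	if err != nil {
		log.Fatal(err)
	}
}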

View File

@@ -1,104 +0,0 @@
package llama
type node[T any] struct {
t T
next *node[T]
prev *node[T]
}
type deque[T any] struct {
head *node[T]
tail *node[T]
size int
capacity int
}
func (d *deque[T]) Empty() bool {
return d.size == 0
}
func (d *deque[T]) Len() int {
return d.size
}
func (d *deque[T]) Cap() int {
return d.capacity
}
func (d *deque[T]) Push(t T) {
if d.capacity > 0 && d.size >= d.capacity {
d.PopLeft()
}
n := node[T]{t: t}
if d.head != nil {
n.next = d.head
d.head.prev = &n
d.head = &n
} else {
d.head = &n
d.tail = &n
}
d.size++
}
func (d *deque[T]) PushLeft(t T) {
if d.capacity > 0 && d.size >= d.capacity {
d.Pop()
}
n := node[T]{t: t}
if d.tail != nil {
n.prev = d.tail
d.tail.next = &n
d.tail = &n
} else {
d.head = &n
d.tail = &n
}
d.size++
}
func (d *deque[T]) Pop() *T {
if d.Empty() {
return nil
}
head := d.head
d.head = head.next
if d.head != nil {
d.head.prev = nil
} else {
d.tail = nil
}
d.size--
return &head.t
}
func (d *deque[T]) PopLeft() *T {
if d.Empty() {
return nil
}
tail := d.tail
d.tail = tail.prev
if d.tail != nil {
d.tail.next = nil
} else {
d.head = nil
}
d.size--
return &tail.t
}
func (d *deque[T]) Data() (data []T) {
for n := d.head; n != nil; n = n.next {
data = append(data, n.t)
}
return data
}
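For illustration, a short in-package sketch (not in the original file) of the capacity bound: once size reaches capacity, PushLeft evicts from the head via Pop before appending at the tail.

package llama

import "fmt"

// exampleDeque demonstrates the eviction behavior of the deque above.
func exampleDeque() {
	d := deque[int]{capacity: 3}
	for _, v := range []int{1, 2, 3, 4} {
		d.PushLeft(v) // the fourth push evicts 1 from the head
	}
	fmt.Println(d.Data()) // prints [2 3 4] in head-to-tail order
}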

575
llm/ggml-alloc.c Normal file
View File

@@ -0,0 +1,575 @@
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ggml-alloc.h"
#include "ggml.h"
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define UNUSED(x) (void)(x)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
//#define GGML_ALLOCATOR_DEBUG
//#define AT_PRINTF printf
#define AT_PRINTF(...) ((void)0)
struct hash_node {
struct ggml_tensor * t;
int n_children;
int n_views;
};
static size_t hash(void * p) {
return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
}
static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_tensor * t) {
size_t h = hash(t);
// linear probing
size_t i = h;
while (hash_table[i].t != NULL) {
if (hash_table[i].t == t) {
return &hash_table[i];
}
i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
if (i == h) {
// hash table is full
GGML_ASSERT(false);
}
}
hash_table[i].t = t;
return &hash_table[i];
}
// TODO: GGML_PAD ?
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
assert(alignment && !(alignment & (alignment - 1))); // power of 2
size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
return offset + align;
}
struct free_block {
void * addr;
size_t size;
};
#define MAX_FREE_BLOCKS 128
struct ggml_allocr {
void * data;
size_t size;
size_t alignment;
int n_free_blocks;
struct free_block free_blocks[MAX_FREE_BLOCKS];
struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE];
size_t max_size;
bool measure;
#ifdef GGML_ALLOCATOR_DEBUG
struct ggml_tensor * allocated_tensors[1024];
#endif
};
#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i] == NULL) {
alloc->allocated_tensors[i] = tensor;
return;
}
}
GGML_ASSERT(!"out of allocated_tensors");
}
static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i] == tensor ||
(alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
alloc->allocated_tensors[i] = NULL;
return;
}
}
printf("tried to free tensor %s not found\n", tensor->name);
GGML_ASSERT(!"tensor not found");
}
#endif
static size_t ggml_allocator_get_alloc_size(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
return ggml_nbytes(tensor);
UNUSED(alloc);
}
void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
size = aligned_offset(NULL, size, alloc->alignment);
AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
size_t max_avail = 0;
// find the best fitting free block
int best_fit_block = -1;
size_t best_fit_size = SIZE_MAX;
for (int i = 0; i < alloc->n_free_blocks; i++) {
struct free_block * block = &alloc->free_blocks[i];
max_avail = MAX(max_avail, block->size);
if (block->size >= size && block->size <= best_fit_size) {
best_fit_block = i;
best_fit_size = block->size;
}
}
AT_PRINTF("block %d\n", best_fit_block);
if (best_fit_block == -1) {
fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
__func__, size, max_avail);
GGML_ASSERT(!"not enough space in the buffer");
return;
}
struct free_block * block = &alloc->free_blocks[best_fit_block];
void * addr = block->addr;
block->addr = (char*)block->addr + size;
block->size -= size;
if (block->size == 0) {
// remove block if empty
alloc->n_free_blocks--;
for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
alloc->free_blocks[j] = alloc->free_blocks[j+1];
}
}
tensor->data = addr;
#ifdef GGML_ALLOCATOR_DEBUG
add_allocated_tensor(alloc, tensor);
size_t cur_max = (char*)addr - (char*)alloc->data + size;
if (cur_max > alloc->max_size) {
printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i]) {
printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
}
}
printf("\n");
}
#endif
alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
}
// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_allocator_free_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
void * ptr = tensor->data;
if (ptr < alloc->data || (char*)ptr >= (char*)alloc->data + alloc->max_size) {
// the tensor was not allocated in this buffer
// this can happen because the graph allocator will try to free weights and other tensors from different buffers
// the easiest way to deal with this is just to ignore it
return;
}
size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
size = aligned_offset(NULL, size, alloc->alignment);
AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);
#ifdef GGML_ALLOCATOR_DEBUG
remove_allocated_tensor(alloc, tensor);
#endif
// see if we can merge with an existing block
for (int i = 0; i < alloc->n_free_blocks; i++) {
struct free_block * block = &alloc->free_blocks[i];
// check if ptr is at the end of the block
if ((char*)block->addr + block->size == ptr) {
block->size += size;
// check if we can merge with the next block
if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
block->size += alloc->free_blocks[i+1].size;
alloc->n_free_blocks--;
for (int j = i+1; j < alloc->n_free_blocks; j++) {
alloc->free_blocks[j] = alloc->free_blocks[j+1];
}
}
return;
}
// check if ptr is at the beginning of the block
if ((char*)ptr + size == block->addr) {
block->addr = ptr;
block->size += size;
// check if we can merge with the previous block
if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
alloc->free_blocks[i-1].size += block->size;
alloc->n_free_blocks--;
for (int j = i; j < alloc->n_free_blocks; j++) {
alloc->free_blocks[j] = alloc->free_blocks[j+1];
}
}
return;
}
}
// otherwise, add a new block
GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
// insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
int insert_pos = 0;
while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
insert_pos++;
}
// shift all blocks from insert_pos onward to make room for the new block
for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
alloc->free_blocks[i] = alloc->free_blocks[i-1];
}
// insert the new block
alloc->free_blocks[insert_pos].addr = ptr;
alloc->free_blocks[insert_pos].size = size;
alloc->n_free_blocks++;
}
void ggml_allocr_reset(struct ggml_allocr * alloc) {
alloc->n_free_blocks = 1;
size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
alloc->free_blocks[0].size = alloc->size - align_offset;
}
struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) {
struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
*alloc = (struct ggml_allocr){
/*.data = */ data,
/*.size = */ size,
/*.alignment = */ alignment,
/*.n_free_blocks = */ 0,
/*.free_blocks = */ {{0}},
/*.hash_table = */ {{0}},
/*.max_size = */ 0,
/*.measure = */ false,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ {0},
#endif
};
ggml_allocr_reset(alloc);
return alloc;
}
// address and size of the buffer when measuring
// it needs to be large enough to fit all the tensors, but it cannot overlap with other existing buffers
static void * const MEASURE_BASE_ADDR = (void *) 0x1000;
static const size_t MEASURE_MAX_SIZE = 1ULL<<40; // 1 TB
struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
*alloc = (struct ggml_allocr){
/*.data = */ MEASURE_BASE_ADDR,
/*.size = */ MEASURE_MAX_SIZE,
/*.alignment = */ alignment,
/*.n_free_blocks = */ 0,
/*.free_blocks = */ {{0}},
/*.hash_table = */ {{0}},
/*.max_size = */ 0,
/*.measure = */ true,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ {0},
#endif
};
ggml_allocr_reset(alloc);
return alloc;
}
void ggml_allocr_free(struct ggml_allocr * alloc) {
free(alloc);
}
bool ggml_allocr_is_measure(struct ggml_allocr * alloc) {
return alloc->measure;
}
//////////// compute graph allocator
static bool ggml_is_view(struct ggml_tensor * t) {
return t->op == GGML_OP_RESHAPE || t->op == GGML_OP_VIEW || t->op == GGML_OP_TRANSPOSE ||
t->op == GGML_OP_PERMUTE || t->op == GGML_OP_CPY;
}
static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
if (a->type != b->type) {
return false;
}
for (int i = 0; i < GGML_MAX_DIMS; i++) {
if (a->ne[i] != b->ne[i]) {
return false;
}
if (a->nb[i] != b->nb[i]) {
return false;
}
}
return true;
}
static struct ggml_tensor * get_view_parent(struct ggml_tensor * t) {
switch (t->op) {
case GGML_OP_PERMUTE:
case GGML_OP_RESHAPE:
case GGML_OP_TRANSPOSE:
case GGML_OP_VIEW:
return t->src[0];
case GGML_OP_CPY:
return t->src[1];
default:
return NULL;
}
}
static struct ggml_tensor * get_view_source(struct ggml_tensor * t) {
struct ggml_tensor * parent = t;
do {
parent = get_view_parent(parent);
} while (ggml_is_view(parent));
return parent;
}
static bool ggml_op_can_inplace(enum ggml_op op) {
switch (op) {
case GGML_OP_SCALE:
case GGML_OP_DIAG_MASK_ZERO:
case GGML_OP_DIAG_MASK_INF:
case GGML_OP_ADD:
case GGML_OP_ADD1:
case GGML_OP_ACC:
case GGML_OP_SUB:
case GGML_OP_MUL:
case GGML_OP_DIV:
case GGML_OP_SQR:
case GGML_OP_SQRT:
case GGML_OP_LOG:
case GGML_OP_UNARY:
case GGML_OP_ROPE:
case GGML_OP_RMS_NORM:
case GGML_OP_SET:
case GGML_OP_SOFT_MAX:
case GGML_OP_CONT:
return true;
default:
return false;
}
}
static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) {
struct hash_node * ht = alloc->hash_table;
if (node->data == NULL) {
if (ggml_is_view(node)) {
size_t offset;
switch(node->op) {
case GGML_OP_VIEW:
memcpy(&offset, node->op_params, sizeof(size_t));
node->data = (char *) node->src[0]->data + offset;
break;
case GGML_OP_PERMUTE:
case GGML_OP_RESHAPE:
case GGML_OP_TRANSPOSE:
node->data = node->src[0]->data;
break;
case GGML_OP_CPY:
node->data = node->src[1]->data;
break;
default:
GGML_ASSERT(!"unknown view op");
break;
}
} else {
// see if we can reuse a parent's buffer (inplace)
if (ggml_op_can_inplace(node->op)) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
struct ggml_tensor * parent = node->src[i];
if (parent == NULL) {
break;
}
// if the node's data is external, then we cannot re-use it
if ((char *) parent->data < (char *) alloc->data ||
(char *) parent->data >= ((char *) alloc->data + alloc->size)) {
AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
continue;
}
struct hash_node * p_hn = hash_get(ht, parent);
if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) {
if (ggml_is_view(parent)) {
struct ggml_tensor * view_src = get_view_source(parent);
struct hash_node * view_src_hn = hash_get(ht, view_src);
if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
// TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
// the parent's data that it will need later (same layout requirement). the problem is that then
// we cannot free the tensor because the original address of the allocation is lost.
// adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
// for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
node->data = parent->data;
return;
}
}
else {
AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
node->data = parent->data;
}
return;
}
}
}
ggml_allocr_alloc(alloc, node);
}
}
}
static size_t ggml_allocator_alloc_graph_tensors_n(
struct ggml_allocr * alloc,
struct ggml_cgraph ** graphs, int n_graphs,
struct ggml_tensor *** inputs, struct ggml_tensor *** outputs) {
// reset hash table
struct hash_node * ht = alloc->hash_table;
memset(ht, 0, sizeof(struct hash_node) * GGML_GRAPH_HASHTABLE_SIZE);
// count number of children and views
for (int g = 0; g < n_graphs; g++) {
struct ggml_cgraph * gf = graphs[g];
for (int i = 0; i < gf->n_nodes; i++) {
struct ggml_tensor * node = gf->nodes[i];
if (ggml_is_view(node)) {
struct ggml_tensor * view_src = get_view_source(node);
hash_get(ht, view_src)->n_views += 1;
}
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
hash_get(ht, parent)->n_children += 1;
}
}
}
// allocate tensors
for (int g = 0; g < n_graphs; g++) {
struct ggml_cgraph * gf = graphs[g];
AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
// graph inputs are allocated first to ensure that they are not overwritten by each other
if (inputs != NULL && inputs[g] != NULL) {
for (int i = 0; inputs[g][i] != NULL; i++) {
struct ggml_tensor * input = inputs[g][i];
AT_PRINTF("input: %s\n", input->name);
allocate_node(alloc, input);
}
}
for (int i = 0; i < gf->n_nodes; i++) {
struct ggml_tensor * node = gf->nodes[i];
// allocate parents (leafs)
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
allocate_node(alloc, parent);
}
// allocate node
allocate_node(alloc, node);
AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
AT_PRINTF("%s", parent->name);
if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
AT_PRINTF(", ");
}
}
AT_PRINTF("\n");
// update parents
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
struct hash_node * p_hn = hash_get(ht, parent);
p_hn->n_children -= 1;
//AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
if (p_hn->n_children == 0 && p_hn->n_views == 0) {
if (ggml_is_view(parent)) {
struct ggml_tensor * view_src = get_view_source(parent);
struct hash_node * view_src_hn = hash_get(ht, view_src);
view_src_hn->n_views -= 1;
AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src->n_children, view_src->n_views);
if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
ggml_allocator_free_tensor(alloc, view_src);
}
}
else {
if (parent->data != node->data) {
ggml_allocator_free_tensor(alloc, parent);
}
}
}
}
AT_PRINTF("\n");
}
// free graph outputs here that wouldn't be freed otherwise because they have no children
if (outputs != NULL && outputs[g] != NULL) {
for (int i = 0; outputs[g][i] != NULL; i++) {
struct ggml_tensor * output = outputs[g][i];
AT_PRINTF("output: %s\n", output->name);
ggml_allocator_free_tensor(alloc, output);
}
}
}
return alloc->max_size;
}
size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph) {
return ggml_allocator_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
}
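The allocator above keeps a small, address-sorted array of free blocks, serves each allocation from the best-fitting block (the smallest block that still fits), and merges neighbouring blocks again on free. The Go sketch below illustrates only the best-fit step, using integer offsets instead of pointers and skipping alignment, so it is an illustration of the strategy rather than a translation of the C code:

package main

import "fmt"

// freeBlock mirrors the C allocator's free_block: an offset into the buffer
// and the number of free bytes starting there.
type freeBlock struct {
	off, size int
}

// bestFit picks the smallest free block that can hold size bytes, carves the
// allocation from its front, and drops the block once it is empty, mirroring
// the search in ggml_allocr_alloc above (alignment omitted).
func bestFit(blocks []freeBlock, size int) (int, []freeBlock, bool) {
	best := -1
	for i, b := range blocks {
		if b.size >= size && (best == -1 || b.size < blocks[best].size) {
			best = i
		}
	}
	if best == -1 {
		return 0, blocks, false // no block is large enough
	}
	off := blocks[best].off
	blocks[best].off += size
	blocks[best].size -= size
	if blocks[best].size == 0 {
		blocks = append(blocks[:best], blocks[best+1:]...)
	}
	return off, blocks, true
}

func main() {
	blocks := []freeBlock{{off: 0, size: 64}, {off: 128, size: 32}}
	off, blocks, ok := bestFit(blocks, 32)
	fmt.Println(off, blocks, ok) // 128 [{0 64}] true
}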

48
llm/ggml-alloc.h Normal file
View File

@@ -0,0 +1,48 @@
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C" {
#endif
GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment);
GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);
GGML_API void ggml_allocr_free(struct ggml_allocr * alloc);
GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc);
GGML_API void ggml_allocr_reset(struct ggml_allocr * alloc);
GGML_API void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor);
GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph);
#ifdef __cplusplus
}
#endif
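The header pairs a measuring allocator (ggml_allocr_new_measure, ggml_allocr_is_measure) with the real one: a caller can run graph allocation once against a measure allocator to learn the worst-case buffer size, then create a real allocator over a buffer of that size and allocate again. The Go sketch below shows only that two-pass shape, with entirely hypothetical types and names; the actual API is the C one above:

package main

import "fmt"

// graph and allocr are hypothetical stand-ins for ggml_cgraph and ggml_allocr;
// allocGraph plays the role of ggml_allocr_alloc_graph and returns the number
// of bytes the graph needed.
type graph struct{ nodeBytes []int }

type allocr struct {
	measure bool // true for the sizing pass, false for the real pass
	size    int  // capacity of the backing buffer (ignored when measuring)
}

func (a *allocr) allocGraph(g *graph) int {
	total := 0
	for _, n := range g.nodeBytes {
		total += n // the real allocator frees and reuses blocks; this sketch just sums
	}
	return total
}

func main() {
	g := &graph{nodeBytes: []int{256, 512, 128}}

	// pass 1: a measure allocator walks the graph to find the required size
	needed := (&allocr{measure: true}).allocGraph(g)

	// pass 2: back a real allocator with a buffer of that size and allocate again
	concrete := &allocr{size: needed}
	concrete.allocGraph(g)

	fmt.Println("buffer bytes needed:", needed)
}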

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -53,6 +53,7 @@ void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
void ggml_cuda_set_main_device(int main_device);
void ggml_cuda_set_mul_mat_q(bool mul_mat_q);
void ggml_cuda_set_scratch_size(size_t scratch_size);
void ggml_cuda_free_scratch(void);
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);

View File

@@ -1,7 +1,7 @@
//go:build darwin
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -89,6 +89,13 @@ void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor *
// get data from the device into host memory
void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
// try to find operations that can be run concurrently in the graph
// you should run it again if the topology of your graph changes
void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
// if the graph has been optimized for concurrently dispatch
bool ggml_metal_if_optimized(struct ggml_metal_context * ctx);
// same as ggml_graph_compute but uses Metal
// creates gf->n_threads command buffers in parallel
void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);

View File

@@ -1,7 +1,7 @@
//go:build darwin
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -35,6 +35,11 @@
#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#ifdef GGML_METAL_NDEBUG
#define metal_printf(...)
#else
@@ -43,6 +48,8 @@
#define UNUSED(x) (void)(x)
#define GGML_MAX_CONCUR (2*GGML_MAX_NODES)
struct ggml_metal_buffer {
const char * name;
@@ -64,12 +71,16 @@ struct ggml_metal_context {
int n_buffers;
struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
int concur_list[GGML_MAX_CONCUR];
int concur_list_len;
// custom kernels
#define GGML_METAL_DECL_KERNEL(name) \
id<MTLFunction> function_##name; \
id<MTLComputePipelineState> pipeline_##name
GGML_METAL_DECL_KERNEL(add);
GGML_METAL_DECL_KERNEL(add_row); // TODO: avoid this extra kernel, instead extend the "add" kernel to support broadcast
GGML_METAL_DECL_KERNEL(mul);
GGML_METAL_DECL_KERNEL(mul_row); // TODO: avoid this extra kernel, instead extend the "mul" kernel to support broadcast
GGML_METAL_DECL_KERNEL(scale);
@@ -125,6 +136,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
ctx->device = MTLCreateSystemDefaultDevice();
ctx->queue = [ctx->device newCommandQueue];
ctx->n_buffers = 0;
ctx->concur_list_len = 0;
// determine if we can use MPS
if (MPSSupportsMTLDevice(ctx->device)) {
@@ -142,7 +154,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
if (error) {
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
exit(1);
return NULL;
}
}
#else
@@ -160,7 +172,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
if (error) {
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
exit(1);
return NULL;
}
#ifdef GGML_QKK_64
@@ -172,7 +184,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
#endif
if (error) {
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
exit(1);
return NULL;
}
}
#endif
@@ -185,6 +197,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name);
GGML_METAL_ADD_KERNEL(add);
GGML_METAL_ADD_KERNEL(add_row);
GGML_METAL_ADD_KERNEL(mul);
GGML_METAL_ADD_KERNEL(mul_row);
GGML_METAL_ADD_KERNEL(scale);
@@ -243,6 +256,13 @@ void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) {
ctx->n_cb = n_cb;
}
bool ggml_metal_if_optimized(struct ggml_metal_context * ctx) {
if (ctx->concur_list_len) {
return true;
}
return false;
}
// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
@@ -381,11 +401,112 @@ void ggml_metal_get_tensor(
memcpy(t->data, (void *) ((uint8_t *) id_src.contents + offs), ggml_nbytes(t));
}
void ggml_metal_graph_find_concurrency(
struct ggml_metal_context * ctx,
struct ggml_cgraph * gf) {
int search_depth = gf->n_nodes; //we only find concurrency in this range to avoid wasting too much time
int nodes_unused[GGML_MAX_CONCUR];
for (int i = 0; i < GGML_MAX_CONCUR; i++) { ctx->concur_list[i] = 0; }
for (int i = 0; i < gf->n_nodes; i++) { nodes_unused[i] = 1; }
ctx->concur_list_len = 0;
int n_left = gf->n_nodes;
int n_start = 0; // all nodes before n_start in the nodes_unused array have been sorted and stored back to ctx->concur_list
int level_pos = 0; // at ctx->concur_list, the last layer (level) ends at level_pos
while (n_left > 0) {
// number of nodes at a layer (that can be issued concurrently)
int concurrency = 0;
for (int i = n_start; i < ((n_start + search_depth > gf->n_nodes) ? gf->n_nodes : n_start + search_depth); i++) {
if (nodes_unused[i]) {
// if the requirements for gf->nodes[i] are satisfied
int exe_flag = 1;
// scan all srcs
for (int src_ind = 0; src_ind < GGML_MAX_SRC; src_ind++) {
struct ggml_tensor * src_cur = gf->nodes[i]->src[src_ind];
if (src_cur) {
// if it is a leaf node, the requirement is satisfied.
// TODO: ggml_is_leaf()
if (src_cur->op == GGML_OP_NONE && src_cur->grad == NULL) {
continue;
}
// otherwise this src should be the output from previous nodes.
int is_found = 0;
// scan 2*search_depth back because we inserted barrier.
//for (int j = ((level_pos - 2*search_depth) < 0 ? 0 : (level_pos - 2*search_depth)); j < level_pos; j++) {
for (int j = MAX(0, level_pos - 2*search_depth); j < level_pos; j++) {
if (ctx->concur_list[j] >= 0 && gf->nodes[ctx->concur_list[j]] == src_cur) {
is_found = 1;
break;
}
}
if (is_found == 0) {
exe_flag = 0;
break;
}
}
}
if (exe_flag) {
// check if nodes[i]'s data will be overwritten by a node before nodes[i].
// if node[5] and node[3] write to the same memory region, then we can't issue node[5] before node[3]
int64_t data_start = (int64_t) gf->nodes[i]->data;
int64_t length = (int64_t) ggml_nbytes(gf->nodes[i]);
for (int j = n_start; j < i; j++) {
if (nodes_unused[j] && gf->nodes[j]->op != GGML_OP_RESHAPE \
&& gf->nodes[j]->op != GGML_OP_VIEW \
&& gf->nodes[j]->op != GGML_OP_TRANSPOSE \
&& gf->nodes[j]->op != GGML_OP_PERMUTE) {
if (((int64_t)gf->nodes[j]->data) >= data_start + length || \
((int64_t)gf->nodes[j]->data) + (int64_t) ggml_nbytes(gf->nodes[j]) <= data_start) {
continue;
}
exe_flag = 0;
}
}
}
if (exe_flag) {
ctx->concur_list[level_pos + concurrency] = i;
nodes_unused[i] = 0;
concurrency++;
ctx->concur_list_len++;
}
}
}
n_left -= concurrency;
// add a barrier to separate this layer from the next
ctx->concur_list[level_pos + concurrency] = -1;
ctx->concur_list_len++;
// skip over nodes that have already been sorted (marked used in nodes_unused)
while (!nodes_unused[n_start]) {
n_start++;
}
level_pos += concurrency + 1;
}
if (ctx->concur_list_len > GGML_MAX_CONCUR) {
fprintf(stderr, "%s: too many elements for metal ctx->concur_list!\n", __func__);
}
}
void ggml_metal_graph_compute(
struct ggml_metal_context * ctx,
struct ggml_cgraph * gf) {
metal_printf("%s: evaluating graph\n", __func__);
// if there is ctx->concur_list, dispatch concurrently
// else fallback to serial dispatch
MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;
const bool has_concur = ctx->concur_list_len && ctx->concur_list_len <= GGML_MAX_CONCUR;
const int n_nodes = has_concur ? ctx->concur_list_len : gf->n_nodes;
edesc.dispatchType = has_concur ? MTLDispatchTypeConcurrent : MTLDispatchTypeSerial;
// create multiple command buffers and enqueue them
// then, we encode the graph into the command buffers in parallel
@@ -404,7 +525,7 @@ void ggml_metal_graph_compute(
dispatch_queue_t queue = dispatch_queue_create("llama.cpp", DISPATCH_QUEUE_CONCURRENT);
for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
const int n_nodes_per_cb = (gf->n_nodes + n_cb - 1) / n_cb;
const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb;
dispatch_async(queue, ^{
size_t offs_src0 = 0;
@@ -415,10 +536,21 @@ void ggml_metal_graph_compute(
id<MTLComputeCommandEncoder> encoder = nil;
const int node_start = (cb_idx + 0) * n_nodes_per_cb;
const int node_end = (cb_idx == n_cb - 1) ? gf->n_nodes : (cb_idx + 1) * n_nodes_per_cb;
const int node_start = (cb_idx + 0) * n_nodes_per_cb;
const int node_end = (cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb;
for (int ind = node_start; ind < node_end; ++ind) {
const int i = has_concur ? ctx->concur_list[ind] : ind;
if (i == -1) {
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
continue;
}
[encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
continue;
}
for (int i = node_start; i < node_end; ++i) {
metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));
struct ggml_tensor * src0 = gf->nodes[i]->src[0];
@@ -489,13 +621,19 @@ void ggml_metal_graph_compute(
case GGML_OP_ADD:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
[encoder setComputePipelineState:ctx->pipeline_add];
if (ggml_nelements(src1) == ne10) {
// src1 is a row
[encoder setComputePipelineState:ctx->pipeline_add_row];
} else {
[encoder setComputePipelineState:ctx->pipeline_add];
}
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
const int64_t n = ggml_nelements(dst);
@@ -504,7 +642,7 @@ void ggml_metal_graph_compute(
case GGML_OP_MUL:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
if (ggml_nelements(src1) == ne10) {
@@ -525,7 +663,7 @@ void ggml_metal_graph_compute(
case GGML_OP_SCALE:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
const float scale = *(const float *) src1->data;
@@ -539,52 +677,60 @@ void ggml_metal_graph_compute(
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_OP_SILU:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
}
case GGML_OP_UNARY:
switch (ggml_get_unary_op(gf->nodes[i])) {
case GGML_UNARY_OP_SILU:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
[encoder setComputePipelineState:ctx->pipeline_silu];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setComputePipelineState:ctx->pipeline_silu];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
const int64_t n = ggml_nelements(dst);
const int64_t n = ggml_nelements(dst);
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_UNARY_OP_RELU:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
[encoder setComputePipelineState:ctx->pipeline_relu];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
const int64_t n = ggml_nelements(dst);
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_UNARY_OP_GELU:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
[encoder setComputePipelineState:ctx->pipeline_gelu];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
const int64_t n = ggml_nelements(dst);
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
default:
{
fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false);
}
} break;
case GGML_OP_RELU:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
}
[encoder setComputePipelineState:ctx->pipeline_relu];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
const int64_t n = ggml_nelements(dst);
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_OP_GELU:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
}
[encoder setComputePipelineState:ctx->pipeline_gelu];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
const int64_t n = ggml_nelements(dst);
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_OP_SOFT_MAX:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
const int nth = 32;
@@ -602,10 +748,10 @@ void ggml_metal_graph_compute(
case GGML_OP_DIAG_MASK_INF:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
const int n_past = ((int32_t *)(src1->data))[0];
const int n_past = ((int32_t *)(dst->op_params))[0];
[encoder setComputePipelineState:ctx->pipeline_diag_mask_inf];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
@@ -621,7 +767,8 @@ void ggml_metal_graph_compute(
// TODO: needs to be updated after PR: https://github.com/ggerganov/ggml/pull/224
GGML_ASSERT(ne00 == ne10);
GGML_ASSERT(ne02 == ne12);
// GGML_ASSERT(ne02 == ne12); // Should be checked on individual data types until broadcast is implemented everywhere
GGML_ASSERT(ne03 == ne13);
if (ggml_is_contiguous(src0) &&
ggml_is_contiguous(src1) &&
@@ -649,11 +796,11 @@ void ggml_metal_graph_compute(
initWithDevice:ctx->device transposeLeft:false transposeRight:true
resultRows:ne11 resultColumns:ne01 interiorColumns:ne00 alpha:1.0 beta:0.0];
// we need to do ne02 multiplications
// we need to do ne12 multiplications
// TODO: is there a way to do this in parallel - currently very slow ..
// TODO: might be possible to offload part of the computation to ANE using Accelerate's CBLAS
for (int64_t i02 = 0; i02 < ne02; ++i02) {
size_t offs_src0_cur = offs_src0 + i02*nb02;
for (int64_t i02 = 0; i02 < ne12; ++i02) {
size_t offs_src0_cur = offs_src0 + i02/(ne12/ne02)*nb02; // gqa not used for now
size_t offs_src1_cur = offs_src1 + i02*nb12;
size_t offs_dst_cur = offs_dst + i02*nb2;
@@ -665,7 +812,7 @@ void ggml_metal_graph_compute(
}
} else {
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
int nth0 = 32;
@@ -675,8 +822,6 @@ void ggml_metal_graph_compute(
switch (src0t) {
case GGML_TYPE_F16:
{
GGML_ASSERT(ne02 == ne12);
nth0 = 64;
nth1 = 1;
[encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32];
@@ -704,8 +849,8 @@ void ggml_metal_graph_compute(
GGML_ASSERT(ne02 == 1);
GGML_ASSERT(ne12 == 1);
nth0 = 4;
nth1 = 16;
nth0 = 2;
nth1 = 32;
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_K_f32];
} break;
case GGML_TYPE_Q3_K:
@@ -713,8 +858,8 @@ void ggml_metal_graph_compute(
GGML_ASSERT(ne02 == 1);
GGML_ASSERT(ne12 == 1);
nth0 = 4;
nth1 = 16;
nth0 = 2;
nth1 = 32;
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_K_f32];
} break;
case GGML_TYPE_Q4_K:
@@ -756,31 +901,35 @@ void ggml_metal_graph_compute(
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
[encoder setBytes:&nb00 length:sizeof(nb00) atIndex:5];
[encoder setBytes:&nb01 length:sizeof(nb01) atIndex:6];
[encoder setBytes:&nb02 length:sizeof(nb02) atIndex:7];
[encoder setBytes:&ne10 length:sizeof(ne10) atIndex:8];
[encoder setBytes:&ne11 length:sizeof(ne11) atIndex:9];
[encoder setBytes:&nb10 length:sizeof(nb10) atIndex:10];
[encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
[encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:13];
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:14];
[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
[encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
[encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
[encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
[encoder setBytes:&ne10 length:sizeof(ne10) atIndex:9];
[encoder setBytes:&ne11 length:sizeof(ne11) atIndex:10];
[encoder setBytes:&ne12 length:sizeof(ne12) atIndex:11];
[encoder setBytes:&nb10 length:sizeof(nb10) atIndex:12];
[encoder setBytes:&nb11 length:sizeof(nb11) atIndex:13];
[encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:15];
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16];
if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
src0t == GGML_TYPE_Q4_K) {
src0t == GGML_TYPE_Q2_K || src0t == GGML_TYPE_Q4_K) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7) / 8, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
else if (src0t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
[encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
[encoder dispatchThreadgroups:MTLSizeMake((ne01+3)/4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
}
else if (src0t == GGML_TYPE_Q5_K) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
else if (src0t == GGML_TYPE_Q6_K) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
else if (src0t == GGML_TYPE_Q2_K ||
src0t == GGML_TYPE_Q3_K) {
[encoder setThreadgroupMemoryLength:nth0*nth1*sizeof(float) atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
} else {
[encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
@@ -790,7 +939,7 @@ void ggml_metal_graph_compute(
case GGML_OP_GET_ROWS:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
switch (src0->type) {
@@ -819,10 +968,11 @@ void ggml_metal_graph_compute(
case GGML_OP_RMS_NORM:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
const float eps = 1e-6f;
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
const int nth = 512;
@@ -841,7 +991,7 @@ void ggml_metal_graph_compute(
case GGML_OP_NORM:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
const float eps = 1e-5f;
@@ -863,14 +1013,15 @@ void ggml_metal_graph_compute(
case GGML_OP_ALIBI:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
GGML_ASSERT((src0t == GGML_TYPE_F32));
const int n_past = ((int32_t *) src1->data)[0]; UNUSED(n_past);
const int n_head = ((int32_t *) src1->data)[1];
const float max_bias = ((float *) src1->data)[2];
const int n_past = ((int32_t *) dst->op_params)[0]; UNUSED(n_past);
const int n_head = ((int32_t *) dst->op_params)[1];
float max_bias;
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
if (__builtin_popcount(n_head) != 1) {
GGML_ASSERT(false && "only power-of-two n_head implemented");
@@ -905,18 +1056,17 @@ void ggml_metal_graph_compute(
case GGML_OP_ROPE:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
const int n_dims = ((int32_t *) src1->data)[1];
const int mode = ((int32_t *) src1->data)[2];
const int n_past = ((int32_t *)(src1->data))[0];
const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
float freq_base;
float freq_scale;
memcpy(&freq_base, (int32_t *) src1->data + 4, sizeof(float));
memcpy(&freq_scale, (int32_t *) src1->data + 5, sizeof(float));
memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
[encoder setComputePipelineState:ctx->pipeline_rope];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
@@ -945,10 +1095,12 @@ void ggml_metal_graph_compute(
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_OP_DUP:
case GGML_OP_CPY:
case GGML_OP_CONT:
{
if (encoder == nil) {
encoder = [command_buffer computeCommandEncoder];
encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
}
const int nth = 32;
@@ -995,8 +1147,10 @@ void ggml_metal_graph_compute(
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
default:
fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false);
{
fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false);
}
}
}
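As an aside on the command-buffer split shown in the hunks above: the graph's n_nodes are divided across n_cb command buffers by ceiling division, and the last buffer absorbs the remainder via node_end. A minimal sketch of that partitioning, written in Go purely for illustration (the function and variable names are hypothetical):
// partition mirrors the n_nodes_per_cb / node_start / node_end arithmetic above.
// Each of nCB command buffers gets a contiguous [start, end) slice of nNodes nodes;
// an encoder loop of the form `for i := start; i < end; i++` simply skips empty ranges.
func partition(nNodes, nCB int) [][2]int {
	perCB := (nNodes + nCB - 1) / nCB // ceiling division
	ranges := make([][2]int, 0, nCB)
	for cb := 0; cb < nCB; cb++ {
		start := cb * perCB
		end := (cb + 1) * perCB
		if cb == nCB-1 {
			end = nNodes // last command buffer takes whatever is left
		}
		ranges = append(ranges, [2]int{start, end})
	}
	return ranges
}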


@@ -1,7 +1,7 @@
//go:build darwin
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -95,6 +95,17 @@ kernel void kernel_add(
dst[tpig] = src0[tpig] + src1[tpig];
}
// assumption: src1 is a row
// broadcast src1 into src0
kernel void kernel_add_row(
device const float * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] + src1[tpig % ne00];
}
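The kernel_add_row added above is a row-wise broadcast: src1 holds a single row of ne00 values that is added across src0, which is the path ggml-metal.m now selects (pipeline_add_row) when ggml_nelements(src1) == ne10. An equivalent sketch in Go, for illustration only:
// addRowBroadcast is a hypothetical transcription of kernel_add_row:
// dst[i] = src0[i] + src1[i % ne00], i.e. the single row src1 is broadcast over src0.
func addRowBroadcast(dst, src0, src1 []float32, ne00 int) {
	for i := range dst {
		dst[i] = src0[i] + src1[i%ne00]
	}
}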
kernel void kernel_mul(
device const float * src0,
device const float * src1,
@@ -379,7 +390,7 @@ kernel void kernel_rms_norm(
threadgroup_barrier(mem_flags::mem_threadgroup);
// broadcast, simd group number is ntg / 32
for (int i = ntg / 32 / 2; i > 0; i /= 2) {
for (uint i = ntg / 32 / 2; i > 0; i /= 2) {
if (tpitg < i) {
sum[tpitg] += sum[tpitg + i];
}
@@ -404,87 +415,90 @@ kernel void kernel_rms_norm(
}
}
// function for calculating the inner product between a q4_0 block and 32 floats (yl), sumy is SUM(yl[i])
float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl) {
// function for calculating the inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i])
// il indicates where the q4 quants begin (0 or QK4_0/4)
// we assume that the yl's have been multiplied with the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
float4 acc = 0.f;
device uint16_t * qs = ((device uint16_t *)qb_curr + 1);
for (int i = 0; i < 16; i+=2) {
acc[0] += yl[i] * (qs[i / 2] & 0x000F);
acc[1] += yl[i + 16] * (qs[i / 2] & 0x00F0);
acc[2] += yl[i + 1] * (qs[i / 2] & 0x0F00);
acc[3] += yl[i + 17] * (qs[i / 2] & 0xF000);
float2 acc = 0.f;
device const uint16_t * qs = ((device const uint16_t *)qb_curr + 1 + il/2);
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
+ yl[i + 1] * (qs[i / 2] & 0x0F00);
acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0)
+ yl[i + 9] * (qs[i / 2] & 0xF000);
}
return d * (sumy * -8.f + acc[0] + acc[1]/16.f + acc[2]/256.f + acc[3]/4096.f);
return d * (sumy * -8.f + acc[0] + acc[1]);
}
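A short derivation of the return value above, assuming the usual q4_0 dequantization $v_i = d\,(q_i - 8)$:
$$\sum_i y_i\, d\,(q_i - 8) \;=\; d\Big(\sum_i y_i q_i - 8 \sum_i y_i\Big) \;=\; d\,(\mathrm{acc}[0] + \mathrm{acc}[1] - 8\cdot\mathrm{sumy}),$$
where the 1/16, 1/256 and 1/4096 factors pre-applied to yl (see the comment above) cancel the fact that the nibbles are masked out of each packed uint16 without being shifted down.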
// function for calculating the inner product between a q4_1 block and 32 floats (yl), sumy is SUM(yl[i])
float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl) {
// function for calculating the inner product between half a q4_1 block and 16 floats (yl), sumy is SUM(yl[i])
// il indicates where the q4 quants begin (0 or QK4_0/4)
// we assume that the yl's have been multiplied with the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
float m = qb_curr->m;
float4 acc = 0.f;
device uint16_t * qs = ((device uint16_t *)qb_curr + 2);
for (int i = 0; i < 16; i+=2) {
acc[0] += yl[i] * (qs[i / 2] & 0x000F);
acc[1] += yl[i + 16] * (qs[i / 2] & 0x00F0);
acc[2] += yl[i + 1] * (qs[i / 2] & 0x0F00);
acc[3] += yl[i + 17] * (qs[i / 2] & 0xF000);
device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2);
float2 acc = 0.f;
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
+ yl[i + 1] * (qs[i / 2] & 0x0F00);
acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0)
+ yl[i + 9] * (qs[i / 2] & 0xF000);
}
return d * (acc[0] + acc[1]/16.f + acc[2]/256.f + acc[3]/4096.f) + sumy * m;
return d * (acc[0] + acc[1]) + sumy * m;
}
// putting them in the kernel causes a significant performance penalty
#define N_DST 4 // each SIMD group works on 4 rows
#define N_SIMDGROUP 2 // number of SIMD groups in a thread group
#define N_SIMDWIDTH 32 // assuming SIMD group size is 32
template<typename block_q_type>
// Note: This is a template, but strictly speaking it only applies to
// quantizations where the block size is 32. It also does not
// guard against the number of rows not being divisible by
// N_DST, so this is another explicit assumption of the implementation.
template<typename block_q_type, int nr, int nsg, int nw>
void mul_vec_q_n_f32(device const void * src0, device const float * src1, device float * dst,
int64_t ne00, int64_t ne10, int64_t ne0, int64_t ne01,
uint2 tgpig, uint tiisg, uint sgitg) {
const int nb = ne00/QK4_0;
const int r0 = tgpig.x;
const int r1 = tgpig.y;
device const block_q_type * x = (device const block_q_type *) src0 + (r0 * N_SIMDGROUP + sgitg) * N_DST * nb;
const int first_row = (r0 * nsg + sgitg) * nr;
device const block_q_type * x = (device const block_q_type *) src0 + first_row * nb;
device const float * y = (device const float *) src1 + r1*ne10;
float4 y_curr[8]; // src1 vector cache
float sumf[N_DST]={0.f}, all_sum;
thread float * yl=(thread float *)y_curr;
float yl[16]; // src1 vector cache
float sumf[nr]={0.f};
// each thread in a SIMD group deals with 1 block.
for (int column = 0; column < nb / N_SIMDWIDTH; column++) {
const int ix = tiisg/2;
const int il = 8*(tiisg%2);
device const float * yb = y + ix * QK4_0 + il;
// each thread in a SIMD group deals with half a block.
for (int ib = ix; ib < nb; ib += nw/2) {
float sumy = 0;
for (int i = 0; i < QK4_0 / 4; i++) {
y_curr[i] = *((device float4 *)(y + N_SIMDWIDTH * (tiisg + column * QK4_0)) + i);
sumy += y_curr[i][0] + y_curr[i][1] + y_curr[i][2] + y_curr[i][3];
for (int i = 0; i < 8; i += 2) {
sumy += yb[i] + yb[i+1];
yl[i+0] = yb[i+ 0];
yl[i+1] = yb[i+ 1]/256.f;
sumy += yb[i+16] + yb[i+17];
yl[i+8] = yb[i+16]/16.f;
yl[i+9] = yb[i+17]/4096.f;
}
for (int row = 0; row < N_DST; row++) {
sumf[row] += block_q_n_dot_y(x+(tiisg + row * nb + column * N_SIMDWIDTH), sumy, yl);
for (int row = 0; row < nr; row++) {
sumf[row] += block_q_n_dot_y(x+ib+row*nb, sumy, yl, il);
}
yb += QK4_0 * 16;
}
// from now loads two rows every time and 16 blocks per row
int ir = tiisg / (N_SIMDWIDTH / 2);
int ib = tiisg % (N_SIMDWIDTH / 2);
for (int ind = 0; ind < (nb % N_SIMDWIDTH + N_SIMDWIDTH / 2 - 1)/(N_SIMDWIDTH / 2); ind++) {
int nb_start = (nb / N_SIMDWIDTH) * N_SIMDWIDTH + ind * (N_SIMDWIDTH / 2); //where the left blocks start
float sumy = 0;
for (int i = 0; i < QK4_0 / 4; i++) {
y_curr[i] = *((device float4 *)(y + (nb_start + ib) * QK4_0) + i);
sumy += y_curr[i][0] + y_curr[i][1] + y_curr[i][2] + y_curr[i][3];
}
for (int row = 0; row < N_DST; row+=2) {
if (nb_start + ib < nb) {
sumf[row + ir] += block_q_n_dot_y(x + (nb_start + ib + (row + ir) * nb), sumy, yl);
}
}
}
for (int row = 0; row < N_DST; ++row) {
all_sum = simd_sum(sumf[row]);
if (tiisg == 0 && ((r0 * N_SIMDGROUP + sgitg) * N_DST + row) < ne01) {
dst[r1*ne0 + (r0 * N_SIMDGROUP + sgitg) * N_DST + row] = all_sum;
for (int row = 0; row < nr; ++row) {
const float tot = simd_sum(sumf[row]);
if (tiisg == 0 && first_row + row < ne01) {
dst[r1*ne0 + first_row + row] = tot;
}
}
}
@@ -500,7 +514,7 @@ kernel void kernel_mul_mat_q4_0_f32(
uint2 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
mul_vec_q_n_f32<block_q4_0>(src0,src1,dst,ne00,ne10,ne0,ne01,tgpig,tiisg,sgitg);
mul_vec_q_n_f32<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne10,ne0,ne01,tgpig,tiisg,sgitg);
}
kernel void kernel_mul_mat_q4_1_f32(
@@ -514,7 +528,7 @@ kernel void kernel_mul_mat_q4_1_f32(
uint2 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
mul_vec_q_n_f32<block_q4_1>(src0,src1,dst,ne00,ne10,ne0,ne01,tgpig,tiisg,sgitg);
mul_vec_q_n_f32<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne10,ne0,ne01,tgpig,tiisg,sgitg);
}
kernel void kernel_mul_mat_f16_f32(
@@ -523,11 +537,13 @@ kernel void kernel_mul_mat_f16_f32(
device float * dst,
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
constant uint64_t & nb00,
constant uint64_t & nb01,
constant uint64_t & nb02,
constant int64_t & ne10,
constant int64_t & ne11,
constant int64_t & ne12,
constant uint64_t & nb10,
constant uint64_t & nb11,
constant uint64_t & nb12,
@@ -543,7 +559,7 @@ kernel void kernel_mul_mat_f16_f32(
const int64_t r1 = tgpig.y;
const int64_t im = tgpig.z;
device const half * x = (device const half *) (src0 + r0*nb01 + im*nb02);
device const half * x = (device const half *) (src0 + r0*nb01 + im/(ne12/ne02)*nb02);
device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);
sum[tpitg.x] = 0.0f;
@@ -566,6 +582,7 @@ kernel void kernel_mul_mat_f16_f32(
}
}
kernel void kernel_alibi_f32(
device const float * src0,
device float * dst,
@@ -1237,111 +1254,137 @@ kernel void kernel_mul_mat_q2_K_f32(
constant int64_t & ne00,
constant int64_t & ne10,
constant int64_t & ne0,
threadgroup float * sum [[threadgroup(0)]],
constant int64_t & ne01[[buffer(4)]],
uint2 tgpig[[threadgroup_position_in_grid]],
uint2 tpitg[[thread_position_in_threadgroup]],
uint2 tptg[[threads_per_threadgroup]]) {
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nb = ne00/QK_K;
const int r0 = tgpig.x;
const int r1 = tgpig.y;
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
const int ib_row = first_row * nb;
device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row;
device const float * y = (device const float *) src1 + r1*ne10;
float yl[32];
float sumf[N_DST]={0.f}, all_sum;
device const block_q2_K * x = (device const block_q2_K *) src0 + r0*nb;
device const float * yy = (device const float *) src1 + r1*ne10;
const int nth = tptg.x*tptg.y;
const int ith = tptg.y*tpitg.x + tpitg.y;
float sumf = 0;
const int step = sizeof(block_q2_K) * nb;
#if QK_K == 256
const int tid = tpitg.y; // 0...16
const int il = tid/4; // 0...3
const int ir = tid%4; // 0...3
const int ip = il/2; // 0 or 1
const int shift1 = 4*(il%2);// 0 or 4
const int shift2 = shift1+2;// 2 or 6
const int n = 8;
const int is = 4*il + (n*ir)/16;
const int ix = tiisg/8; // 0...3
const int it = tiisg%8; // 0...7
const int im = it/4; // 0 or 1
const int ir = it%4; // 0...3
const int is = (8*ir)/16;// 0 or 1
const int y_offset = 64*il + n*ir;
const int q_offset = 32*ip + n*ir;
device const float * y4 = y + ix * QK_K + 128 * im + 8 * ir;
for (int i = tpitg.x; i < nb; i += tptg.x) {
for (int ib = ix; ib < nb; ib += 4) {
device const uint8_t * q = x[i].qs + q_offset;
device const uint8_t * scales = x[i].scales + is;
uint8_t d1 = scales[0] & 0xF;
uint8_t d2 = scales[2] & 0xF;
uint8_t m1 = scales[0] >> 4;
uint8_t m2 = scales[2] >> 4;
device const float * y = yy + i*QK_K + y_offset;
float2 s = {0.f, 0.f};
float smin = 0;
for (int l = 0; l < n; ++l) {
s[0] += y[l+ 0] * ((q[l] >> shift1) & 3);
s[1] += y[l+32] * ((q[l] >> shift2) & 3);
smin += y[l+ 0] * m1 + y[l+32] * m2;
float4 sumy = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8];
yl[i+16] = y4[i+64]; sumy[2] += yl[i+16];
yl[i+24] = y4[i+96]; sumy[3] += yl[i+24];
}
const float dall = (float)x[i].d;
const float dmin = (float)x[i].dmin;
device const uint8_t * sc = (device const uint8_t *)x[ib].scales + 8*im + is;
device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * im + 4 * ir;
device const half * dh = &x[ib].d;
sumf += dall * (s[0] * d1 + s[1] * d2) - dmin * smin;
for (int row = 0; row < N_DST; row++) {
float4 acc1 = {0.f, 0.f, 0.f, 0.f};
float4 acc2 = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003);
acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300);
acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c);
acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00);
acc1[2] += yl[i+16] * (qs[i/2] & 0x0030);
acc2[2] += yl[i+17] * (qs[i/2] & 0x3000);
acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0);
acc2[3] += yl[i+25] * (qs[i/2] & 0xc000);
}
float dall = dh[0];
float dmin = dh[1] * 1.f/16.f;
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f +
(acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f +
(acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f +
(acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) -
dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0));
qs += step/2;
sc += step;
dh += step/2;
}
y4 += 4 * QK_K;
}
#else
const int il = 4 * tpitg.x;
const int ix = tiisg/2; // 0...15
const int it = tiisg%2; // 0...1
uint32_t aux[2];
thread const uint8_t * d = (thread const uint8_t *)aux;
thread const uint8_t * m = (thread const uint8_t *)aux + 4;
device const float * y4 = y + ix * QK_K + 8 * it;
for (int i = tpitg.y; i < nb; i += tptg.y) {
for (int ib = ix; ib < nb; ib += 16) {
device const uint8_t * q = x[i].qs + il;
device const float * y = yy + i*QK_K + il;
const float dall = (float)x[i].d;
const float dmin = (float)x[i].dmin;
device const uint32_t * a = (device const uint32_t *)x[i].scales;
aux[0] = a[0] & 0x0f0f0f0f;
aux[1] = (a[0] >> 4) & 0x0f0f0f0f;
for (int l = 0; l < 4; ++l) {
sumf += y[l+ 0] * (dall * d[0] * ((q[l] >> 0) & 3) - dmin * m[0])
+ y[l+16] * (dall * d[1] * ((q[l] >> 2) & 3) - dmin * m[1])
+ y[l+32] * (dall * d[2] * ((q[l] >> 4) & 3) - dmin * m[2])
+ y[l+48] * (dall * d[3] * ((q[l] >> 6) & 3) - dmin * m[3]);
float4 sumy = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
yl[i+ 8] = y4[i+16]; sumy[1] += yl[i+ 8];
yl[i+16] = y4[i+32]; sumy[2] += yl[i+16];
yl[i+24] = y4[i+48]; sumy[3] += yl[i+24];
}
device const uint8_t * sc = (device const uint8_t *)x[ib].scales;
device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it;
device const half * dh = &x[ib].d;
for (int row = 0; row < N_DST; row++) {
float4 acc1 = {0.f, 0.f, 0.f, 0.f};
float4 acc2 = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003);
acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300);
acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c);
acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00);
acc1[2] += yl[i+16] * (qs[i/2] & 0x0030);
acc2[2] += yl[i+17] * (qs[i/2] & 0x3000);
acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0);
acc2[3] += yl[i+25] * (qs[i/2] & 0xc000);
}
float dall = dh[0];
float dmin = dh[1];
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f +
(acc1[1] + 1.f/256.f * acc2[1]) * (sc[1] & 0xF) * 1.f/ 4.f +
(acc1[2] + 1.f/256.f * acc2[2]) * (sc[2] & 0xF) * 1.f/16.f +
(acc1[3] + 1.f/256.f * acc2[3]) * (sc[3] & 0xF) * 1.f/64.f) -
dmin * (sumy[0] * (sc[0] >> 4) + sumy[1] * (sc[1] >> 4) + sumy[2] * (sc[2] >> 4) + sumy[3] * (sc[3] >> 4));
qs += step/2;
sc += step;
dh += step/2;
}
y4 += 16 * QK_K;
}
#endif
sum[ith] = sumf;
//
// Accumulate the sum from all threads in the threadgroup
//
threadgroup_barrier(mem_flags::mem_threadgroup);
if (ith%4 == 0) {
for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (ith%16 == 0) {
for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (ith == 0) {
for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
dst[r1*ne0 + r0] = sum[0];
for (int row = 0; row < N_DST; ++row) {
all_sum = simd_sum(sumf[row]);
if (tiisg == 0) {
dst[r1*ne0 + first_row + row] = all_sum;
}
}
}
#if QK_K == 256
kernel void kernel_mul_mat_q3_K_f32(
device const void * src0,
device const float * src1,
@@ -1350,40 +1393,41 @@ kernel void kernel_mul_mat_q3_K_f32(
constant int64_t & ne10,
constant int64_t & ne0,
constant int64_t & ne1,
threadgroup float * sum [[threadgroup(0)]],
uint2 tgpig[[threadgroup_position_in_grid]],
uint2 tpitg[[thread_position_in_threadgroup]],
uint2 tptg[[threads_per_threadgroup]]) {
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nb = ne00/QK_K;
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
device const block_q3_K * x = (device const block_q3_K *) src0 + r0*nb;
const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2;
device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb;
device const float * yy = (device const float *) src1 + r1*ne10;
const int nth = tptg.x*tptg.y;
const int ith = tptg.y*tpitg.x + tpitg.y;
#if QK_K == 256
const uint8_t m3 = 3;
const int8_t m4 = 4;
float yl[16];
const uint16_t kmask1 = 0x0303;
const uint16_t kmask2 = 0x0f0f;
const int tid = tpitg.y; // expecting 16
const int tid = tiisg/2;
const int ix = tiisg%2;
const int ip = tid/8; // 0 or 1
const int il = tid/2 - 4*ip; // 0...3
const int ir = tid%2;
const int n = 8;
const int l0 = n*ir;
const uint8_t m = 1 << (4*ip + il);
const uint16_t m1 = 1 << (4*ip + il);
const uint16_t m2 = m1 << 8;
const int shift = 2*il;
const uint16_t qm1 = 0x0003 << shift;
const uint16_t qm2 = 0x0300 << shift;
const int32_t v1 = 4 << shift;
const int32_t v2 = 1024 << shift;
const uint16_t s_shift1 = 4*ip;
const uint16_t s_shift2 = s_shift1 + 2*(il/2);
@@ -1392,93 +1436,132 @@ kernel void kernel_mul_mat_q3_K_f32(
const int q_offset = 32*ip + l0;
const int y_offset = 128*ip + 32*il + l0;
//float sumf = 0;
float sumf1 = 0, sumf2 = 0;
for (int i = tpitg.x; i < nb; i += tptg.x) {
const int step = sizeof(block_q3_K) * nb / 2;
const float d_all = (float)(x[i].d);
device const float * y1 = yy + ix*QK_K + y_offset;
device const uint8_t * q = x[i].qs + q_offset;
device const uint8_t * h = x[i].hmask + l0;
device const float * y = yy + i * QK_K + y_offset;
float sumf1[2] = {0.f}, sumf2[2] = {0.f};
for (int i = ix; i < nb; i += 2) {
device const uint16_t * a = (device const uint16_t *)x[i].scales;
const char2 scales = as_type<char2>((uint16_t)(((a[il] >> s_shift1) & kmask2) | (((a[ik] >> s_shift2) & kmask1) << 4)));
float s = 0;
for (int l = 0; l < n; ++l) {
s += y[l+ 0] * ((int8_t)((q[l+ 0] >> shift) & m3) - ((h[l+ 0] & m) ? 0 : m4));
for (int l = 0; l < 8; ++l) {
yl[l+0] = y1[l+ 0];
yl[l+8] = y1[l+16];
}
float d = d_all * s;
sumf1 += d * scales[0];
sumf2 += d;
//sumf += d_all * s * (scales[0] - 32);
s = 0;
for (int l = 0; l < n; ++l) {
s += y[l+16] * ((int8_t)((q[l+16] >> shift) & m3) - ((h[l+16] & m) ? 0 : m4));
device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset);
device const uint16_t * h = (device const uint16_t *)(x[i].hmask + l0);
device const uint16_t * a = (device const uint16_t *)(x[i].scales);
device const half * dh = &x[i].d;
for (int row = 0; row < 2; ++row) {
const float d_all = (float)dh[0];
const char2 scales = as_type<char2>((uint16_t)(((a[il] >> s_shift1) & kmask2) | (((a[ik] >> s_shift2) & kmask1) << 4)));
float s1 = 0, s2 = 0;
for (int l = 0; l < n; l += 2) {
const uint16_t qs = q[l/2];
s1 += yl[l+0] * ((int32_t)(qs & qm1) - ((h[l/2] & m1) ? 0 : v1));
s2 += yl[l+1] * ((int32_t)(qs & qm2) - ((h[l/2] & m2) ? 0 : v2));
}
float d = d_all * (s1 + 1.f/256.f * s2);
sumf1[row] += d * scales[0];
sumf2[row] += d;
s1 = s2 = 0;
for (int l = 0; l < n; l += 2) {
const uint16_t qs = q[l/2+8];
s1 += yl[l+8] * ((int32_t)(qs & qm1) - ((h[l/2+8] & m1) ? 0 : v1));
s2 += yl[l+9] * ((int32_t)(qs & qm2) - ((h[l/2+8] & m2) ? 0 : v2));
}
d = d_all * (s1 + 1.f/256.f * s2);
sumf1[row] += d * scales[1];
sumf2[row] += d;
q += step;
h += step;
a += step;
dh += step;
}
d = d_all * s;
sumf1 += d * scales[1];
sumf2 += d;
//sumf += d_all * s * (scales[1] - 32);
y1 += 2 * QK_K;
}
//sum[ith] = sumf;
sum[ith] = sumf1 - 32.f*sumf2;
for (int row = 0; row < 2; ++row) {
const float sumf = (sumf1[row] - 32.f*sumf2[row]) / (1 << shift);
const float tot = simd_sum(sumf);
if (tiisg == 0) {
dst[r1*ne0 + first_row + row] = tot;
}
}
}
#else
const int il = 4 * tpitg.x; // 0, 4, 8, 12
kernel void kernel_mul_mat_q3_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne00,
constant int64_t & ne10,
constant int64_t & ne0,
constant int64_t & ne1,
uint2 tgpig[[threadgroup_position_in_grid]],
uint tiisg[[thread_index_in_simdgroup]],
uint sgitg[[simdgroup_index_in_threadgroup]]) {
const int nb = ne00/QK_K;
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
const int row = 2 * r0 + sgitg;
device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb;
device const float * yy = (device const float *) src1 + r1*ne10;
const int ix = tiisg/4;
const int il = 4 * (tiisg%4);// 0, 4, 8, 12
const int im = il/8; // 0, 0, 1, 1
const int in = il%8; // 0, 4, 0, 4
float sumf = 0;
float2 sum = {0.f, 0.f};
for (int i = tpitg.y; i < nb; i += tptg.y) {
for (int i = ix; i < nb; i += 8) {
const float d_all = (float)(x[i].d);
device const uint8_t * q = x[i].qs + il;
device const uint8_t * h = x[i].hmask + in;
device const float * y = yy + i * QK_K + il;
device const uint16_t * q = (device const uint16_t *)(x[i].qs + il);
device const uint16_t * h = (device const uint16_t *)(x[i].hmask + in);
device const uint16_t * s = (device const uint16_t *)(x[i].scales);
device const float * y = yy + i * QK_K + il;
const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
const float d1 = d_all * ((int32_t)(s[0] & 0x000F) - 8);
const float d2 = d_all * ((int32_t)(s[0] & 0x00F0) - 128) * 1.f/64.f;
const float d3 = d_all * ((int32_t)(s[0] & 0x0F00) - 2048) * 1.f/4096.f;
const float d4 = d_all * ((int32_t)(s[0] & 0xF000) - 32768) * 1.f/262144.f;
for (int l = 0; l < 4; ++l) {
const uint8_t hm = h[l] >> im;
sumf += y[l+ 0] * d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((hm & 0x01) ? 0 : 4))
+ y[l+16] * d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((hm & 0x04) ? 0 : 4))
+ y[l+32] * d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((hm & 0x10) ? 0 : 4))
+ y[l+48] * d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((hm & 0x40) ? 0 : 4));
for (int l = 0; l < 4; l += 2) {
const uint16_t hm = h[l/2] >> im;
sum[0] += y[l+ 0] * d1 * ((int32_t)(q[l/2] & 0x0003) - ((hm & 0x0001) ? 0 : 4))
+ y[l+16] * d2 * ((int32_t)(q[l/2] & 0x000c) - ((hm & 0x0004) ? 0 : 16))
+ y[l+32] * d3 * ((int32_t)(q[l/2] & 0x0030) - ((hm & 0x0010) ? 0 : 64))
+ y[l+48] * d4 * ((int32_t)(q[l/2] & 0x00c0) - ((hm & 0x0040) ? 0 : 256));
sum[1] += y[l+ 1] * d1 * ((int32_t)(q[l/2] & 0x0300) - ((hm & 0x0100) ? 0 : 1024))
+ y[l+17] * d2 * ((int32_t)(q[l/2] & 0x0c00) - ((hm & 0x0400) ? 0 : 4096))
+ y[l+33] * d3 * ((int32_t)(q[l/2] & 0x3000) - ((hm & 0x1000) ? 0 : 16384))
+ y[l+49] * d4 * ((int32_t)(q[l/2] & 0xc000) - ((hm & 0x4000) ? 0 : 65536));
}
}
const float sumf = sum[0] + sum[1] * 1.f/256.f;
sum[ith] = sumf;
#endif
//
// Accumulate the sum from all threads in the threadgroup
//
threadgroup_barrier(mem_flags::mem_threadgroup);
if (ith%4 == 0) {
for (int i = 1; i < 4; ++i) sum[ith] += sum[ith + i];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (ith%16 == 0) {
for (int i = 4; i < 16; i += 4) sum[ith] += sum[ith + i];
}
threadgroup_barrier(mem_flags::mem_threadgroup);
if (ith == 0) {
for (int i = 16; i < nth; i += 16) sum[0] += sum[i];
dst[r1*ne0 + r0] = sum[0];
const float tot = simd_sum(sumf);
if (tiisg == 0) {
dst[r1*ne0 + row] = tot;
}
}
#endif
#if QK_K == 256
kernel void kernel_mul_mat_q4_K_f32(
@@ -1776,7 +1859,6 @@ kernel void kernel_mul_mat_q5_K_f32(
for (int i = ix; i < nb; i += 8) {
float4 sumy = {0.f, 0.f, 0.f, 0.f};
for (int l = 0; l < 4; ++l) {
yl[l+0] = y[l+ 0];
yl[l+4] = y[l+16];


@@ -1,7 +1,7 @@
//go:build mpi
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*


@@ -1,7 +1,7 @@
//go:build mpi
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*


@@ -1,7 +1,7 @@
//go:build opencl
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*


@@ -1,7 +1,7 @@
//go:build opencl
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*

File diff suppressed because it is too large

llm/ggml.go (new file, 185 lines added)

@@ -0,0 +1,185 @@
package llm
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
type ModelFamily string
type ModelType uint32
const (
ModelType3B ModelType = 26
ModelType7B ModelType = 32
ModelType13B ModelType = 40
ModelType34B ModelType = 48
ModelType30B ModelType = 60
ModelType65B ModelType = 80
)
func (mt ModelType) String() string {
switch mt {
case ModelType3B:
return "3B"
case ModelType7B:
return "7B"
case ModelType13B:
return "13B"
case ModelType34B:
return "34B"
case ModelType30B:
return "30B"
case ModelType65B:
return "65B"
default:
return "Unknown"
}
}
type FileType interface {
String() string
}
type GGML struct {
magic uint32
container
model
}
type model interface {
ModelFamily() ModelFamily
ModelType() ModelType
FileType() FileType
}
type container interface {
Name() string
Decode(io.Reader) error
}
type containerGGML struct {
}
func (c *containerGGML) Name() string {
return "ggml"
}
func (c *containerGGML) Decode(r io.Reader) error {
return nil
}
type containerGGMF struct {
version uint32
}
func (c *containerGGMF) Name() string {
return "ggmf"
}
func (c *containerGGMF) Decode(r io.Reader) error {
var version uint32
binary.Read(r, binary.LittleEndian, &version)
switch version {
case 1:
default:
return errors.New("invalid version")
}
c.version = version
return nil
}
type containerGGJT struct {
version uint32
}
func (c *containerGGJT) Name() string {
return "ggjt"
}
func (c *containerGGJT) Decode(r io.Reader) error {
var version uint32
binary.Read(r, binary.LittleEndian, &version)
switch version {
case 1, 2, 3:
default:
return errors.New("invalid version")
}
c.version = version
return nil
}
type containerLORA struct {
version uint32
}
func (c *containerLORA) Name() string {
return "ggla"
}
func (c *containerLORA) Decode(r io.Reader) error {
var version uint32
binary.Read(r, binary.LittleEndian, &version)
switch version {
case 1:
default:
return errors.New("invalid version")
}
c.version = version
return nil
}
const (
// Magic constant for `ggml` files (unversioned).
FILE_MAGIC_GGML = 0x67676d6c
// Magic constant for `ggml` files (versioned, ggmf).
FILE_MAGIC_GGMF = 0x67676d66
// Magic constant for `ggml` files (versioned, ggjt).
FILE_MAGIC_GGJT = 0x67676a74
// Magic constant for `ggla` files (LoRA adapter).
FILE_MAGIC_GGLA = 0x67676C61
)
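As a small illustration (not part of this change): each magic constant is just the four ASCII bytes of the container tag packed into a uint32, which a hypothetical helper could unpack:
// magicTag is illustrative only; e.g. magicTag(FILE_MAGIC_GGML) returns "ggml".
func magicTag(magic uint32) string {
	return string([]byte{
		byte(magic >> 24), // 0x67 'g'
		byte(magic >> 16), // 0x67 'g'
		byte(magic >> 8),  // 0x6d 'm'
		byte(magic),       // 0x6c 'l'
	})
}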
func DecodeGGML(r io.ReadSeeker, hint ModelFamily) (*GGML, error) {
var ggml GGML
binary.Read(r, binary.LittleEndian, &ggml.magic)
switch ggml.magic {
case FILE_MAGIC_GGML:
ggml.container = &containerGGML{}
case FILE_MAGIC_GGMF:
ggml.container = &containerGGMF{}
case FILE_MAGIC_GGJT:
ggml.container = &containerGGJT{}
case FILE_MAGIC_GGLA:
ggml.container = &containerLORA{}
default:
return nil, errors.New("invalid file magic")
}
if err := ggml.Decode(r); err != nil {
return nil, err
}
// different model types may have different layouts for hyperparameters
switch hint {
case ModelFamilyLlama:
var llama llamaModel
binary.Read(r, binary.LittleEndian, &llama.hyperparameters)
ggml.model = &llama
// TODO: sanity check hyperparameters
default:
return nil, fmt.Errorf("unsupported model type: %s", hint)
}
// final model type
return &ggml, nil
}
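For context, a minimal caller of DecodeGGML could look like the sketch below. It is hypothetical and not part of this change: ModelFamilyLlama and llamaModel are assumed to be defined elsewhere in package llm (as referenced in the switch above), "os" would need to be added to the import block, and the path and error handling are purely illustrative.
// exampleDecode is an illustrative helper showing the DecodeGGML flow.
func exampleDecode(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	ggml, err := DecodeGGML(f, ModelFamilyLlama) // hint selects the llama hyperparameter layout
	if err != nil {
		return err
	}
	fmt.Printf("container=%s type=%s\n", ggml.Name(), ggml.ModelType())
	return nil
}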


@@ -1,5 +1,5 @@
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -209,6 +209,15 @@
# define GGML_API
#endif
// TODO: support for clang
#ifdef __GNUC__
# define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
# define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
# define GGML_DEPRECATED(func, hint) func
#endif
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
@@ -225,6 +234,7 @@
#define GGML_MAX_CONTEXTS 64
#define GGML_MAX_SRC 6
#define GGML_MAX_NAME 48
#define GGML_MAX_OP_PARAMS 32
#define GGML_DEFAULT_N_THREADS 4
@@ -233,6 +243,7 @@
#define GGML_UNUSED(x) (void)(x)
#define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
#define GGML_ASSERT(x) \
do { \
@@ -355,16 +366,6 @@ extern "C" {
GGML_OP_ARGMAX,
GGML_OP_REPEAT,
GGML_OP_REPEAT_BACK,
GGML_OP_ABS,
GGML_OP_SGN,
GGML_OP_NEG,
GGML_OP_STEP,
GGML_OP_TANH,
GGML_OP_ELU,
GGML_OP_RELU,
GGML_OP_GELU,
GGML_OP_GELU_QUICK,
GGML_OP_SILU,
GGML_OP_SILU_BACK,
GGML_OP_NORM, // normalize
GGML_OP_RMS_NORM,
@@ -403,9 +404,15 @@ extern "C" {
GGML_OP_WIN_PART,
GGML_OP_WIN_UNPART,
GGML_OP_UNARY,
GGML_OP_MAP_UNARY,
GGML_OP_MAP_BINARY,
GGML_OP_MAP_CUSTOM1_F32,
GGML_OP_MAP_CUSTOM2_F32,
GGML_OP_MAP_CUSTOM3_F32,
GGML_OP_MAP_CUSTOM1,
GGML_OP_MAP_CUSTOM2,
GGML_OP_MAP_CUSTOM3,
@@ -416,6 +423,24 @@ extern "C" {
GGML_OP_COUNT,
};
enum ggml_unary_op {
GGML_UNARY_OP_ABS,
GGML_UNARY_OP_SGN,
GGML_UNARY_OP_NEG,
GGML_UNARY_OP_STEP,
GGML_UNARY_OP_TANH,
GGML_UNARY_OP_ELU,
GGML_UNARY_OP_RELU,
GGML_UNARY_OP_GELU,
GGML_UNARY_OP_GELU_QUICK,
GGML_UNARY_OP_SILU,
};
enum ggml_object_type {
GGML_OBJECT_TENSOR,
GGML_OBJECT_GRAPH,
GGML_OBJECT_WORK_BUFFER
};
// ggml object
struct ggml_object {
@@ -424,7 +449,9 @@ extern "C" {
struct ggml_object * next;
char padding[8];
enum ggml_object_type type;
char padding[4];
};
static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);
@@ -444,6 +471,9 @@ extern "C" {
// compute data
enum ggml_op op;
// op params - allocated as int32_t for alignment
int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)];
bool is_param;
struct ggml_tensor * grad;
@@ -460,7 +490,7 @@ extern "C" {
void * extra; // extra things e.g. for ggml-cuda.cu
char padding[8];
char padding[4];
};
static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
@@ -481,6 +511,11 @@ extern "C" {
void * abort_callback_data;
};
// next prime after GGML_MAX_NODES
// #define GGML_GRAPH_HASHTABLE_SIZE 4099
// next prime after GGML_MAX_NODES * 2 (nodes + leafs)
#define GGML_GRAPH_HASHTABLE_SIZE 8273
// computation graph
struct ggml_cgraph {
int n_nodes;
@@ -490,12 +525,16 @@ extern "C" {
struct ggml_tensor * grads[GGML_MAX_NODES];
struct ggml_tensor * leafs[GGML_MAX_NODES];
void * visited_hash_table[GGML_GRAPH_HASHTABLE_SIZE];
// performance
int perf_runs;
int64_t perf_cycles;
int64_t perf_time_us;
};
static const size_t GGML_GRAPH_SIZE = sizeof(struct ggml_cgraph);
// scratch buffer
struct ggml_scratch {
size_t offs;
@@ -557,6 +596,7 @@ extern "C" {
GGML_API const char * ggml_type_name(enum ggml_type type);
GGML_API const char * ggml_op_name (enum ggml_op op);
GGML_API const char * ggml_op_symbol(enum ggml_op op);
GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor);
@@ -569,6 +609,8 @@ extern "C" {
GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);
GGML_API bool ggml_is_permuted (const struct ggml_tensor * tensor);
GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1);
// use this to compute the memory overhead of a tensor
GGML_API size_t ggml_tensor_overhead(void);
@@ -580,6 +622,7 @@ extern "C" {
GGML_API size_t ggml_used_mem(const struct ggml_context * ctx);
GGML_API size_t ggml_set_scratch (struct ggml_context * ctx, struct ggml_scratch scratch);
GGML_API bool ggml_get_no_alloc(struct ggml_context * ctx);
GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc);
GGML_API void * ggml_get_mem_buffer (const struct ggml_context * ctx);
@@ -639,9 +682,11 @@ extern "C" {
GGML_API void * ggml_get_data (const struct ggml_tensor * tensor);
GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);
GGML_API const char * ggml_get_name(const struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name);
GGML_API struct ggml_tensor * ggml_format_name(struct ggml_tensor * tensor, const char * fmt, ...);
GGML_API enum ggml_unary_op ggml_get_unary_op(const struct ggml_tensor * tensor);
GGML_API const char * ggml_get_name (const struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_set_name ( struct ggml_tensor * tensor, const char * name);
GGML_API struct ggml_tensor * ggml_format_name( struct ggml_tensor * tensor, const char * fmt, ...);
//
// operations on tensors with backpropagation
@@ -651,6 +696,11 @@ extern "C" {
struct ggml_context * ctx,
struct ggml_tensor * a);
// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_dup_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a);
GGML_API struct ggml_tensor * ggml_add(
struct ggml_context * ctx,
struct ggml_tensor * a,
@@ -875,14 +925,17 @@ extern "C" {
GGML_API struct ggml_tensor * ggml_rms_norm(
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * a,
float eps);
GGML_API struct ggml_tensor * ggml_rms_norm_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * a,
float eps);
// a - x
// b - dy
// TODO: update with configurable eps
GGML_API struct ggml_tensor * ggml_rms_norm_back(
struct ggml_context * ctx,
struct ggml_tensor * a,
@@ -974,11 +1027,22 @@ extern "C" {
struct ggml_tensor * a,
struct ggml_tensor * b);
// a -> b, in-place, return view(b)
GGML_API struct ggml_tensor * ggml_cpy_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b);
// make contiguous
GGML_API struct ggml_tensor * ggml_cont(
struct ggml_context * ctx,
struct ggml_tensor * a);
// make contiguous, in-place
GGML_API struct ggml_tensor * ggml_cont_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a);
// return view(a), b specifies the new shape
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape(
@@ -1147,16 +1211,27 @@ extern "C" {
int mode,
int n_ctx);
// custom RoPE, in-place, returns view(a)
// custom RoPE
GGML_API struct ggml_tensor * ggml_rope_custom(
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past,
int n_dims,
int mode,
int n_ctx,
float freq_base,
float freq_scale);
// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past,
int n_dims,
int mode,
int n_ctx,
float freq_base,
float freq_scale,
int n_ctx);
float freq_scale);
// rotary position embedding backward, i.e compute dx from dy
// a - dy
@@ -1165,7 +1240,8 @@ extern "C" {
struct ggml_tensor * a,
int n_past,
int n_dims,
int mode);
int mode,
int n_ctx);
// alibi position embedding
// in-place, returns view(a)
@@ -1205,7 +1281,7 @@ extern "C" {
// conv_1d with padding = half
// alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
GGML_API struct ggml_tensor* ggml_conv_1d_ph(
GGML_API struct ggml_tensor * ggml_conv_1d_ph(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
@@ -1218,7 +1294,7 @@ extern "C" {
GGML_OP_POOL_COUNT,
};
GGML_API struct ggml_tensor* ggml_pool_1d(
GGML_API struct ggml_tensor * ggml_pool_1d(
struct ggml_context * ctx,
struct ggml_tensor * a,
enum ggml_op_pool op,
@@ -1226,7 +1302,7 @@ extern "C" {
int s0, // stride
int p0); // padding
GGML_API struct ggml_tensor* ggml_pool_2d(
GGML_API struct ggml_tensor * ggml_pool_2d(
struct ggml_context * ctx,
struct ggml_tensor * a,
enum ggml_op_pool op,
@@ -1280,6 +1356,16 @@ extern "C" {
int h0,
int w);
GGML_API struct ggml_tensor * ggml_unary(
struct ggml_context * ctx,
struct ggml_tensor * a,
enum ggml_unary_op op);
GGML_API struct ggml_tensor * ggml_unary_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
enum ggml_unary_op op);
// custom operators
typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
@@ -1289,63 +1375,129 @@ extern "C" {
typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
GGML_API struct ggml_tensor * ggml_map_unary_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
ggml_unary_op_f32_t fun);
ggml_unary_op_f32_t fun),
"use ggml_map_custom1 instead");
GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
ggml_unary_op_f32_t fun);
ggml_unary_op_f32_t fun),
"use ggml_map_custom1_inplace instead");
GGML_API struct ggml_tensor * ggml_map_binary_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
ggml_binary_op_f32_t fun);
ggml_binary_op_f32_t fun),
"use ggml_map_custom2 instead");
GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
ggml_binary_op_f32_t fun);
ggml_binary_op_f32_t fun),
"use ggml_map_custom2_inplace instead");
GGML_API struct ggml_tensor * ggml_map_custom1_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
ggml_custom1_op_f32_t fun);
ggml_custom1_op_f32_t fun),
"use ggml_map_custom1 instead");
GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
ggml_custom1_op_f32_t fun);
ggml_custom1_op_f32_t fun),
"use ggml_map_custom1_inplace instead");
GGML_API struct ggml_tensor * ggml_map_custom2_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
ggml_custom2_op_f32_t fun);
ggml_custom2_op_f32_t fun),
"use ggml_map_custom2 instead");
GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
ggml_custom2_op_f32_t fun);
ggml_custom2_op_f32_t fun),
"use ggml_map_custom2_inplace instead");
GGML_API struct ggml_tensor * ggml_map_custom3_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
struct ggml_tensor * c,
ggml_custom3_op_f32_t fun);
ggml_custom3_op_f32_t fun),
"use ggml_map_custom3 instead");
GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
struct ggml_tensor * c,
ggml_custom3_op_f32_t fun);
ggml_custom3_op_f32_t fun),
"use ggml_map_custom3_inplace instead");
// custom operators v2
typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, int ith, int nth, void * userdata);
typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst , const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);
#define GGML_N_TASKS_MAX -1
GGML_API struct ggml_tensor * ggml_map_custom1(
struct ggml_context * ctx,
struct ggml_tensor * a,
ggml_custom1_op_t fun,
int n_tasks,
void * userdata);
GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
ggml_custom1_op_t fun,
int n_tasks,
void * userdata);
GGML_API struct ggml_tensor * ggml_map_custom2(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
ggml_custom2_op_t fun,
int n_tasks,
void * userdata);
GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
ggml_custom2_op_t fun,
int n_tasks,
void * userdata);
GGML_API struct ggml_tensor * ggml_map_custom3(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
struct ggml_tensor * c,
ggml_custom3_op_t fun,
int n_tasks,
void * userdata);
GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
struct ggml_tensor * c,
ggml_custom3_op_t fun,
int n_tasks,
void * userdata);
// loss function
@@ -1368,11 +1520,17 @@ extern "C" {
struct ggml_context * ctx,
struct ggml_tensor * tensor);
GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
// graph allocation in a context
GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx);
GGML_API struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor);
GGML_API size_t ggml_graph_overhead(void);
// ggml_graph_plan() has to be called before ggml_graph_compute()
// when plan.work_size > 0, caller must allocate memory for plan.work_data
GGML_API struct ggml_cplan ggml_graph_plan (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);


@@ -1,5 +1,5 @@
/**
* llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -65,6 +65,8 @@
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
//
// 2-6 bit quantization in super-blocks
//
@@ -1379,7 +1381,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
const __m256i scales[2] = {_mm256_set_m128i(l_scales, l_scales), _mm256_set_m128i(h_scales, h_scales)};
const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
__m256i sumi = _mm256_setzero_si256();
@@ -1447,7 +1449,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
// sumf += -dmin * summs in 32bits*8
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(_mm256_set_m128i(summs_1, summs_0))), acc);
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
@@ -1519,7 +1521,7 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
}
// sumf += dall * isum - dmin * summs in 32bits
__m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
__m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
}
@@ -1670,8 +1672,8 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
summs += dmin * smin;
const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
const __m256i q2_0 = _mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q2bits, 2), q2bits), m3);
const __m256i q2_1 = _mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
@@ -1692,6 +1694,62 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc) + summs;
#elif defined __AVX__
const __m128i m3 = _mm_set1_epi8(3);
__m256 acc = _mm256_setzero_ps();
uint32_t ud, um;
const uint8_t * restrict db = (const uint8_t *)&ud;
const uint8_t * restrict mb = (const uint8_t *)&um;
float summs = 0;
// TODO: optimize this
for (int i = 0; i < nb; ++i) {
const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);
const uint8_t * restrict q2 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
ud = (sc[0] >> 0) & 0x0f0f0f0f;
um = (sc[0] >> 4) & 0x0f0f0f0f;
int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
summs += dmin * smin;
const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
const __m128i q2_0 = _mm_and_si128(q2bits, m3);
const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
}
*s = hsum_float_8(acc) + summs;
#else
float sumf = 0;
@@ -1887,7 +1945,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
- const __m256i scales[2] = {_mm256_set_m128i(l_scales, l_scales), _mm256_set_m128i(h_scales, h_scales)};
+ const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
// high bit
const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
@@ -2098,7 +2156,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
}
// multiply with block scale and accumulate
- __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
}
@@ -2273,13 +2331,13 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
aux16[0] = a & 0x0f0f;
aux16[1] = (a >> 4) & 0x0f0f;
- const __m256i scale_0 = _mm256_set_m128i(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
- const __m256i scale_1 = _mm256_set_m128i(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
+ const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
+ const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
memcpy(&aux64, x[i].hmask, 8);
const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
- __m256i q3h_0 = _mm256_set_m128i(_mm_srli_epi16(haux, 2), haux);
+ __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
__m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
@@ -2288,7 +2346,7 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
// prepare low and high bits
- const __m256i q3aux = _mm256_set_m128i(_mm_srli_epi16(q3bits, 2), q3bits);
+ const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
@@ -2321,6 +2379,93 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc);
#elif defined __AVX__
const __m128i m3 = _mm_set1_epi8(3);
const __m128i m1 = _mm_set1_epi8(1);
__m256 acc = _mm256_setzero_ps();
uint64_t aux64;
uint16_t aux16[2];
const int8_t * aux8 = (const int8_t *)aux16;
for (int i = 0; i < nb; ++i) {
const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
const uint8_t * restrict q3 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
const uint16_t a = *(const uint16_t *)x[i].scales;
aux16[0] = a & 0x0f0f;
aux16[1] = (a >> 4) & 0x0f0f;
const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
memcpy(&aux64, x[i].hmask, 8);
__m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
__m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
__m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
__m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
// load low 2 bits
const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
// prepare low and high bits
const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
// load Q8 quants
const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
// Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm_maddubs_epi16,
// and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set,
// and 2 if the high bit was set)
const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
__m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
__m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
__m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
__m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
p16_0 = _mm_sub_epi16(p16_0, q8s_0);
p16_1 = _mm_sub_epi16(p16_1, q8s_1);
p16_2 = _mm_sub_epi16(p16_2, q8s_2);
p16_3 = _mm_sub_epi16(p16_3, q8s_3);
// multiply with scales
p16_0 = _mm_madd_epi16(scale_0, p16_0);
p16_1 = _mm_madd_epi16(scale_1, p16_1);
p16_2 = _mm_madd_epi16(scale_2, p16_2);
p16_3 = _mm_madd_epi16(scale_3, p16_3);
p16_0 = _mm_add_epi32(p16_0, p16_2);
p16_1 = _mm_add_epi32(p16_1, p16_3);
__m256i p16 = MM256_SET_M128I(p16_1, p16_0);
// multiply with block scale and accumulate
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
}
*s = hsum_float_8(acc);
#else
int8_t aux8[QK_K];
@@ -2503,7 +2648,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
- const __m256i scales = _mm256_set_m128i(sc128, sc128);
+ const __m256i scales = MM256_SET_M128I(sc128, sc128);
__m256i sumi = _mm256_setzero_si256();
@@ -2610,7 +2755,7 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
}
__m256 vd = _mm256_set1_ps(d);
- __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
}
@@ -2807,6 +2952,60 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc) - summs;
#elif defined __AVX__
const __m128i m4 = _mm_set1_epi8(0xF);
__m256 acc = _mm256_setzero_ps();
float summs = 0;
uint16_t aux16[2];
const uint8_t * scales = (const uint8_t *)aux16;
for (int i = 0; i < nb; ++i) {
const float d = ggml_fp16_to_fp32(x[i].d[0]) * y[i].d;
const float m = ggml_fp16_to_fp32(x[i].d[1]) * y[i].d;
const __m256 vd = _mm256_set1_ps(d);
const uint16_t * a = (const uint16_t *)x[i].scales;
aux16[0] = a[0] & 0x0f0f;
aux16[1] = (a[0] >> 4) & 0x0f0f;
summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
const uint8_t * restrict q4 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
}
*s = hsum_float_8(acc) - summs;
#else
uint8_t aux8[QK_K];
@@ -2989,7 +3188,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
summs += dmin * _mm_extract_epi32(hsum, 0);
const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
- const __m256i scales = _mm256_set_m128i(sc128, sc128);
+ const __m256i scales = MM256_SET_M128I(sc128, sc128);
const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
__m256i hmask = mone;
@@ -3128,7 +3327,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
}
__m256 vd = _mm256_set1_ps(d);
- __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
}
@@ -3291,13 +3490,13 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
- const __m256i scale_l = _mm256_set_m128i(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
- const __m256i scale_h = _mm256_set_m128i(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
+ const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
+ const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
int64_t aux64;
memcpy(&aux64, x[i].qh, 8);
const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
- const __m256i haux256 = _mm256_set_m128i(_mm_srli_epi16(haux128, 2), haux128);
+ const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
@@ -3321,10 +3520,66 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc);
#elif defined __AVX__
const __m128i m4 = _mm_set1_epi8(0xF);
const __m128i mone = _mm_set1_epi8(1);
__m256 acc = _mm256_setzero_ps();
for (int i = 0; i < nb; ++i) {
const uint8_t * restrict q5 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
int64_t aux64;
memcpy(&aux64, x[i].qh, 8);
const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
}
*s = hsum_float_8(acc);
#else
- uint8_t aux8[QK_K];
+ int8_t aux8[QK_K];
int16_t aux16[16];
float sums [8];
memset(sums, 0, 8*sizeof(float));
@@ -3334,7 +3589,7 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
const uint8_t * restrict q4 = x[i].qs;
const uint8_t * restrict hm = x[i].qh;
const int8_t * restrict q8 = y[i].qs;
- uint8_t * restrict a = aux8;
+ int8_t * restrict a = aux8;
for (int l = 0; l < 32; ++l) {
a[l+ 0] = q4[l] & 0xF;
a[l+32] = q4[l] >> 4;
@@ -3698,7 +3953,7 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
}
- __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
}
@@ -3856,8 +4111,8 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
- const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
- const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_set_m128i(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
+ const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
+ const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
@@ -3884,6 +4139,77 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
*s = hsum_float_8(acc);
#elif defined __AVX__
const __m128i m4 = _mm_set1_epi8(0xF);
const __m128i m2 = _mm_set1_epi8(3);
const __m128i m32s = _mm_set1_epi8(32);
__m256 acc = _mm256_setzero_ps();
for (int i = 0; i < nb; ++i) {
const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
const uint8_t * restrict q4 = x[i].ql;
const uint8_t * restrict qh = x[i].qh;
const int8_t * restrict q8 = y[i].qs;
const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
__m128i sumi_0 = _mm_setzero_si128();
__m128i sumi_1 = _mm_setzero_si128();
const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
__m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
__m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
__m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
__m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
__m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
__m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
__m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
__m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
p16_0 = _mm_sub_epi16(p16_0, q8s_0);
p16_1 = _mm_sub_epi16(p16_1, q8s_1);
p16_2 = _mm_sub_epi16(p16_2, q8s_2);
p16_3 = _mm_sub_epi16(p16_3, q8s_3);
p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
}
*s = hsum_float_8(acc);
#else
int8_t aux8[QK_K];


@@ -1,5 +1,5 @@
/**
- * llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
+ * llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*


@@ -1,5 +1,5 @@
/**
- * llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
+ * llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -175,6 +175,46 @@ struct llama_file {
}
};
// llama_context_data
struct llama_data_context {
virtual void write(const void * src, size_t size) = 0;
virtual size_t get_size_written() = 0;
virtual ~llama_data_context() = default;
};
struct llama_data_buffer_context : llama_data_context {
uint8_t* ptr;
size_t size_written = 0;
llama_data_buffer_context(uint8_t * p) : ptr(p) {}
void write(const void * src, size_t size) override {
memcpy(ptr, src, size);
ptr += size;
size_written += size;
}
size_t get_size_written() override {
return size_written;
}
};
struct llama_data_file_context : llama_data_context {
llama_file* file;
size_t size_written = 0;
llama_data_file_context(llama_file * f) : file(f) {}
void write(const void * src, size_t size) override {
file->write_raw(src, size);
size_written += size;
}
size_t get_size_written() override {
return size_written;
}
};
#if defined(_WIN32)
static std::string llama_format_win_err(DWORD err) {
LPSTR buf;
@@ -205,7 +245,7 @@ struct llama_mmap {
// prefetch/readahead impairs performance on NUMA systems
if (numa) { prefetch = 0; }
#ifdef __linux__
- if (prefetch) { flags |= MAP_POPULATE; }
+ if (prefetch >= file->size) { flags |= MAP_POPULATE; }
#endif
addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
if (addr == MAP_FAILED) {
@@ -257,20 +297,29 @@ struct llama_mmap {
throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
}
#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
if (prefetch) {
// Advise the kernel to preload the mapped memory
WIN32_MEMORY_RANGE_ENTRY range;
range.VirtualAddress = addr;
range.NumberOfBytes = (SIZE_T)size;
if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
llama_format_win_err(GetLastError()).c_str());
// The PrefetchVirtualMemory API is only present on Windows 8 and above, so we
// will dynamically load it using GetProcAddress.
BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
HMODULE hKernel32;
// This call is guaranteed to succeed.
hKernel32 = GetModuleHandleW(L"kernel32.dll");
// This call may fail if on a pre-Win8 system.
pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
if (pPrefetchVirtualMemory) {
// Advise the kernel to preload the mapped memory.
WIN32_MEMORY_RANGE_ENTRY range;
range.VirtualAddress = addr;
range.NumberOfBytes = (SIZE_T)size;
if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
llama_format_win_err(GetLastError()).c_str());
}
}
}
#else
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
}
~llama_mmap() {

File diff suppressed because it is too large.

583  llm/llama.go  Normal file

@@ -0,0 +1,583 @@
package llm
/*
#cgo CFLAGS: -Ofast -std=c11 -fPIC
#cgo CPPFLAGS: -Ofast -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
#cgo CXXFLAGS: -std=c++11 -fPIC
#cgo darwin CPPFLAGS: -DGGML_USE_ACCELERATE
#cgo darwin,arm64 CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#include <stdlib.h>
#include "llama.h"
struct llama_sample_options
{
float repeat_penalty;
float frequency_penalty;
float presence_penalty;
float temperature;
int32_t top_k;
float top_p;
float tfs_z;
float typical_p;
int mirostat;
float mirostat_tau;
float mirostat_eta;
bool penalize_newline;
};
llama_token llama_sample(
struct llama_context *ctx,
struct llama_token_data *candidates,
size_t n_candidates,
const llama_token *last_tokens,
size_t n_last_tokens,
struct llama_sample_options *opts)
{
llama_token_data_array candidates_p = {
candidates,
n_candidates,
false,
};
struct llama_token_data newline = candidates_p.data[llama_token_nl()];
llama_sample_repetition_penalty(
ctx, &candidates_p,
last_tokens, n_last_tokens,
opts->repeat_penalty);
llama_sample_frequency_and_presence_penalties(
ctx, &candidates_p,
last_tokens, n_last_tokens,
opts->frequency_penalty, opts->presence_penalty);
if (!opts->penalize_newline) {
candidates_p.data[llama_token_nl()] = newline;
}
if (opts->temperature <= 0) {
return llama_sample_token_greedy(ctx, &candidates_p);
}
if (opts->mirostat == 1) {
int mirostat_m = 100;
float mirostat_mu = 2.0f * opts->mirostat_tau;
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token_mirostat(
ctx, &candidates_p,
opts->mirostat_tau, opts->mirostat_eta,
mirostat_m, &mirostat_mu);
} else if (opts->mirostat == 2) {
float mirostat_mu = 2.0f * opts->mirostat_tau;
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token_mirostat_v2(
ctx, &candidates_p,
opts->mirostat_tau, opts->mirostat_eta,
&mirostat_mu);
} else {
llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token(ctx, &candidates_p);
}
}
*/
import "C"
import (
"bytes"
"embed"
"errors"
"fmt"
"io"
"log"
"os"
"strings"
"sync"
"unicode/utf8"
"unsafe"
"github.com/jmorganca/ollama/api"
)
//go:embed ggml-metal.metal
var fs embed.FS
const ModelFamilyLlama ModelFamily = "llama"
type llamaModel struct {
hyperparameters llamaHyperparameters
}
func (llm *llamaModel) ModelFamily() ModelFamily {
return ModelFamilyLlama
}
func (llm *llamaModel) ModelType() ModelType {
switch llm.hyperparameters.NumLayer {
case 26:
return ModelType3B
case 32:
return ModelType7B
case 40:
return ModelType13B
case 60:
return ModelType30B
case 80:
return ModelType65B
}
// TODO: find a better default
return ModelType7B
}
func (llm *llamaModel) FileType() FileType {
return llm.hyperparameters.FileType
}
type llamaHyperparameters struct {
// NumVocab is the size of the model's vocabulary.
NumVocab uint32
// NumEmbd is the size of the model's embedding layer.
NumEmbd uint32
NumMult uint32
NumHead uint32
// NumLayer is the number of layers in the model.
NumLayer uint32
NumRot uint32
// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
FileType llamaFileType
}
type llamaFileType uint32
const (
llamaFileTypeF32 llamaFileType = iota
llamaFileTypeF16
llamaFileTypeQ4_0
llamaFileTypeQ4_1
llamaFileTypeQ4_1_F16
llamaFileTypeQ8_0 llamaFileType = iota + 2
llamaFileTypeQ5_0
llamaFileTypeQ5_1
llamaFileTypeQ2_K
llamaFileTypeQ3_K_S
llamaFileTypeQ3_K_M
llamaFileTypeQ3_K_L
llamaFileTypeQ4_K_S
llamaFileTypeQ4_K_M
llamaFileTypeQ5_K_S
llamaFileTypeQ5_K_M
llamaFileTypeQ6_K
)
func (ft llamaFileType) String() string {
switch ft {
case llamaFileTypeF32:
return "F32"
case llamaFileTypeF16:
return "F16"
case llamaFileTypeQ4_0:
return "Q4_0"
case llamaFileTypeQ4_1:
return "Q4_1"
case llamaFileTypeQ4_1_F16:
return "Q4_1_F16"
case llamaFileTypeQ8_0:
return "Q8_0"
case llamaFileTypeQ5_0:
return "Q5_0"
case llamaFileTypeQ5_1:
return "Q5_1"
case llamaFileTypeQ2_K:
return "Q2_K"
case llamaFileTypeQ3_K_S:
return "Q3_K_S"
case llamaFileTypeQ3_K_M:
return "Q3_K_M"
case llamaFileTypeQ3_K_L:
return "Q3_K_L"
case llamaFileTypeQ4_K_S:
return "Q4_K_S"
case llamaFileTypeQ4_K_M:
return "Q4_K_M"
case llamaFileTypeQ5_K_S:
return "Q5_K_S"
case llamaFileTypeQ5_K_M:
return "Q5_K_M"
case llamaFileTypeQ6_K:
return "Q6_K"
default:
return "Unknown"
}
}
type llama struct {
params *C.struct_llama_context_params
model *C.struct_llama_model
ctx *C.struct_llama_context
last []C.llama_token
embd []C.llama_token
cursor int
mu sync.Mutex
gc bool
api.Options
}
func newLlama(model string, adapters []string, opts api.Options) (*llama, error) {
if _, err := os.Stat(model); err != nil {
return nil, err
}
llm := llama{Options: opts}
C.llama_backend_init(C.bool(llm.UseNUMA))
params := C.llama_context_default_params()
params.seed = C.uint(llm.Seed)
params.n_ctx = C.int(llm.NumCtx)
params.n_batch = C.int(llm.NumBatch)
params.n_gqa = C.int(llm.NumGQA)
params.n_gpu_layers = C.int(llm.NumGPU)
params.main_gpu = C.int(llm.MainGPU)
params.low_vram = C.bool(llm.LowVRAM)
params.f16_kv = C.bool(llm.F16KV)
params.logits_all = C.bool(llm.LogitsAll)
params.vocab_only = C.bool(llm.VocabOnly)
params.use_mmap = C.bool(llm.UseMMap)
params.use_mlock = C.bool(llm.UseMLock)
params.embedding = C.bool(llm.EmbeddingOnly)
params.rope_freq_base = C.float(llm.RopeFrequencyBase)
params.rope_freq_scale = C.float(llm.RopeFrequencyScale)
if len(adapters) > 0 && llm.UseMMap {
log.Printf("must disable mmap to use lora adapters")
params.use_mmap = C.bool(false)
}
llm.params = &params
cModel := C.CString(model)
defer C.free(unsafe.Pointer(cModel))
llm.model = C.llama_load_model_from_file(cModel, params)
if llm.model == nil {
return nil, errors.New("failed to load model")
}
llm.ctx = C.llama_new_context_with_model(llm.model, params)
if llm.ctx == nil {
return nil, errors.New("failed to create context")
}
for _, adapter := range adapters {
cAdapter := C.CString(adapter)
defer C.free(unsafe.Pointer(cAdapter))
if retval := C.llama_model_apply_lora_from_file(llm.model, cAdapter, nil, C.int(llm.NumThread)); retval != 0 {
return nil, fmt.Errorf("failed to load adapter %s", adapter)
}
}
// warm up the model
bos := []C.llama_token{C.llama_token_bos()}
C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
C.llama_reset_timings(llm.ctx)
return &llm, nil
}
func (llm *llama) Close() {
llm.gc = true
llm.mu.Lock()
defer llm.mu.Unlock()
defer C.llama_free_model(llm.model)
defer C.llama_free(llm.ctx)
C.llama_print_timings(llm.ctx)
}
func (llm *llama) SetOptions(opts api.Options) {
llm.Options = opts
}
var errNeedMoreData = errors.New("need more data")
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
C.llama_reset_timings(llm.ctx)
llm.marshalPrompt(ctx, prompt)
C.llama_set_rng_seed(llm.ctx, C.uint(llm.Seed))
var b bytes.Buffer
for {
token, err := llm.next()
if llm.gc {
return nil
} else if errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
b.WriteString(llm.Decode(int(token)))
if err := llm.checkStopConditions(b); err != nil {
if errors.Is(err, io.EOF) {
break
} else if errors.Is(err, errNeedMoreData) {
continue
}
return err
}
if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
fn(api.GenerateResponse{Response: b.String()})
b.Reset()
}
}
embd := make([]int, len(llm.embd))
for i := range llm.embd {
embd[i] = int(llm.embd[i])
}
timings := C.llama_get_timings(llm.ctx)
fn(api.GenerateResponse{
Done: true,
Context: embd,
SampleCount: int(timings.n_sample),
SampleDuration: parseDurationMs(float64(timings.t_sample_ms)),
PromptEvalCount: int(timings.n_p_eval),
PromptEvalDuration: parseDurationMs(float64(timings.t_p_eval_ms)),
EvalCount: int(timings.n_eval),
EvalDuration: parseDurationMs(float64(timings.t_eval_ms)),
})
return nil
}
func (llm *llama) checkStopConditions(b bytes.Buffer) error {
for _, stopCondition := range llm.Stop {
if stopCondition == strings.TrimSpace(b.String()) {
return io.EOF
} else if strings.HasPrefix(stopCondition, strings.TrimSpace(b.String())) {
return errNeedMoreData
}
}
return nil
}
func (llm *llama) marshalPrompt(ctx []int, prompt string) []C.llama_token {
tokens := append(ctx, llm.Encode(prompt)...)
if llm.NumKeep < 0 {
llm.NumKeep = len(tokens)
}
cTokens := make([]C.llama_token, len(tokens))
for i := range tokens {
cTokens[i] = C.llama_token(tokens[i])
}
// min(llm.NumCtx - 4, llm.NumKeep)
if llm.NumCtx-4 < llm.NumKeep {
llm.NumKeep = llm.NumCtx - 4
}
if len(tokens) >= llm.NumCtx {
// truncate input
numLeft := (llm.NumCtx - llm.NumKeep) / 2
truncated := cTokens[:llm.NumKeep]
erasedBlocks := (len(cTokens) - llm.NumKeep - numLeft - 1) / numLeft
truncated = append(truncated, cTokens[llm.NumKeep+erasedBlocks*numLeft:]...)
copy(llm.last, cTokens[len(cTokens)-llm.NumCtx:])
cTokens = truncated
log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated))
} else {
llm.last = make([]C.llama_token, llm.NumCtx-len(cTokens))
llm.last = append(llm.last, cTokens...)
}
var i int
for i = 0; i < len(llm.embd) && i < len(cTokens) && llm.embd[i] == cTokens[i]; i++ {
// noop
}
llm.embd = cTokens
if i == len(cTokens) {
// evaluate at least one token to generate logits
i--
}
llm.cursor = i
log.Printf("prompt: num_past=%d cached=%v eval=%v", i, len(llm.embd[:i]), len(llm.embd[i:]))
return cTokens
}
func (llm *llama) Encode(prompt string) []int {
cPrompt := C.CString(prompt)
defer C.free(unsafe.Pointer(cPrompt))
cTokens := make([]C.llama_token, len(prompt)+1)
if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(cTokens), C.int(len(cTokens)), true); n > 0 {
tokens := make([]int, n)
for i := range cTokens[:n] {
tokens[i] = int(cTokens[i])
}
return tokens
}
return nil
}
func (llm *llama) Decode(tokens ...int) string {
var sb strings.Builder
for _, token := range tokens {
sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, C.llama_token(token))))
}
return sb.String()
}
func (llm *llama) next() (C.llama_token, error) {
llm.mu.Lock()
defer llm.mu.Unlock()
if len(llm.embd) >= llm.NumCtx {
numLeft := (llm.NumCtx - llm.NumKeep) / 2
truncated := llm.embd[:llm.NumKeep]
truncated = append(truncated, llm.embd[len(llm.embd)-numLeft:]...)
llm.embd = truncated
llm.cursor = llm.NumKeep
log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d cursor=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated), llm.cursor)
}
for {
if llm.gc {
return 0, io.EOF
}
if llm.cursor >= len(llm.embd) {
break
}
numEval := len(llm.embd) - llm.cursor
if numEval > llm.NumBatch {
numEval = llm.NumBatch
}
if retval := C.llama_eval(llm.ctx, unsafe.SliceData(llm.embd[llm.cursor:]), C.int(numEval), C.int(llm.cursor), C.int(llm.NumThread)); retval != 0 {
return 0, fmt.Errorf("llama_eval: %d", retval)
}
llm.cursor += numEval
}
var sampleOpts C.struct_llama_sample_options
sampleOpts.repeat_penalty = C.float(llm.RepeatPenalty)
sampleOpts.frequency_penalty = C.float(llm.FrequencyPenalty)
sampleOpts.presence_penalty = C.float(llm.PresencePenalty)
sampleOpts.temperature = C.float(llm.Temperature)
sampleOpts.top_k = C.int(llm.TopK)
sampleOpts.top_p = C.float(llm.TopP)
sampleOpts.tfs_z = C.float(llm.TFSZ)
sampleOpts.typical_p = C.float(llm.TypicalP)
sampleOpts.mirostat = C.int(llm.Mirostat)
sampleOpts.mirostat_tau = C.float(llm.MirostatTau)
sampleOpts.mirostat_eta = C.float(llm.MirostatEta)
sampleOpts.penalize_newline = C.bool(llm.PenalizeNewline)
numVocab := C.llama_n_vocab(llm.ctx)
logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)
// TODO: logit bias
candidates := make([]C.llama_token_data, numVocab)
for i := range logits {
candidates[i] = C.llama_token_data{
id: C.int(i),
logit: logits[i],
p: 0,
}
}
repeatLastN := llm.RepeatLastN
if len(llm.last) < repeatLastN {
repeatLastN = len(llm.last)
}
if llm.NumCtx < repeatLastN {
repeatLastN = llm.NumCtx
}
lastN := llm.last[len(llm.last)-repeatLastN:]
token := C.llama_sample(
llm.ctx,
unsafe.SliceData(candidates), C.size_t(len(candidates)),
unsafe.SliceData(lastN), C.size_t(len(lastN)),
&sampleOpts,
)
llm.last = append(llm.last, token)
llm.embd = append(llm.embd, token)
if token == C.llama_token_eos() {
return 0, io.EOF
}
return token, nil
}
func (llm *llama) Embedding(input string) ([]float64, error) {
if !llm.EmbeddingOnly {
return nil, errors.New("llama: embedding not enabled")
}
tokens := llm.Encode(input)
if tokens == nil {
return nil, errors.New("llama: tokenize embedding")
}
cTokens := make([]C.llama_token, len(tokens))
for i := range tokens {
cTokens[i] = C.llama_token(tokens[i])
}
retval := C.llama_eval(llm.ctx, unsafe.SliceData(cTokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
if retval != 0 {
return nil, errors.New("llama: eval")
}
C.llama_print_timings(llm.ctx)
n := C.llama_n_embd(llm.ctx)
if n <= 0 {
return nil, errors.New("llama: no embeddings generated")
}
cEmbeddings := unsafe.Slice(C.llama_get_embeddings(llm.ctx), n)
embeddings := make([]float64, len(cEmbeddings))
for i, v := range cEmbeddings {
embeddings[i] = float64(v)
}
return embeddings, nil
}
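
The overflow handling above keeps the first NumKeep tokens and then roughly the most recent half of the remaining context window whenever the token buffer would exceed NumCtx. A minimal standalone sketch of that rule, using plain ints instead of C.llama_token (the names here are illustrative only, not part of this change):

package main

import "fmt"

// truncate keeps the first numKeep tokens plus the most recent half of the
// remaining context window, mirroring the overflow branch in next() above.
// marshalPrompt applies a block-aligned variant of the same rule.
func truncate(tokens []int, numCtx, numKeep int) []int {
	if len(tokens) < numCtx {
		return tokens
	}
	numLeft := (numCtx - numKeep) / 2
	out := append([]int(nil), tokens[:numKeep]...)
	return append(out, tokens[len(tokens)-numLeft:]...)
}

func main() {
	tokens := make([]int, 30)
	for i := range tokens {
		tokens[i] = i
	}
	// with numCtx=16 and numKeep=4 this keeps tokens 0..3 and the last 6 tokens
	fmt.Println(truncate(tokens, 16, 4))
}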


@@ -1,5 +1,5 @@
/**
- * llama.cpp - git e782c9e735f93ab4767ffc37462c523b73a17ddc
+ * llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
@@ -79,6 +79,10 @@
#define LLAMA_SUPPORTS_GPU_OFFLOAD
#endif
#ifndef LLAMA_DEFAULT_RMS_EPS
#define LLAMA_DEFAULT_RMS_EPS 5e-6f
#endif
#ifdef __cplusplus
extern "C" {
#endif
@@ -108,13 +112,29 @@ extern "C" {
typedef void (*llama_progress_callback)(float progress, void *ctx);
struct llama_context_params {
uint32_t seed; // RNG seed, -1 for random
int32_t n_ctx; // text context
int32_t n_batch; // prompt processing batch size
int32_t n_gpu_layers; // number of layers to store in VRAM
int32_t main_gpu; // the GPU that is used for scratch and small tensors
float tensor_split[LLAMA_MAX_DEVICES]; // how to split layers across multiple GPUs
enum llama_log_level {
LLAMA_LOG_LEVEL_ERROR = 2,
LLAMA_LOG_LEVEL_WARN = 3,
LLAMA_LOG_LEVEL_INFO = 4
};
// Signature for logging events
// Note that text includes the new line character at the end for most events.
// If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
// if it exists.
// It might not exist for progress report where '.' is output repeatedly.
typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
struct llama_context_params {
uint32_t seed; // RNG seed, -1 for random
int32_t n_ctx; // text context
int32_t n_batch; // prompt processing batch size
int32_t n_gqa; // grouped-query attention (TEMP - will be moved to model hparams)
float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams)
int32_t n_gpu_layers; // number of layers to store in VRAM
int32_t main_gpu; // the GPU that is used for scratch and small tensors
const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)
// ref: https://github.com/ggerganov/llama.cpp/pull/2054
float rope_freq_base; // RoPE base frequency
@@ -127,6 +147,7 @@ extern "C" {
// Keep the booleans together to avoid misalignment during copy-by-value.
bool low_vram; // if true, reduce VRAM usage at the cost of performance
bool mul_mat_q; // if true, use experimental mul_mat_q kernels
bool f16_kv; // use fp16 for KV cache
bool logits_all; // the llama_eval() call computes all logits, not just the last one
bool vocab_only; // only load the vocabulary, no weights
@@ -165,6 +186,40 @@ extern "C" {
bool quantize_output_tensor; // quantize output.weight
} llama_model_quantize_params;
// grammar types
struct llama_grammar;
// grammar element type
enum llama_gretype {
// end of rule definition
LLAMA_GRETYPE_END = 0,
// start of alternate definition for rule
LLAMA_GRETYPE_ALT = 1,
// non-terminal element: reference to rule
LLAMA_GRETYPE_RULE_REF = 2,
// terminal element: character (code point)
LLAMA_GRETYPE_CHAR = 3,
// inverse char(s) ([^a], [^a-b] [^abc])
LLAMA_GRETYPE_CHAR_NOT = 4,
// modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
// be an inclusive range ([a-z])
LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
// modifies a preceding LLAMA_GRETYPE_CHAR or
// LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
LLAMA_GRETYPE_CHAR_ALT = 6,
};
typedef struct llama_grammar_element {
enum llama_gretype type;
uint32_t value; // Unicode code point or rule ID
} llama_grammar_element;
// performance timing information
struct llama_timings {
double t_start_ms;
@@ -179,6 +234,10 @@ extern "C" {
int32_t n_eval;
};
// Set callback for all future logging events.
// If this is not called, or NULL is supplied, everything is output on stderr.
LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
LLAMA_API int llama_max_devices();
LLAMA_API struct llama_context_params llama_context_default_params();
@@ -357,6 +416,15 @@ extern "C" {
LLAMA_API llama_token llama_token_eos(); // end-of-sentence
LLAMA_API llama_token llama_token_nl(); // next-line
// Grammar
//
LLAMA_API struct llama_grammar * llama_grammar_init(
const llama_grammar_element ** rules,
size_t n_rules,
size_t start_rule_index);
LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
// Sampling functions
/// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
@@ -369,13 +437,11 @@ extern "C" {
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted.
/// @params guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
/// @params scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
/// @params smooth_factor Smooth factor between guidance logits and original logits. 1.0f means only use guidance logits. 0.0f means only original logits.
LLAMA_API void llama_sample_classifier_free_guidance(
struct llama_context * ctx,
llama_token_data_array * candidates,
struct llama_context * guidance_ctx,
float scale,
float smooth_factor);
float scale);
/// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);
@@ -393,6 +459,9 @@ extern "C" {
LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);
/// @details Apply constraints from grammar
LLAMA_API void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar);
/// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
@@ -414,6 +483,9 @@ extern "C" {
/// @details Randomly selects a token from the candidates based on their probabilities.
LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);
/// @details Accepts the sampled token into the grammar
LLAMA_API void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token);
// Performance information
LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
LLAMA_API void llama_print_timings(struct llama_context * ctx);

81  llm/llama_darwin.go  Normal file

@@ -0,0 +1,81 @@
package llm
import (
"bytes"
"crypto/sha256"
"errors"
"io"
"log"
"os"
"path/filepath"
)
func init() {
if err := initBackend(); err != nil {
log.Printf("WARNING: GPU could not be initialized correctly: %v", err)
log.Printf("WARNING: falling back to CPU")
}
}
func initBackend() error {
exec, err := os.Executable()
if err != nil {
return err
}
exec, err = filepath.EvalSymlinks(exec)
if err != nil {
return err
}
metal := filepath.Join(filepath.Dir(exec), "ggml-metal.metal")
fi, err := os.Stat(metal)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
if fi != nil {
actual, err := os.Open(metal)
if err != nil {
return err
}
defer actual.Close()
actualSum := sha256.New()
if _, err := io.Copy(actualSum, actual); err != nil {
return err
}
expect, err := fs.Open("ggml-metal.metal")
if err != nil {
return err
}
expectSum := sha256.New()
if _, err := io.Copy(expectSum, expect); err != nil {
return err
}
if bytes.Equal(actualSum.Sum(nil), expectSum.Sum(nil)) {
return nil
}
}
dst, err := os.Create(filepath.Join(filepath.Dir(exec), "ggml-metal.metal"))
if err != nil {
return err
}
defer dst.Close()
src, err := fs.Open("ggml-metal.metal")
if err != nil {
return err
}
defer src.Close()
if _, err := io.Copy(dst, src); err != nil {
return err
}
return nil
}

74  llm/llm.go  Normal file

@@ -0,0 +1,74 @@
package llm
import (
"fmt"
"log"
"os"
"github.com/pbnjay/memory"
"github.com/jmorganca/ollama/api"
)
type LLM interface {
Predict([]int, string, func(api.GenerateResponse)) error
Embedding(string) ([]float64, error)
Encode(string) []int
Decode(...int) string
SetOptions(api.Options)
Close()
}
func New(model string, adapters []string, opts api.Options) (LLM, error) {
if _, err := os.Stat(model); err != nil {
return nil, err
}
f, err := os.Open(model)
if err != nil {
return nil, err
}
defer f.Close()
ggml, err := DecodeGGML(f, ModelFamilyLlama)
if err != nil {
return nil, err
}
switch ggml.FileType().String() {
case "F32", "F16", "Q5_0", "Q5_1", "Q8_0":
if opts.NumGPU != 0 {
// F32, F16, Q5_0, Q5_1, and Q8_0 do not support Metal API and will
// cause the runner to segmentation fault so disable GPU
log.Printf("WARNING: GPU disabled for F32, F16, Q5_0, Q5_1, and Q8_0")
opts.NumGPU = 0
}
}
totalResidentMemory := memory.TotalMemory()
switch ggml.ModelType() {
case ModelType3B, ModelType7B:
if totalResidentMemory < 8*1024*1024 {
return nil, fmt.Errorf("model requires at least 8GB of memory")
}
case ModelType13B:
if totalResidentMemory < 16*1024*1024 {
return nil, fmt.Errorf("model requires at least 16GB of memory")
}
case ModelType30B:
if totalResidentMemory < 32*1024*1024 {
return nil, fmt.Errorf("model requires at least 32GB of memory")
}
case ModelType65B:
if totalResidentMemory < 64*1024*1024 {
return nil, fmt.Errorf("model requires at least 64GB of memory")
}
}
switch ggml.ModelFamily() {
case ModelFamilyLlama:
return newLlama(model, adapters, opts)
default:
return nil, fmt.Errorf("unknown ggml type: %s", ggml.ModelFamily())
}
}
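
For orientation, a hypothetical caller of the new llm package could look like the sketch below. The import paths follow the module path used elsewhere in this diff, but api.DefaultOptions and the exact api.GenerateResponse fields used here are assumptions for illustration rather than something this change introduces.

package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llm"
)

func main() {
	// llm.New inspects the ggml file (family, model type, file type) and the
	// available memory before constructing a backend; only the llama family
	// is wired up at this point.
	model, err := llm.New("/path/to/model.bin", nil, api.DefaultOptions())
	if err != nil {
		log.Fatal(err)
	}
	defer model.Close()

	// Predict streams partial responses through the callback; the final
	// callback has Done set and carries timing information.
	if err := model.Predict(nil, "Why is the sky blue?", func(r api.GenerateResponse) {
		fmt.Print(r.Response)
	}); err != nil {
		log.Fatal(err)
	}
}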

0  llama/update-llama-cpp.sh → llm/update-llama-cpp.sh  Normal file → Executable file

15  llm/utils.go  Normal file

@@ -0,0 +1,15 @@
package llm
import (
"fmt"
"time"
)
func parseDurationMs(ms float64) time.Duration {
dur, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
if err != nil {
panic(err)
}
return dur
}
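
parseDurationMs round-trips the value through fmt.Sprintf and time.ParseDuration and panics if parsing fails. An arithmetic equivalent (a sketch, not part of this change) converts fractional milliseconds directly and cannot panic:

package llm

import "time"

// durationMs is equivalent to parseDurationMs above but skips the
// format-then-parse string round trip.
func durationMs(ms float64) time.Duration {
	return time.Duration(ms * float64(time.Millisecond))
}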


@@ -4,8 +4,9 @@ import (
"context"
"github.com/jmorganca/ollama/cmd"
"github.com/spf13/cobra"
)
func main() {
- cmd.NewCLI().ExecuteContext(context.Background())
+ cobra.CheckErr(cmd.NewCLI().ExecuteContext(context.Background()))
}


@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"io"
"log"
)
type Command struct {
@@ -20,16 +21,16 @@ func (c *Command) Reset() {
func Parse(reader io.Reader) ([]Command, error) {
var commands []Command
var command, modelCommand Command
scanner := bufio.NewScanner(reader)
scanner.Buffer(make([]byte, 0, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
scanner.Split(scanModelfile)
for scanner.Scan() {
line := scanner.Bytes()
fields := bytes.SplitN(line, []byte(" "), 2)
- if len(fields) == 0 {
+ if len(fields) == 0 || len(fields[0]) == 0 {
continue
}
@@ -39,14 +40,22 @@ func Parse(reader io.Reader) ([]Command, error) {
command.Args = string(fields[1])
// copy command for validation
modelCommand = command
case "LICENSE", "TEMPLATE", "SYSTEM", "PROMPT":
case "LICENSE", "TEMPLATE", "SYSTEM", "PROMPT", "EMBED", "ADAPTER":
command.Name = string(bytes.ToLower(fields[0]))
command.Args = string(fields[1])
case "PARAMETER":
fields = bytes.SplitN(fields[1], []byte(" "), 2)
if len(fields) < 2 {
return nil, fmt.Errorf("missing value for %s", fields)
}
command.Name = string(fields[0])
command.Args = string(fields[1])
default:
if !bytes.HasPrefix(fields[0], []byte("#")) {
// log a warning for unknown commands
log.Printf("WARNING: Unknown command: %s", fields[0])
}
continue
}
@@ -55,28 +64,53 @@ func Parse(reader io.Reader) ([]Command, error) {
}
if modelCommand.Args == "" {
return nil, fmt.Errorf("no FROM line for the model was specified")
return nil, errors.New("no FROM line for the model was specified")
}
return commands, scanner.Err()
}
func scanModelfile(data []byte, atEOF bool) (advance int, token []byte, err error) {
advance, token, err = scan([]byte(`"""`), []byte(`"""`), data, atEOF)
if err != nil {
return 0, nil, err
}
if advance > 0 && token != nil {
return advance, token, nil
}
advance, token, err = scan([]byte(`"`), []byte(`"`), data, atEOF)
if err != nil {
return 0, nil, err
}
if advance > 0 && token != nil {
return advance, token, nil
}
return bufio.ScanLines(data, atEOF)
}
func scan(openBytes, closeBytes, data []byte, atEOF bool) (advance int, token []byte, err error) {
newline := bytes.IndexByte(data, '\n')
if start := bytes.Index(data, []byte(`"""`)); start >= 0 && start < newline {
end := bytes.Index(data[start+3:], []byte(`"""`))
if start := bytes.Index(data, openBytes); start >= 0 && start < newline {
end := bytes.Index(data[start+len(openBytes):], closeBytes)
if end < 0 {
if atEOF {
- return 0, nil, errors.New(`unterminated multiline string: """`)
+ return 0, nil, fmt.Errorf("unterminated %s: expecting %s", openBytes, closeBytes)
} else {
return 0, nil, nil
}
}
- n := start + 3 + end + 3
- return n, bytes.Replace(data[:n], []byte(`"""`), []byte(""), 2), nil
+ n := start + len(openBytes) + end + len(closeBytes)
+ newData := data[:start]
+ newData = append(newData, data[start+len(openBytes):n-len(closeBytes)]...)
+ return n, newData, nil
}
- return bufio.ScanLines(data, atEOF)
+ return 0, nil, nil
}
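
The parser changes above generalize the triple-quote handling into scan(openBytes, closeBytes, ...) so both """...""" and "..." blocks are recognized, and the command table now accepts EMBED and ADAPTER. A hypothetical driver for the updated Parse is sketched below; the import path and the printed field values are assumptions for illustration:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/jmorganca/ollama/parser"
)

func main() {
	modelfile := `FROM llama2
ADAPTER ./lora.bin
TEMPLATE """
{{ .System }}
USER: {{ .Prompt }}
ASSISTANT:
"""
PARAMETER temperature 0.7
`
	commands, err := parser.Parse(strings.NewReader(modelfile))
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range commands {
		// e.g. adapter => "./lora.bin", template => "...", temperature => "0.7"
		fmt.Printf("%s => %q\n", c.Name, c.Args)
	}
}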

26  scripts/build_darwin.sh  Executable file

@@ -0,0 +1,26 @@
#!/bin/bash
mkdir -p dist
GO_LDFLAGS="-X github.com/jmorganca/ollama/version.Version=$VERSION"
GO_LDFLAGS="$GO_LDFLAGS -X github.com/jmorganca/ollama/server.mode=release"
# build universal binary
CGO_ENABLED=1 GOARCH=arm64 go build -ldflags "$GO_LDFLAGS" -o dist/ollama-darwin-arm64
CGO_ENABLED=1 GOARCH=amd64 go build -ldflags "$GO_LDFLAGS" -o dist/ollama-darwin-amd64
lipo -create -output dist/ollama dist/ollama-darwin-arm64 dist/ollama-darwin-amd64
rm dist/ollama-darwin-amd64 dist/ollama-darwin-arm64
codesign --deep --force --options=runtime --sign "$APPLE_IDENTITY" --timestamp dist/ollama
chmod +x dist/ollama
# build and sign the mac app
npm install --prefix app
npm run --prefix app make:sign
cp app/out/make/zip/darwin/universal/Ollama-darwin-universal-${VERSION:-0.0.0}.zip dist/Ollama-darwin.zip
# sign the binary and rename it
codesign -f --timestamp -s "$APPLE_IDENTITY" --identifier ai.ollama.ollama --options=runtime dist/ollama
ditto -c -k --keepParent dist/ollama dist/temp.zip
xcrun notarytool submit dist/temp.zip --wait --timeout 10m --apple-id $APPLE_ID --password $APPLE_PASSWORD --team-id $APPLE_TEAM_ID
mv dist/ollama dist/ollama-darwin
rm dist/temp.zip

Some files were not shown because too many files have changed in this diff.