Compare commits


425 Commits

Author SHA1 Message Date
Jeffrey Morgan
9ec16f0f03 fix formatting when exiting ollama run 2023-10-27 21:26:23 -07:00
Jeffrey Morgan
57a58db1b0 history: update pos after compact 2023-10-27 20:38:03 -07:00
Jeffrey Morgan
2d75a4537c close input channel when receiving io.EOF 2023-10-27 20:26:04 -07:00
Jeffrey Morgan
4748609611 Don't quit ioloop on NUL character (#940)
* dont quit ioloop on 0 rune

* check for closed channel

* remove unused error on `Close()`
2023-10-27 20:01:48 -07:00
Jeffrey Morgan
c0dcea1398 Update faq.md 2023-10-27 18:29:00 -07:00
Jeffrey Morgan
3a1ed9ff70 restore building runner with AVX on by default (#900) 2023-10-27 12:13:44 -07:00
Bruce MacDonald
6d283882b1 catch insufficient permissions nvidia err (#934) 2023-10-27 12:42:40 -04:00
Bruce MacDonald
5c3491f425 allow for a configurable ollama model storage directory (#897)
* allow for a configurable ollama models directory

- set OLLAMA_MODELS in the environment that ollama is running in to change where model files are stored
- update docs

Co-Authored-By: Jeffrey Morgan <jmorganca@gmail.com>
Co-Authored-By: Jay Nakrani <dhananjaynakrani@gmail.com>
Co-Authored-By: Akhil Acharya <akhilcacharya@gmail.com>
Co-Authored-By: Sasha Devol <sasha.devol@protonmail.com>
2023-10-27 10:19:59 -04:00
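
The entry above describes choosing the model storage directory via the OLLAMA_MODELS environment variable. Below is a minimal Go sketch of that lookup pattern; the fallback path and helper name are illustrative assumptions, not the repository's actual code:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// modelsDir returns the model storage directory: the OLLAMA_MODELS
// environment variable if it is set, otherwise a default under the
// user's home directory (an assumed fallback for this sketch).
func modelsDir() (string, error) {
	if dir := os.Getenv("OLLAMA_MODELS"); dir != "" {
		return dir, nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(home, ".ollama", "models"), nil
}

func main() {
	dir, err := modelsDir()
	if err != nil {
		fmt.Fprintln(os.Stderr, "error:", err)
		os.Exit(1)
	}
	fmt.Println("models directory:", dir)
}
```
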
James Braza
e5d1ce4dde Tweaks to README.md (#906)
* Mentioned Docker Hub in docs
* Consolidated brew installs to one line
2023-10-27 00:10:23 -07:00
Bruce MacDonald
2665f3c28e offload 75% of available vram to improve stability (#921) 2023-10-26 20:49:55 -04:00
Patrick Devine
a79f030e75 add bracketed paste mode (#922) 2023-10-26 15:57:00 -07:00
Michael Yang
9bc5864a03 Merge pull request #918 from jmorganca/mxyng/fix-out-of-space
fix(download): no retry when out of space
2023-10-26 12:24:20 -07:00
Michael Yang
b88cc0fac9 Merge pull request #916 from jmorganca/mxyng/fix-client-host
fix(client): trim trailing slash
2023-10-26 12:24:12 -07:00
Patrick Devine
5b2cf16397 fix docker build annotations (#917) 2023-10-26 12:00:33 -07:00
Michael Yang
910816a532 fix(download): no retry when out of space 2023-10-26 11:34:07 -07:00
Michael Yang
28c3f288e2 client: fix trailing slash 2023-10-26 11:09:38 -07:00
Patrick Devine
deeac961bb new readline library (#847) 2023-10-25 16:41:18 -07:00
Jeffrey Morgan
49443e7da5 fix typo in README.md 2023-10-25 16:19:27 -07:00
Ajay Kemparaj
bb8464c0d2 update golang.org/x/net fixes CVE-2023-3978,CVE-2023-39325,CVE-2023-44487 (#855) 2023-10-25 16:17:24 -07:00
Michael Yang
daa5bb4473 Merge pull request #907 from jmorganca/mxyng/linux
update linux.md
2023-10-25 15:03:34 -07:00
Michael Yang
92119de9d8 update linux.md 2023-10-25 14:57:50 -07:00
Michael Yang
53b0ba8d43 Merge pull request #893 from jmorganca/mxyng/update-faq
update faq
2023-10-24 16:02:35 -07:00
Michael Yang
db342691f9 Update docs/faq.md
Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>
2023-10-24 13:59:33 -07:00
Bruce MacDonald
cecf83141e Linux uninstall instructions (#894) 2023-10-24 14:07:05 -04:00
Michael Yang
a5a2adf1ec update faq 2023-10-24 10:54:16 -07:00
Jeffrey Morgan
b0c9cd0f3b fix metal assertion errors 2023-10-24 00:32:36 -07:00
Jeffrey Morgan
77f61c6301 update submodule commit 2023-10-24 00:30:27 -07:00
Jeffrey Morgan
f3604534e5 update submodule commit 2023-10-23 23:59:12 -07:00
Jeffrey Morgan
914428351a Update import.md 2023-10-23 17:44:53 -07:00
Jeffrey Morgan
9afea9e3b9 Update import.md
Separate GGUF and PyTorch guides
2023-10-23 17:42:17 -07:00
Bruce MacDonald
c039432b5c add current user to ollama group on install (#772) 2023-10-23 17:06:31 -04:00
Michael Yang
c345b4ca7c Merge pull request #884 from jmorganca/mxyng/update-submodules
bump submodules
2023-10-23 11:27:38 -07:00
Michael Yang
0c7a00a264 bump submodules
pin to 9e70cc03229df19ca2d28ce23cc817198f897278 for now since
438c2ca83045a00ef244093d27e9ed41a8cb4ea9 is breaking
2023-10-23 11:17:59 -07:00
Michael Yang
36c160f1c3 Merge pull request #881 from jmorganca/mxyng/ggufv3
ggufv3
2023-10-23 10:50:45 -07:00
Michael Yang
b66bcaa582 Merge pull request #883 from jmorganca/mxyng/logs
update default log target
2023-10-23 10:50:29 -07:00
Michael Yang
c9167494cb update default log target 2023-10-23 10:44:50 -07:00
Michael Yang
125d0a013a ggufv3
ggufv3 adds support for big endianness, mainly for s390x architecture.
while that's not currently supported for ollama, the change is simple.

loosen version check to be more forward compatible. unless specified,
gguf versions other than v1 will be decoded into v2.
2023-10-23 09:35:49 -07:00
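
The note above loosens the GGUF version check so that versions other than v1 fall through to the v2 decoding path. A hedged sketch of that kind of forward-compatible dispatch follows; the function names and error handling are invented for illustration and do not mirror the actual decoder:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeV1 and decodeV2 stand in for version-specific GGUF decoders.
func decodeV1(data []byte) error { fmt.Println("decoding as v1"); return nil }
func decodeV2(data []byte) error { fmt.Println("decoding as v2"); return nil }

// decodeGGUF dispatches on the container version. Anything other than v1
// falls through to the v2 decoder, which keeps newer versions readable
// (v3, for example, mainly adds big-endian support) without a dedicated case.
func decodeGGUF(version uint32, data []byte) error {
	switch version {
	case 0:
		return errors.New("invalid gguf version 0")
	case 1:
		return decodeV1(data)
	default:
		return decodeV2(data)
	}
}

func main() {
	for _, v := range []uint32{1, 2, 3} {
		if err := decodeGGUF(v, nil); err != nil {
			fmt.Println("error:", err)
		}
	}
}
```
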
Richard Awoyemi
ba2da6ceaa Added a minimalist React UI for Ollama models to the community contributions.md (#870) 2023-10-23 10:44:39 -04:00
Jeffrey Morgan
ccff9ca09c Update README.md 2023-10-21 11:58:10 -04:00
Jeffrey Morgan
436a5be49c Update README.md 2023-10-21 11:57:32 -04:00
Matt Williams
cc0bf96398 Merge pull request #829 from jmorganca/mattw/example-summarize-news
added python rag news summary
2023-10-20 21:03:16 -07:00
Michael Yang
386169205c update runtime options (#864) 2023-10-20 21:17:14 -04:00
Michael Yang
0d6342a882 Merge pull request #863 from jmorganca/mxyng/nil-pointer
fix: nil pointer dereference
2023-10-20 17:23:37 -07:00
Michael Yang
75bee074b6 fix: nil pointer dereference 2023-10-20 16:55:24 -07:00
Michael Yang
533d76368c Merge pull request #859 from jmorganca/mxyng/fix-hostname
fix: ollama host for hostname
2023-10-20 11:40:56 -07:00
Michael Yang
459f4a7889 fix: ollama host for hostname 2023-10-20 11:32:41 -07:00
Matt Williams
25c63c91d8 Update README.md
Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2023-10-19 13:52:40 -07:00
Jeffrey Morgan
cbfff4f868 update dependencies in app/ 2023-10-19 15:52:41 -04:00
Jeffrey Morgan
7ed5a39bc7 simpler check for model loading compatibility errors 2023-10-19 14:50:49 -04:00
Michael Yang
cc1d03f4ec Merge pull request #841 from jmorganca/mxyng/cleanup-cmd-args 2023-10-19 11:22:40 -07:00
Michael Yang
846f593dbf Merge pull request #828 from jmorganca/mxyng/template-parameters
image: show parameters
2023-10-19 09:31:31 -07:00
Michael Yang
0a53da03fd Merge pull request #843 from jmorganca/mxyng/request-validation
basic request validation
2023-10-19 09:30:45 -07:00
Michael Yang
2ce1793a1d go fmt 2023-10-19 09:21:51 -07:00
Michael Yang
e1c5be24e7 check json eof 2023-10-19 09:21:51 -07:00
Michael Yang
2ad8a074ac generate: set created_at
move the empty response so it's more visible
2023-10-19 09:21:51 -07:00
Michael Yang
7e547c6833 s/message/error/ 2023-10-19 09:21:04 -07:00
Michael Yang
689842b9ff request: bad request when model missing fields 2023-10-19 09:21:04 -07:00
Michael Yang
a19d47642e models: rm workDir from CreateModel
unused after removing EMBED
2023-10-19 09:21:04 -07:00
Jeffrey Morgan
a7dad24d92 add error for falcon and starcoder vocab compatibility (#844)
add error for falcon and starcoder vocab compatibility
---------
Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>
2023-10-19 12:18:31 -04:00
Jeffrey Morgan
6b213216d5 Update import.md 2023-10-19 12:17:36 -04:00
Bruce MacDonald
fe6f3b48f7 do not reload the running llm when runtime params change (#840)
- only reload the running llm if the model has changed, or the options for loading the running model have changed
- rename loaded llm to runner to differentiate from loaded model image
- remove logic which keeps the first system prompt in the generation context
2023-10-19 10:39:58 -04:00
Michael Yang
36c88cb9db cmd: set ExactArgs 2023-10-18 14:40:48 -07:00
Michael Yang
235e43d7f6 Merge pull request #833 from discovertomorrow/leadingspace
Fix Issue with Leading Whitespaces in Decoded Context
2023-10-18 13:52:48 -07:00
Arne Müller
730996e530 use TrimPrefix instead of TrimLeft 2023-10-18 22:51:30 +02:00
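
The change above swaps strings.TrimLeft for strings.TrimPrefix when stripping the leading space from decoded output. The distinction matters: TrimLeft removes every leading rune in the cutset, while TrimPrefix removes one exact prefix. A small standalone comparison (the input string is only an example):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	s := "  hello"

	// TrimLeft strips every leading rune found in the cutset,
	// so both spaces disappear.
	fmt.Printf("%q\n", strings.TrimLeft(s, " ")) // "hello"

	// TrimPrefix removes exactly one occurrence of the prefix.
	fmt.Printf("%q\n", strings.TrimPrefix(s, " ")) // " hello"
}
```
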
Arne Müller
ce6197a8e0 removed redundant strings.CutPrefix from Decode 2023-10-18 22:47:20 +02:00
Arne Müller
46b9953f32 use strings.TrimLeft to remove spaces 2023-10-18 22:41:19 +02:00
Michael Yang
4dcceeffb7 let the template do the work 2023-10-18 13:12:00 -07:00
Michael Yang
019e4a4558 image: show parameters 2023-10-18 13:12:00 -07:00
Michael Yang
627d04d927 Merge pull request #827 from jmorganca/mxyng/template-adapters
model: native gotemplate adapter template
2023-10-18 13:11:25 -07:00
Michael Yang
940e8ebec3 Merge pull request #826 from jmorganca/mxyng/template-system
show: no template system if empty
2023-10-18 13:11:09 -07:00
Bruce MacDonald
565648f3f7 relay CUDA errors to the client (#825) 2023-10-18 15:36:56 -04:00
Arne Müller
90c49bed57 moved removal of leading space into Predict 2023-10-18 20:08:26 +02:00
Michael Yang
3a2477174f Merge pull request #822 from ggozad/fix-tags-api
Fix /api/tags for no models.
2023-10-18 09:34:00 -07:00
Yiorgis Gozadinos
8c6c2cbc8c When the .ollama folder is broken or there are no models return an empty list on /api/tags 2023-10-18 08:23:20 +02:00
Arne Müller
5dc0cff459 fix whitespace removal 2023-10-18 08:15:27 +02:00
Matt Williams
c5c8b4b16a added python rag news summary
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-17 16:41:28 -07:00
Michael Yang
8299bf76ed model: native gotemplate adapter template 2023-10-17 15:28:38 -07:00
Michael Yang
ee4979e510 show: no template system if empty 2023-10-17 15:25:43 -07:00
Michael Yang
08b0e04f40 Merge pull request #813 from jmorganca/mxyng/llama
refactor llm/llama.go
2023-10-17 14:05:58 -07:00
Michael Yang
b36b0b71f8 use cut prefix 2023-10-17 14:01:39 -07:00
Michael Yang
094df37563 remove unused struct 2023-10-17 14:01:38 -07:00
Bruce MacDonald
f3648fd206 Update llama.cpp gguf to latest (#710) 2023-10-17 16:55:16 -04:00
Bruce MacDonald
bd93a94abd fix MB VRAM log output (#824) 2023-10-17 15:35:16 -04:00
Michael Yang
f55bdb6f10 Merge pull request #799 from deichbewohner/jsonmarshaling
Fix JSON Marshal Escaping for Special Characters
2023-10-17 08:46:02 -07:00
Michael Yang
2870a9bfc8 Merge pull request #812 from jmorganca/mxyng/fix-format-string
fix: wrong format string type
2023-10-17 08:40:49 -07:00
Michael Yang
c031c211d1 Merge pull request #809 from jmorganca/mxyng/fix-gpu
fix: regression unsupported metal types
2023-10-17 08:40:40 -07:00
Andreas Wäscher
68391b0055 Add OllamaSharp for .NET (#811) 2023-10-17 11:31:48 -04:00
Alexander F. Rødseth
b7e137323a Fix a typo (#818) 2023-10-17 09:00:15 -04:00
Arne Müller
8fa3f366ad Removed newline trimming and used buffer directly in POST request. 2023-10-17 08:17:35 +02:00
Michael Yang
fddb303f23 fix: format string wrong type 2023-10-16 16:14:28 -07:00
Michael Yang
ad5ee20c7b Merge pull request #794 from ggozad/add_oterm
Add oterm to community integrations
2023-10-16 15:51:55 -07:00
Michael Yang
785b4eb5bf Merge branch 'main' into add_oterm 2023-10-16 15:51:44 -07:00
Michael Yang
16ede1b30b Merge pull request #801 from s-kostyaev/add-ellama-community-integration
Add ellama community integration
2023-10-16 15:51:25 -07:00
Michael Yang
17d6bbbb2a Merge pull request #810 from vieux/patch-1
Update install.sh
2023-10-16 15:50:57 -07:00
Victor Vieux
6481b7f34c Update install.sh, avoid ARCH: unbound variable 2023-10-16 14:40:24 -07:00
Michael Yang
cb4a80b693 fix: regression unsupported metal types
omitting `--n-gpu-layers` means use metal on macos which isn't correct
since ollama uses `num_gpu=0` to explicitly disable gpu for file types
that are not implemented in metal
2023-10-16 14:37:20 -07:00
Bruce MacDonald
68d7255bd3 show request to server rather than local check (#778) 2023-10-16 17:27:25 -04:00
Michael Yang
9ef2fce33a Merge pull request #768 from jmorganca/mxyng/bytes
fix memory check
2023-10-16 12:42:41 -07:00
Michael Yang
43eaba3d60 Merge pull request #787 from jmorganca/mxyng/server-version2
server: print version on start
2023-10-16 09:59:30 -07:00
Michael Yang
1af493c5a0 server: print version on start 2023-10-16 09:59:14 -07:00
Bruce MacDonald
a0c3e989de deprecate modelfile embed command (#759) 2023-10-16 11:07:37 -04:00
Sergey Kostyaev
7af0fdce48 add ellama community integration 2023-10-16 16:39:10 +07:00
Arne Müller
ee94693b1a handling unescaped json marshaling 2023-10-16 11:15:55 +02:00
Yiorgis Gozadinos
731dbdc1a5 Add oterm to community integrations 2023-10-15 23:21:17 +02:00
Jeffrey Morgan
06bcfbd629 cleanup docker section in readme 2023-10-15 02:33:25 -04:00
Jeffrey Morgan
7d7c2510f8 add docker exec command to readme 2023-10-15 02:31:15 -04:00
Jeffrey Morgan
f9b2f999ac update readme with docker setup and link to import.md 2023-10-15 02:23:03 -04:00
Jeffrey Morgan
c416087339 import.md: formatting and spelling 2023-10-15 01:39:46 -04:00
Jeffrey Morgan
6002cebd2c import.md: convert and quantize docs 2023-10-15 00:11:51 -04:00
Jeffrey Morgan
212bdc541c import.md: model architectures spelling 2023-10-15 00:07:58 -04:00
Jeffrey Morgan
dca6686273 add steps for creating a Modelfile and more example commands to import.md 2023-10-15 00:05:50 -04:00
Jeffrey Morgan
598621afab add push script for docker images 2023-10-14 14:24:39 -04:00
Matt Williams
6479f49c09 Merge pull request #773 from jmorganca/mattw/howtoquant
add how to quantize doc
2023-10-14 08:29:39 -07:00
Matt Williams
b2974a7095 applied mikes comments
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-14 08:29:24 -07:00
Jeffrey Morgan
832b4db9d4 Use correct url for auto updates 2023-10-13 19:04:42 -04:00
Bruce MacDonald
c43873f33b check update response (#785) 2023-10-13 18:05:46 -04:00
Michael Yang
11d82d7b9b update checkvram 2023-10-13 14:47:29 -07:00
Michael Yang
36fe2deebf only check system memory on macos 2023-10-13 14:47:29 -07:00
Michael Yang
4a8931f634 check total (system + video) memory 2023-10-13 14:47:29 -07:00
Michael Yang
bd6e38fb1a refactor memory check 2023-10-13 14:47:29 -07:00
Michael Yang
92189a5855 fix memory check 2023-10-13 14:47:29 -07:00
Michael Yang
d790bf9916 Merge pull request #783 from jmorganca/mxyng/fix-gpu-offloading
fix: offloading on low end GPUs
2023-10-13 14:36:44 -07:00
Michael Yang
35afac099a do not use gpu binary when num_gpu == 0 2023-10-13 14:32:12 -07:00
Michael Yang
811c3d1900 no gpu if vram < 2GB 2023-10-13 14:32:12 -07:00
Bruce MacDonald
3553d10769 check for newer updates (#784)
Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2023-10-13 17:29:46 -04:00
Bruce MacDonald
6fe178134d improve api error handling (#781)
- remove new lines from llama.cpp error messages relayed to client
- check api option types and return error on wrong type
- change num layers from 95% VRAM to 92% VRAM
2023-10-13 16:57:10 -04:00
Jeffrey Morgan
d890890f66 use lower glibc versions in Dockerfile.build 2023-10-13 01:06:19 -04:00
Jeffrey Morgan
89ba19feca use Go 1.21.3 in Dockerfile 2023-10-12 23:23:12 -04:00
Jeffrey Morgan
6f58c77671 update Dockerfile.build for linux binary builds 2023-10-12 22:14:20 -04:00
Matt Williams
3c975f898f update doc to refer to docker image
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-12 15:57:50 -07:00
Matt Williams
9245c8a1df add how to quantize doc
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-12 15:34:57 -07:00
Michael Yang
7a537cdca9 Merge pull request #770 from jmorganca/mxyng/fix-download
fix download
2023-10-12 12:56:43 -07:00
Michael Yang
257ffeb997 fix download 2023-10-12 12:52:43 -07:00
Matt Williams
9b513bb6b1 Merge pull request #753 from jmorganca/mattw/examplereorg
rename the examples to be more descriptive
2023-10-12 11:24:12 -07:00
Matt Williams
042100f797 final rename
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-12 11:23:41 -07:00
Bruce MacDonald
7804b8fab9 validate api options fields from map (#711) 2023-10-12 11:18:11 -04:00
Bruce MacDonald
56497663c8 relay model runner error message to client (#720)
* give direction to user when runner fails
* also relay errors from timeout
* increase timeout to 3 minutes
2023-10-12 11:16:37 -04:00
Matt Williams
e1afcb8af2 simple gen to simple
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-11 21:29:07 -07:00
Matt Williams
385eeea357 remove with
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-11 21:26:11 -07:00
Matt Williams
8a41b244e8 add golang gen
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-11 21:20:50 -07:00
Jeffrey Morgan
92578798bb fix relative links in README.md 2023-10-11 19:24:06 -04:00
Michael Yang
788637918a Merge pull request #760 from jmorganca/mxyng/more-downloads
Mxyng/more downloads
2023-10-11 14:33:10 -07:00
Michael Yang
c413a55093 download: handle inner errors 2023-10-11 14:15:30 -07:00
Michael Yang
630bb75d2a dynamically size download parts based on file size 2023-10-11 14:10:25 -07:00
Michael Yang
a2055a1e93 update download 2023-10-11 14:10:25 -07:00
Michael Yang
b599946b74 add format bytes 2023-10-11 14:08:23 -07:00
Michael Yang
aca2d65b82 Merge pull request #757 from jmorganca/mxyng/format-time
cleanup format time
2023-10-11 11:12:29 -07:00
Michael Yang
b5e08e3373 cleanup format time 2023-10-11 11:09:27 -07:00
Bruce MacDonald
274d5a5fdf optional parameter to not stream response (#639)
* update streaming request accept header
* add optional stream param to request bodies
2023-10-11 12:54:27 -04:00
Matt Williams
fc6b49be32 add ts alternate to python langchain simplegen
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-11 09:50:15 -07:00
Bruce MacDonald
77295f716e prevent waiting on exited command (#752)
* prevent waiting on exited command
* close llama runner once
2023-10-11 12:32:13 -04:00
Matt Williams
615f7d1dea cleanup readme.
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-11 06:13:29 -07:00
Matt Williams
cdf5e106ae rename dirs
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-11 06:10:24 -07:00
Matt Williams
a85329f59a rename the models to be more descriptive
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-10-10 17:40:02 -07:00
Bruce MacDonald
f2ba1311aa improve vram safety with 5% vram memory buffer (#724)
* check free memory not total
* wait for subprocess to exit
2023-10-10 16:16:09 -04:00
Jeffrey Morgan
65dcd0ce35 always cleanup blob download (#747) 2023-10-10 13:12:29 -04:00
Michael Yang
0040f543a2 Merge pull request #743 from jmorganca/mxyng/http-proxy
handle upstream proxies
2023-10-10 09:59:06 -07:00
Matt Williams
767f9bdbbb Merge pull request #585 from jmorganca/matt/examplementors
add the example for ask the mentors
2023-10-09 13:58:14 -07:00
Costa Alexoglou
f7f5169c94 Update api.md (#741)
Avoid triple ticks in visual editor and also copied in clipboard.
2023-10-09 16:01:46 -04:00
Michael Yang
2cfffea02e handle client proxy 2023-10-09 12:33:47 -07:00
Michael Yang
f6e98334e4 handle upstream proxies 2023-10-09 11:42:36 -07:00
Jeffrey Morgan
ab0668293c llm: fix build on amd64 2023-10-06 14:39:54 -07:00
Bruce MacDonald
af4cf55884 not found error before pulling model (#718) 2023-10-06 16:06:20 -04:00
Bruce MacDonald
d6786f2945 add feedback for reading model metadata (#722) 2023-10-06 16:05:32 -04:00
Michael Yang
38dc2f79bc Merge pull request #626 from jmorganca/mxyng/concurrent-downloads
parallel chunked downloads
2023-10-06 13:01:29 -07:00
Michael Yang
cb961c87ca Merge pull request #679 from jamesbraza/modelfile-docs
`Modelfile` syntax highlighting
2023-10-06 12:59:45 -07:00
Michael Yang
0560b28a8d names 2023-10-06 12:56:56 -07:00
Michael Yang
10199c5987 replace done channel with file check 2023-10-06 12:56:56 -07:00
Michael Yang
288814d3e4 fix ref counts 2023-10-06 12:56:43 -07:00
Michael Yang
04733438da check head request response 2023-10-06 12:56:43 -07:00
Michael Yang
711e891f0f fix resumable downloads
glob returns files in lexical order which is not appropriate when
rebuilding the parts list
2023-10-06 12:56:43 -07:00
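
The body above notes that glob returns files in lexical order, which breaks numbered part files (part 10 sorts before part 2) when the parts list is rebuilt. A hedged sketch of sorting parts numerically instead; the file-name pattern and helper are assumptions for illustration, not the repository's code:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// sortPartsNumerically orders names like "blob-partN" by their numeric
// suffix instead of lexically, so part 10 no longer sorts before part 2.
func sortPartsNumerically(parts []string) {
	sort.Slice(parts, func(i, j int) bool {
		ni, _ := strconv.Atoi(strings.TrimPrefix(parts[i], "blob-part"))
		nj, _ := strconv.Atoi(strings.TrimPrefix(parts[j], "blob-part"))
		return ni < nj
	})
}

func main() {
	parts := []string{"blob-part10", "blob-part2", "blob-part1"}
	sortPartsNumerically(parts)
	fmt.Println(parts) // [blob-part1 blob-part2 blob-part10]
}
```
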
Michael Yang
090d08422b handle unexpected eofs 2023-10-06 12:56:43 -07:00
Michael Yang
5b84404c64 handle concurrent requests for the same blobs 2023-10-06 12:56:43 -07:00
Michael Yang
8544edca21 parallel chunked downloads 2023-10-06 12:56:43 -07:00
Bruce MacDonald
5d22319a2c rename server subprocess (#700)
- this makes it easier to see that the subprocess is associated with ollama
2023-10-06 10:15:42 -04:00
Bruce MacDonald
2130c0708b output type parsed from modelfile (#678) 2023-10-05 14:58:04 -04:00
Patrick Devine
61ff1946e6 revise help text (#706) 2023-10-05 11:36:07 -07:00
Bruce MacDonald
d06bc0cb6e enable q8, q5, 5_1, and f32 for linux gpu (#699) 2023-10-05 12:53:47 -04:00
Alexander F. Rødseth
d104b7e997 Fix go test./... issue: fmt.Println arg list ends with redundant newline (#705) 2023-10-05 11:11:04 -04:00
Bruce MacDonald
9e2de1bd2c increase streaming buffer size (#692) 2023-10-04 14:09:00 -04:00
Jeffrey Morgan
dc87e9c9ae update Dockerfile to pass GOFLAGS 2023-10-03 07:05:15 -07:00
Michael Yang
367cb68dc1 Merge pull request #686 from jmorganca/mxyng/starcoder
decode starcoder
2023-10-02 22:47:19 -07:00
Michael Yang
c02c0cd483 starcoder 2023-10-02 19:56:51 -07:00
Patrick Devine
1852755154 show a default message when license/parameters/system prompt/template aren't specified (#681) 2023-10-02 14:34:52 -07:00
James Braza
6f2ce74231 Got rif of all caps to show it can be lower case 2023-10-02 13:54:27 -07:00
James Braza
6edcc5c79f Using code highlighting syntax around Modelfile 2023-10-02 13:46:05 -07:00
Bruce MacDonald
b1f7123301 clean up num_gpu calculation code (#673) 2023-10-02 14:53:42 -04:00
Bruce MacDonald
1fbf3585d6 Relay default values to llama runner (#672)
* include seed in params for llama.cpp server and remove empty filter for temp

* relay default predict options to llama.cpp

- reorganize options to match predict request for readability

* omit empty stop

---------

Co-authored-by: hallh <hallh@users.noreply.github.com>
2023-10-02 14:53:16 -04:00
Patrick Devine
99d5161e8a don't wordwrap when stdout is redirected or piped (#662) 2023-10-02 11:50:55 -07:00
Michael
ea8380be45 add community project: Chatbot Ollama
add community project: Chatbot Ollama by @ivanfioravanti
2023-10-02 09:04:31 -07:00
Jeffrey Morgan
4f25092dc1 fix build_docker.sh permissions 2023-10-01 16:42:32 -07:00
Jiayu Liu
4fc10acce9 add some missing code directives in docs (#664) 2023-10-01 11:51:01 -07:00
Michael Yang
0a4f21c0a7 fix docker build (#659) 2023-09-30 13:34:01 -07:00
Jeffrey Morgan
9abb66254a docker: fix volume permission errors 2023-09-30 12:32:15 -07:00
Jay Nakrani
1d0ebe67e8 Document response stream chunk delimiter. (#632)
Document response stream chunk delimiter.
2023-09-29 21:45:52 -07:00
Bruce MacDonald
a1b2d95f96 remove unused push/pull params (#650) 2023-09-29 17:27:19 -04:00
Michael Yang
c0b1bf7537 Merge pull request #606 from jmorganca/mxyng/install.sh-2
ordered list of install locations
2023-09-29 11:30:46 -07:00
Michael Yang
cdfeb165ca Merge pull request #608 from jmorganca/mxyng/build
update build scripts
2023-09-29 11:30:25 -07:00
Michael Yang
92d454ec5f update build_darwin.sh 2023-09-29 11:29:23 -07:00
Michael Yang
9333b0cc82 Merge pull request #612 from jmorganca/mxyng/prune-empty-directories
prune empty directories
2023-09-29 11:23:39 -07:00
Bruce MacDonald
9771b1ec51 windows runner fixes (#637) 2023-09-29 11:47:55 -04:00
Patrick Devine
76db4a49cf allow the user to cancel generating with ctrl-C (#641) 2023-09-28 17:13:01 -07:00
Luc Stepniewski
4aa0976a2e Added missing return preventing SIGSEGV because of missing resp (#621)
Co-authored-by: Luc Stepniewski <luc@eclipse-fr.com>
2023-09-28 14:25:22 -07:00
Patrick Devine
92c20fdae6 fix error messages for unknown commands in the repl (#611) 2023-09-28 14:19:45 -07:00
Michael Yang
c951da7096 Merge pull request #634 from jmorganca/mxyng/int64
use int64 consistently
2023-09-28 14:17:47 -07:00
Bruce MacDonald
24d82a23a2 do not download updates multiple times (#633) 2023-09-28 15:29:17 -04:00
Michael Yang
f40b3de758 use int64 consistently 2023-09-28 11:07:24 -07:00
Michael
5f4008c296 Update README.md
adding in instruction to run mistral
2023-09-28 09:06:03 -07:00
Aaron Coffey
6ae33d8141 Update modelfile.md to reflect the usage of num_gpu. (#629) 2023-09-28 10:21:21 -04:00
Jeffrey Morgan
c5664c1fef Update faq.md 2023-09-27 13:49:43 -07:00
Bruce MacDonald
958a5a8184 revert fedora cuda version check 2023-09-27 15:12:29 -04:00
Michael Yang
8608eb4760 prune empty directories 2023-09-27 10:58:09 -07:00
Bruce MacDonald
a2b210130f fedora install fixes (#609) 2023-09-27 11:43:47 -04:00
Bruce MacDonald
ed20837f9a Update modelfile.md 2023-09-27 10:38:10 -04:00
James Braza
1db2a61dd0 Added num_predict to the options table (#614) 2023-09-27 10:26:08 -04:00
Jeffrey Morgan
2ded8ab206 use 11.8.0 nvidia dockerfile base image for now 2023-09-26 21:48:41 -07:00
Michael Yang
e6b3648bbf Merge pull request #616 from jmorganca/mxyng/fix-model-name 2023-09-26 20:54:18 -07:00
Michael Yang
0625e805f0 fix model name not matching 2023-09-26 19:50:04 -07:00
Michael Yang
c38ec5befb Merge pull request #598 from jmorganca/mxyng/help-exit
add painter message for exit
2023-09-26 15:17:40 -07:00
Michael Yang
c577721a43 Merge pull request #605 from jmorganca/mxyng/install.sh
do not unload nouveau driver
2023-09-26 09:53:05 -07:00
Michael Yang
29c056ea39 ordered list of install locations 2023-09-26 09:38:11 -07:00
Michael Yang
9fc3bba9cf do no unload nouveau driver 2023-09-26 09:36:54 -07:00
Michael Chiang
7774ed4ae6 Update README.md for linux + cleanup (#601)
Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2023-09-25 23:44:53 -07:00
Michael Yang
11f920f209 Merge pull request #599 from jmorganca/mxyng/install.sh
update install.sh
2023-09-25 18:24:13 -07:00
Michael Yang
6e6b655956 update install.sh 2023-09-25 18:09:44 -07:00
Michael Yang
110ae89a6c Merge pull request #596 from jmorganca/mxyng/install.sh
update install.sh
2023-09-25 17:59:13 -07:00
Michael Yang
5e388f931e check cuda installed before installing 2023-09-25 17:56:43 -07:00
Michael Yang
d5ad41dd7b fix path for wsl user 2023-09-25 17:56:25 -07:00
Michael Yang
d294a11bc9 start service on exit instead of immediately 2023-09-25 17:54:02 -07:00
Michael Yang
93d887e4bc add painter message for exit 2023-09-25 16:30:22 -07:00
Jeffrey Morgan
5306b0269d Update linux.md 2023-09-25 16:10:32 -07:00
Michael Yang
7de0c8345d Merge pull request #595 from jmorganca/mxyng/install.sh
ignore systemctl is-system-running exit code
2023-09-25 15:49:47 -07:00
Michael Yang
1b9dcab3ab ignore systemctl is-system-running exit code 2023-09-25 15:47:45 -07:00
Bruce MacDonald
86279f4ae3 unbound max num gpu layers (#591)
---------

Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-25 18:36:46 -04:00
Michael Yang
b934bf23e6 exit on unknown distro (#594) 2023-09-25 15:30:58 -07:00
Michael Yang
2b8ef455ad Merge pull request #593 from jmorganca/mxyng/install.sh
update install.sh
2023-09-25 14:09:40 -07:00
Michael Yang
0c5f47177c update install.sh 2023-09-25 14:01:44 -07:00
Michael Yang
1210db2924 Merge pull request #592 from jmorganca/mxyng/install.sh
fix dkms on debian
2023-09-25 12:59:01 -07:00
Michael Yang
d0854bf1e6 fix dkms on debian 2023-09-25 12:57:25 -07:00
Michael Yang
8396463255 Merge pull request #590 from jmorganca/mxyng/install.sh
fix dkms install
2023-09-25 12:17:31 -07:00
Michael Yang
a027bbf4d7 fix dkms install 2023-09-25 12:16:41 -07:00
Michael Yang
ed94a3dd02 Merge pull request #589 from jmorganca/mxyng/install.sh
update install.sh
2023-09-25 11:08:25 -07:00
Michael Yang
f14f62ab3b update install.sh 2023-09-25 11:05:38 -07:00
Jeffrey Morgan
0fb5268496 Update linux.md 2023-09-25 10:06:23 -07:00
Bruce MacDonald
c65edb1506 fix linux installer warning logs (#588) 2023-09-25 11:22:56 -04:00
Twan L
1605af32ec Added a new community project (#574) 2023-09-25 10:40:59 -04:00
Jeffrey Morgan
ee3032ad89 improvements to docs/linux.md 2023-09-24 21:50:07 -07:00
Jeffrey Morgan
5b7a27281d improvements to docs/linux.md 2023-09-24 21:38:23 -07:00
Jeffrey Morgan
d2a784e33e add docs/linux.md 2023-09-24 21:34:44 -07:00
Jeffrey Morgan
413a2e4f91 set DEBIAN_FRONTEND=noninteractive correctly 2023-09-24 20:35:42 -07:00
Matt Williams
a92fdff620 add the example for ask the mentors
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-09-24 15:58:32 -07:00
Patrick Devine
b5614f3ebc fix end-of-line issue with the new prompt (#582) 2023-09-23 17:20:30 -07:00
Jeffrey Morgan
8b2ba9cab8 minor improvements to install.sh 2023-09-23 11:20:39 -04:00
Jeffrey Morgan
e29662ab5c fix minor install script issues on debian 2023-09-23 10:25:47 -04:00
Bruce MacDonald
cbc40aa996 debian installer support (#579)
* debian installer support

- normalize os name to lowercase
- check needed commands are available
- dont check sudo when root user
- share common install commands
- support debian cuda install
- skip aarm cuda install
- system user shared home dir

* refactor and add other platforms (#580)

---------

Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-23 09:46:47 -04:00
Jeffrey Morgan
5cb82540c9 install.sh: update install url 2023-09-23 09:35:14 -04:00
Jeffrey Morgan
d7849a1dc9 add .env to .dockerignore 2023-09-23 00:53:48 -04:00
Jeffrey Morgan
01c44d687e add multi line strings to final prompt 2023-09-23 00:27:24 -04:00
Jeffrey Morgan
9b12a511ca check other request fields before load short circuit in /api/generate 2023-09-22 23:50:55 -04:00
Jeffrey Morgan
e20362e0d5 fix multi line input in ollama run 2023-09-22 23:49:35 -04:00
Patrick Devine
c928ceb927 add word wrapping for lines which are longer than the terminal width (#553) 2023-09-22 13:36:08 -07:00
Michael Yang
e1a0846483 Merge pull request #571 from jmorganca/mxyng/update-dockerfile
update dockerfile.cuda
2023-09-22 12:34:41 -07:00
Jeffrey Morgan
f997e29e45 Add Dockerfile.build for building linux binaries (#558)
Add `Dockerfile.build` for building linux binaries

---------

Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-22 15:20:12 -04:00
Patrick Devine
87d9efb364 switch to forked readline lib which doesn't wreck the repl prompt (#578) 2023-09-22 12:17:45 -07:00
Michael Yang
93d3a2568d replace dockerfile 2023-09-22 11:57:38 -07:00
Michael Yang
5a81390b24 update dockerfile.cuda 2023-09-22 11:57:38 -07:00
Michael Yang
a89ef99aed Merge pull request #575 from jmorganca/mxyng/fix-ipv6-only
fix ipv6 parse ip
2023-09-22 11:47:11 -07:00
Bruce MacDonald
dc0c725ceb ubuntu cuda drivers (#576) 2023-09-22 19:43:14 +01:00
Bruce MacDonald
5d71bda478 close llm on interrupt (#577) 2023-09-22 19:41:52 +01:00
Michael Yang
88897a90e4 fix ipv6 parse ip 2023-09-22 10:41:32 -07:00
Bruce MacDonald
9df31c3518 linux installer script (#534)
Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-22 17:01:03 +01:00
Michael Yang
2044f9d4da Merge pull request #570 from jmorganca/mxyng/head-request
fix HEAD request
2023-09-21 16:56:17 -07:00
Michael Yang
0d186f3b33 Merge pull request #569 from jmorganca/mxyng/update-submodules
silence warm up log
2023-09-21 16:52:42 -07:00
Michael Yang
82f5b66c01 register HEAD /api/tags 2023-09-21 16:38:03 -07:00
Michael Yang
c986694367 fix HEAD / request
HEAD requests should respond like their GET counterparts, except without a
response body.
2023-09-21 16:35:58 -07:00
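
Per the description above, a HEAD request should return the same status and headers as GET but no body. A minimal net/http illustration of that rule; the handler, payload, and port are placeholders rather than the server's real code:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func tagsHandler(w http.ResponseWriter, r *http.Request) {
	body := `{"models":[]}`

	// Same status and headers for GET and HEAD...
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprint(len(body)))
	w.WriteHeader(http.StatusOK)

	// ...but a HEAD request gets no response body.
	if r.Method == http.MethodHead {
		return
	}
	fmt.Fprint(w, body)
}

func main() {
	http.HandleFunc("/api/tags", tagsHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```
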
Michael Yang
058d0cd04b silence warm up log 2023-09-21 14:53:33 -07:00
Michael Yang
ee1c994d15 update submodule (#567) 2023-09-21 16:22:23 -04:00
Bruce MacDonald
4cba75efc5 remove tmp directories created by previous servers (#559)
* remove tmp directories created by previous servers

* clean up on server stop

* Update routes.go

* Update server/routes.go

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>

* create top-level temp ollama dir

* check file exists before creating

---------

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-21 20:38:49 +01:00
Michael Yang
8c83701e9f Merge pull request #566 from jmorganca/mxyng/api-check-model-exists
Use API to check if model exists and pull if necessary
2023-09-21 10:35:14 -07:00
Michael Yang
6137b12799 validate existence and pull model using api 2023-09-21 09:55:34 -07:00
Michael Yang
1fabba474b refactor default allow origins
this should be less error prone
2023-09-21 09:42:25 -07:00
Michael Yang
765770efdb Merge pull request #562 from jmorganca/mxyng/fix-ollama-host
fix OLLAMA_HOST parsing for ip6
2023-09-20 19:54:47 -07:00
Michael Yang
9297ff8330 fix OLLAMA_HOST parsing for ip6 2023-09-20 18:52:57 -07:00
Michael Yang
ee4fd16f2c Merge pull request #556 from jmorganca/pack-cuda
pack in cuda libs
2023-09-20 15:02:36 -07:00
Michael Yang
a9ed7cc6aa rename generate.go 2023-09-20 14:42:17 -07:00
Michael Yang
6c6a31a1e8 embed libraries using cmake 2023-09-20 14:41:57 -07:00
Bruce MacDonald
fc6ec356fc remove libcuda.so 2023-09-20 20:36:14 +01:00
Bruce MacDonald
1255bc9b45 only package 11.8 runner 2023-09-20 20:00:41 +01:00
Michael Yang
084e4c782a Merge pull request #557 from jmorganca/mxyng/cleanup
fix impossible condition
2023-09-20 11:51:01 -07:00
Michael Yang
58ffa03d8b fix impossible condition 2023-09-20 11:27:44 -07:00
Michael Yang
637f8bc6a5 Merge pull request #536 from jmorganca/mxyng/redirect-uploads
explicitly follow upload redirects
2023-09-20 11:27:03 -07:00
Michael Yang
499e9007a5 pick chunksize based on location 2023-09-20 11:10:24 -07:00
Bruce MacDonald
b9bb5ca288 use cuda_version 2023-09-20 17:58:16 +01:00
Bruce MacDonald
4e8be787c7 pack in cuda libs 2023-09-20 17:40:42 +01:00
Michael Yang
aa45d7c1df draft: explicitly follow upload redirects 2023-09-19 13:36:58 -07:00
Michael Yang
e35565c567 Merge pull request #555 from jmorganca/mxyng/fix-windows-startup
fix build
2023-09-19 10:51:58 -07:00
Michael Yang
a5520bfb42 fix build 2023-09-19 10:42:24 -07:00
Michael Yang
2627c464ba Merge pull request #554 from jmorganca/mxyng/fix-windows-startup
fix mkdir on windows
2023-09-19 09:42:12 -07:00
Michael Yang
b58d5d16b0 fix mkdir on windows 2023-09-19 09:41:13 -07:00
Patrick Devine
24580df958 only add a layer if there is actual data (#535) 2023-09-18 13:47:45 -07:00
Patrick Devine
80dd44e80a Cmd changes (#541) 2023-09-18 12:26:56 -07:00
James Braza
94e1d96b29 Updated README section on community projects for table (#550) 2023-09-18 15:22:50 -04:00
Bruce MacDonald
66003e1d05 subprocess improvements (#524)
* subprocess improvements

- increase start-up timeout
- when runner fails to start fail rather than timing out
- try runners in order rather than choosing 1 runner
- embed metal runner in metal dir rather than gpu
- refactor logging and error messages

* Update llama.go

* Update llama.go

* simplify by using glob
2023-09-18 15:16:32 -04:00
Michael Yang
c345053a8b Merge pull request #537 from jmorganca/mxyng/upload
fix error on upload chunk
2023-09-15 17:48:39 -07:00
Michael Yang
08d7c2a944 fix error on upload chunk 2023-09-15 15:59:30 -07:00
Michael Yang
bc9573dcb1 Merge pull request #530 from jmorganca/mxyng/progresswriter
implement ProgressWriter
2023-09-15 12:43:46 -07:00
Michael Yang
e53bc57d4d split uploadBlobChunked 2023-09-14 17:22:05 -07:00
Michael Yang
f0b398d17f implement ProgressWriter 2023-09-14 17:22:04 -07:00
Patrick Devine
8efbc5df55 DRAFT: add a simple python client to access ollama (#522) 2023-09-14 16:37:38 -07:00
Michael Yang
ccc3e9ac6d Merge pull request #531 from jmorganca/mxyng/content-length
set request.ContentLength
2023-09-14 13:33:11 -07:00
Michael Yang
daa4f096f9 set request.ContentLength
This informs the HTTP client the content length is known and disables
chunked Transfer-Encoding
2023-09-14 13:32:44 -07:00
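
The body above explains that setting request.ContentLength lets the client send a known Content-Length instead of chunked Transfer-Encoding. A small sketch of the idea; the URL and payload are placeholders and nothing is actually sent:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	payload := []byte("one chunk of blob data")

	// The URL is a placeholder; the request is built but not sent here.
	req, err := http.NewRequest(http.MethodPut, "http://example.com/upload", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}

	// With a known ContentLength the client sends a Content-Length header;
	// leaving it unset with a plain io.Reader body falls back to
	// chunked Transfer-Encoding.
	req.ContentLength = int64(len(payload))

	fmt.Println("Content-Length:", req.ContentLength)
}
```
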
Michael Yang
3ee85f1c6c Merge pull request #526 from jmorganca/mxyng/cleanup
remove unused
2023-09-14 13:10:59 -07:00
Bruce MacDonald
2540c9181c support for packaging in multiple cuda runners (#509)
* enable packaging multiple cuda versions
* use nvcc cuda version if available

---------

Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-14 15:08:13 -04:00
Michael Yang
83ffb154bc Merge pull request #507 from jmorganca/mxyng/build
update docker image
2023-09-14 11:25:59 -07:00
Michael Yang
9aa192c812 update cuda docker image 2023-09-14 11:25:20 -07:00
Matt Williams
fc8707686f Update API docs (#527)
* Update API docs

Signed-off-by: Matt Williams <m@technovangelist.com>

* strange TOC was getting auto generated

Signed-off-by: Matt Williams <m@technovangelist.com>

* Update docs/api.md

Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>

* Update docs/api.md

Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>

* Update docs/api.md

Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>

* Update api.md

---------

Signed-off-by: Matt Williams <m@technovangelist.com>
Co-authored-by: Bruce MacDonald <brucewmacdonald@gmail.com>
Co-authored-by: Michael Chiang <mchiang0610@users.noreply.github.com>
2023-09-14 08:51:26 -07:00
Michael Yang
f89c23764b Merge pull request #525 from jmorganca/mxyng/falcon-decode
fix: add falcon.go
2023-09-13 15:08:47 -07:00
Michael Yang
e6881cabd0 remove unused 2023-09-13 14:48:33 -07:00
Michael Yang
d028853879 fix: add falcon.go 2023-09-13 14:47:37 -07:00
Michael Yang
949553db23 Merge pull request #519 from jmorganca/mxyng/decode
Mxyng/decode
2023-09-13 12:43:57 -07:00
Michael Yang
0c5a454361 fix model type for 70b 2023-09-12 15:12:59 -07:00
Bruce MacDonald
f59c4d03f7 fix ggml arm64 cuda build (#520) 2023-09-12 17:06:48 -04:00
Michael Yang
7dee25a07f fix falcon decode
get model and file type from bin file
2023-09-12 12:34:53 -07:00
Bruce MacDonald
f221637053 first pass at linux gpu support (#454)
* linux gpu support
* handle multiple gpus
* add cuda docker image (#488)
---------

Co-authored-by: Michael Yang <mxyng@pm.me>
2023-09-12 11:04:35 -04:00
Patrick Devine
45ac07cd02 create the blobs directory correctly (#508) 2023-09-11 14:54:52 -07:00
Jeffrey Morgan
7d749cc787 fix darwin build script 2023-09-11 16:31:46 -04:00
Patrick Devine
e7e91cd71c add autoprune to remove unused layers (#491) 2023-09-11 11:46:35 -07:00
Jeffrey Morgan
3920e15386 add model format to config layer (#497) 2023-09-09 17:53:44 -04:00
Michael Yang
41e976edde Merge pull request #492 from jmorganca/mxyng/nil-pointer
fix nil pointer dereference
2023-09-07 17:25:23 -07:00
Michael Yang
de227b620f fix nil pointer dereference 2023-09-07 17:24:31 -07:00
Michael Yang
63def6ca49 Merge pull request #487 from jmorganca/mxyng/dockerignore
update dockerignore
2023-09-07 14:16:17 -07:00
Michael Yang
738fe9c4aa Merge pull request #486 from jmorganca/mxyng/fix-push
fix: retry push on expired token
2023-09-07 13:58:34 -07:00
Michael Yang
a8da0bacbe update dockerignore 2023-09-07 13:36:25 -07:00
Michael Yang
bf146fb072 fix retry on unauthorized chunk 2023-09-07 12:02:04 -07:00
Michael Yang
f0f4943577 fix get auth token 2023-09-07 12:01:56 -07:00
Bruce MacDonald
09dd2aeff9 GGUF support (#441) 2023-09-07 13:55:37 -04:00
Alexander Pepper
07b4074e7b [docs] Improve build instructions (#482)
Go is required and not installed by default.
2023-09-07 06:43:26 -04:00
Jeffrey Morgan
61dda6a5e0 set minimum CMAKE_OSX_DEPLOYMENT_TARGET to 11.0 2023-09-06 19:56:50 -04:00
Michael Yang
e1f9ced568 Merge pull request #479 from jmorganca/mxyng/dockerfile
update dockerfile
2023-09-06 15:44:24 -07:00
Michael Yang
9795b43d93 update dockerfile 2023-09-06 15:31:25 -07:00
Michael Yang
0980d5c7e3 Merge pull request #478 from jmorganca/mxyng/cleanup
remove unused openssh key types
2023-09-06 15:18:54 -07:00
Michael Yang
0dae34b6a7 remove unused openssh key types 2023-09-06 14:34:09 -07:00
Michael Yang
83c6be1666 fix model manifests (#477) 2023-09-06 17:30:08 -04:00
Patrick Devine
1adfa67589 tighten up the error string for ollama show flags (#476) 2023-09-06 13:38:49 -07:00
Patrick Devine
790d24eb7b add show command (#474) 2023-09-06 11:04:17 -07:00
Jeffrey Morgan
7de300856b use osPath in gpu check 2023-09-05 21:52:21 -04:00
Jeffrey Morgan
213ffdb548 macos amd64 compatibility fixes 2023-09-05 21:33:31 -04:00
Michael Yang
d42d88386a Merge pull request #473 from jmorganca/mxyng/fix-manifest-path
create manifests directory
2023-09-05 17:37:41 -07:00
Ackermann Yuriy
154f24af91 Added missing options params to the embeddings docs (#472) 2023-09-05 20:18:49 -04:00
Michael Yang
a1ecdd36d5 create manifests directory 2023-09-05 17:10:40 -07:00
Bruce MacDonald
d18282bfda metal: add missing barriers for mul-mat (#469) 2023-09-05 19:37:13 -04:00
Michael Yang
9ae76ba8c9 Merge pull request #471 from jmorganca/mxyng/fix-empty-response
fix empty response
2023-09-05 15:23:05 -07:00
Michael Yang
2bc06565c7 fix empty response 2023-09-05 15:03:24 -07:00
Michael Yang
d1c2558f7e Merge pull request #461 from jmorganca/mxyng/fix-inherit-params
fix inherit params
2023-09-05 12:30:23 -07:00
Michael Yang
7b5aefb427 Merge pull request #462 from jmorganca/mxyng/rm-marshal-prompt
remove marshalPrompt which is no longer needed
2023-09-05 11:48:41 -07:00
Michael Yang
06ef90c051 fix parameter inheritence
parameters are not inherited because they are processed differently from
other layers. fix this by explicitly merging the inherited params into
the new params. parameter values defined in the new modelfile will
override those defined in the inherited modelfile. array lists are
replaced instead of appended
2023-09-05 11:40:20 -07:00
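
The description above fixes inheritance by explicitly merging inherited parameters into the new ones, with values from the new Modelfile overriding and list values replaced rather than appended. A minimal sketch of that merge rule; the map shape and keys are illustrative only:

```go
package main

import "fmt"

// mergeParams starts from the inherited parameters and lets the child's
// values win. Assignment overwrites wholesale, so list-valued parameters
// are replaced rather than appended to.
func mergeParams(inherited, child map[string]any) map[string]any {
	out := make(map[string]any, len(inherited)+len(child))
	for k, v := range inherited {
		out[k] = v
	}
	for k, v := range child {
		out[k] = v
	}
	return out
}

func main() {
	inherited := map[string]any{"temperature": 0.8, "stop": []string{"</s>"}}
	child := map[string]any{"stop": []string{"User:"}}

	fmt.Println(mergeParams(inherited, child))
	// map[stop:[User:] temperature:0.8]
}
```
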
Michael Yang
7efbc84320 Merge pull request #464 from jmorganca/mxyng/fix-num-keep
fix num_keep
2023-09-05 11:30:45 -07:00
Michael Yang
e9f6df7dca use slices.DeleteFunc 2023-09-05 09:56:59 -07:00
Jeffrey Morgan
7fa6e51686 generate binary dependencies based on GOARCH on macos (#459) 2023-09-05 12:53:57 -04:00
Michael Yang
8dc68417e7 Merge pull request #463 from jmorganca/mxyng/fix-last-token
fix not forwarding last token
2023-09-05 09:01:32 -07:00
Michael Yang
681f3c4c42 fix num_keep 2023-09-03 17:47:49 -04:00
Michael Yang
59a705525c fix not forwarding last token 2023-09-03 17:46:50 -04:00
Michael Yang
5d3f314b0b remove marshalPrompt which is no longer needed 2023-09-03 17:01:05 -04:00
Michael Yang
adaa13088b Merge pull request #457 from sqs/dont-html-escape-prompt
do not HTML-escape prompt
2023-09-01 17:41:53 -07:00
Quinn Slack
62d29b2157 do not HTML-escape prompt
The `html/template` package automatically HTML-escapes interpolated strings in templates. This behavior is undesirable because it causes prompts like `<h1>hello` to be escaped to `&lt;h1&gt;hello` before being passed to the LLM.

The included test case passes, but before the code change, it failed:

```
--- FAIL: TestModelPrompt
    images_test.go:21: got "a&lt;h1&gt;b", want "a<h1>b"
```
2023-09-01 17:16:38 -05:00
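
The commit above stops HTML-escaping prompts. The standard library makes the difference easy to see: html/template escapes interpolated strings while text/template passes them through. The tiny template below is illustrative, not Ollama's actual prompt template:

```go
package main

import (
	htmltmpl "html/template"
	"os"
	texttmpl "text/template"
)

func main() {
	const tmpl = "prompt: {{.Prompt}}\n"
	data := map[string]string{"Prompt": "<h1>hello"}

	// html/template escapes the value: prompt: &lt;h1&gt;hello
	htmltmpl.Must(htmltmpl.New("h").Parse(tmpl)).Execute(os.Stdout, data)

	// text/template passes it through unchanged: prompt: <h1>hello
	texttmpl.Must(texttmpl.New("t").Parse(tmpl)).Execute(os.Stdout, data)
}
```
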
Michael Yang
ed19d10aa5 update readme (#451)
* update readme

* readme: more run examples
2023-09-01 16:44:14 -04:00
Michael Yang
36c2f45c40 Merge pull request #450 from jmorganca/mxyng/update-readme
update readme
2023-09-01 08:21:49 -07:00
Michael Yang
742226625f update readme 2023-09-01 10:54:31 -04:00
Matt Williams
6bb8a16ccb Merge pull request #273 from jmorganca/matt/moreexamples
Create a sentiments example
2023-08-31 16:31:59 -07:00
Jeffrey Morgan
a5dbcf2e73 app: dont package ggml-metal.metal 2023-08-31 17:41:09 -04:00
Michael Yang
9304f0e7a8 Merge pull request #443 from jmorganca/mxyng/fix-list-models
windows: fix filepath bugs
2023-08-31 14:19:10 -07:00
Michael Yang
6578b2f8a1 Merge pull request #448 from callmephilip/patch-1
fix spelling errors in example prompts
2023-08-31 08:57:07 -07:00
Michael Yang
1c8fd627ad windows: fix create modelfile 2023-08-31 09:47:10 -04:00
Michael Yang
ae950b00f1 windows: fix delete 2023-08-31 09:47:10 -04:00
Michael Yang
eeb40a672c fix list models for windows 2023-08-31 09:47:10 -04:00
Michael Yang
0f541a0367 s/ListResponseModel/ModelResponse/ 2023-08-31 09:47:10 -04:00
Philip Nuzhnyi
1363f537ce fix spelling errors in prompt 2023-08-31 10:02:46 +01:00
Jeffrey Morgan
bc3e21fdc6 update README.md 2023-08-30 17:56:14 -04:00
Jeffrey Morgan
a82eb275ff update docs for subprocess 2023-08-30 17:54:02 -04:00
Bruce MacDonald
f964aea9a2 remove test not applicate to subprocess 2023-08-30 16:36:11 -04:00
Bruce MacDonald
42998d797d subprocess llama.cpp server (#401)
* remove c code
* pack llama.cpp
* use request context for llama_cpp
* let llama_cpp decide the number of threads to use
* stop llama runner when app stops
* remove sample count and duration metrics
* use go generate to get libraries
* tmp dir for running llm
2023-08-30 16:35:03 -04:00
Quinn Slack
f4432e1dba treat stop as stop sequences, not exact tokens (#442)
The `stop` option to the generate API is a list of sequences that should cause generation to stop. Although these are commonly called "stop tokens", they do not necessarily correspond to LLM tokens (per the LLM's tokenizer). For example, if the caller sends a generate request with `"stop":["\n"]`, then generation should stop on any token containing `\n` (and trim `\n` from the output), not just if the token exactly matches `\n`. If `stop` were interpreted strictly as LLM tokens, then it would require callers of the generate API to know the LLM's tokenizer and enumerate many tokens in the `stop` list.

Fixes https://github.com/jmorganca/ollama/issues/295.
2023-08-30 11:53:42 -04:00
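
As the message above explains, each stop value is treated as a sequence to search for in generated text rather than an exact token match, and the output is trimmed at the first occurrence. A hedged sketch of that check; the function name is invented and this is not the repository's implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// checkStop reports whether any stop sequence occurs in the generated text
// and, if so, returns the text truncated just before it.
func checkStop(text string, stops []string) (string, bool) {
	for _, s := range stops {
		if i := strings.Index(text, s); i >= 0 {
			return text[:i], true
		}
	}
	return text, false
}

func main() {
	out, stopped := checkStop("line one\nline two", []string{"\n"})
	fmt.Printf("%q stopped=%v\n", out, stopped) // "line one" stopped=true
}
```
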
Michael Yang
982c535428 Merge pull request #428 from jmorganca/mxyng/upload-chunks
update upload chunks
2023-08-30 07:47:17 -07:00
Michael Yang
7df342a6ea Merge pull request #421 from jmorganca/mxyng/f16-metal
allow F16 to use metal
2023-08-29 06:32:59 -07:00
Patrick Devine
8bbff2df98 add model IDs (#439) 2023-08-28 20:50:24 -07:00
Michael Yang
16b06699fd remove unused parameter 2023-08-28 18:35:18 -04:00
Michael Yang
246dc65417 loosen http status code checks 2023-08-28 18:34:53 -04:00
Michael Yang
865fceb73c chunked pipe 2023-08-28 18:34:53 -04:00
Michael Yang
72266c7684 bump chunk size to 95MB 2023-08-28 18:34:53 -04:00
Jeffrey Morgan
d3b838ce60 update orca to orca-mini 2023-08-27 13:26:30 -04:00
Michael Yang
e639a12fa1 Merge pull request #412 from jmorganca/mxyng/update-readme
update README.md
2023-08-26 21:26:34 -07:00
Michael Yang
e82fcf30c6 Merge pull request #420 from jmorganca/mxyng/34b-mem-check
add 34b to mem check
2023-08-26 14:15:52 -07:00
Michael Yang
495e8b0a6a Merge pull request #426 from jmorganca/default-template
set default template
2023-08-26 14:15:38 -07:00
Michael Yang
59734ca24d set default template 2023-08-26 12:20:48 -07:00
Jeffrey Morgan
22ab7f5f88 default host to 127.0.0.1, fixes #424 2023-08-26 11:59:28 -07:00
Michael Yang
b25dd1795d allow F16 to use metal
warning: F16 uses significantly more memory than a quantized model, so the
standard requirements don't apply.
2023-08-26 08:38:48 -07:00
Michael Yang
304f2b6c96 add 34b to mem check 2023-08-26 08:29:21 -07:00
Quinn Slack
2ecc3a33c3 delete all models (not just 1st) in ollama rm (#415)
Previously, `ollama rm model1 model2 modelN` would only delete `model1`. The other model command-line arguments would be silently ignored. Now, all models mentioned are deleted.
2023-08-26 00:47:56 -07:00
Jeffrey Morgan
ee6e1df118 add codellama to model list in readme 2023-08-25 20:44:26 -07:00
Jeffrey Morgan
177b69a211 add missing entries for 34B 2023-08-25 18:35:35 -07:00
Michael Yang
dad63f0821 Merge pull request #411 from jmorganca/mxyng/34b
patch llama.cpp for 34B
2023-08-25 11:59:05 -07:00
Michael Yang
041f9ad1a1 update README.md 2023-08-25 11:44:25 -07:00
Michael Yang
7a378f8b66 patch llama.cpp for 34B 2023-08-25 10:06:55 -07:00
Michael Yang
de0bdd7f29 Merge pull request #405 from jmorganca/mxyng/34b
add 34b model type
2023-08-24 10:37:22 -07:00
Michael Yang
b1cececb8e add 34b model type 2023-08-24 10:35:44 -07:00
Michael Yang
e0d39fa3bf Merge pull request #398 from jmorganca/mxyng/cleanup
Mxyng/cleanup
2023-08-22 15:51:41 -07:00
Michael Yang
968ced2e71 Merge pull request #393 from jmorganca/mxyng/net-url
use url.URL
2023-08-22 15:51:33 -07:00
Michael Yang
32d1a00017 remove unused requestContextKey 2023-08-22 10:49:54 -07:00
Michael Yang
04e2128273 move upload funcs to upload.go 2023-08-22 10:49:53 -07:00
Michael Yang
2cc634689b use url.URL 2023-08-22 10:49:07 -07:00
Michael Yang
8f827641b0 Merge pull request #397 from jmorganca/mxyng/release-mode
build release mode
2023-08-22 10:48:44 -07:00
Michael Yang
95187d7e1e build release mode 2023-08-22 09:52:43 -07:00
Michael Yang
9ec7e37534 Merge pull request #392 from jmorganca/mxyng/version
add version
2023-08-22 09:50:25 -07:00
Michael Yang
2c7f956b38 add version 2023-08-22 09:40:58 -07:00
Jeffrey Morgan
a9f6c56652 fix FROM instruction erroring when referring to a file 2023-08-22 09:39:42 -07:00
Ryan Baker
0a892419ad Strip protocol from model path (#377) 2023-08-21 21:56:56 -07:00
Jeffrey Morgan
e3054fc74e add .env to .dockerignore 2023-08-21 09:32:02 -07:00
Michael Yang
23c2485044 Merge pull request #381 from jmorganca/mxyng/fix-push-chunks
retry on unauthorized chunk push
2023-08-18 13:49:25 -07:00
Michael Yang
386c66f285 Merge pull request #378 from jmorganca/mxyng/copy-metadata-from-source
copy metadata from source
2023-08-18 13:49:09 -07:00
Michael Yang
3b49315f97 retry on unauthorized chunk push
The token printed for authorized requests has a lifetime of 1h. If an
upload exceeds 1h, a chunk push will fail since the token is created on
a "start upload" request.

This replaces the Pipe with SectionReader which is simpler and
implements Seek, a requirement for makeRequestWithRetry. This is
slightly worse than using a Pipe since the progress update is directly
tied to the chunk size instead of controlled separately.
2023-08-18 11:23:47 -07:00
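
The body above replaces a Pipe with a SectionReader because retried chunk pushes need a request body that supports Seek. A minimal illustration of why SectionReader satisfies that; the blob contents and offsets are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	// The blob would normally be an *os.File; strings.Reader also
	// implements io.ReaderAt, which is all SectionReader needs.
	blob := strings.NewReader("0123456789abcdef")

	// Expose bytes 4..11 of the blob as one upload chunk.
	chunk := io.NewSectionReader(blob, 4, 8)

	first, _ := io.ReadAll(chunk)
	fmt.Printf("first attempt sends:  %q\n", first)

	// Unlike an io.Pipe, a SectionReader can Seek, so the same chunk
	// can be rewound and resent when a push has to be retried.
	chunk.Seek(0, io.SeekStart)
	second, _ := io.ReadAll(chunk)
	fmt.Printf("retry sends the same: %q\n", second)
}
```
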
Michael Yang
5ca05c2e88 fix ModelType() 2023-08-18 11:23:38 -07:00
Michael Yang
7eda70f23b copy metadata from source 2023-08-17 21:55:25 -07:00
Matt Williams
da36196d79 Update the modelfile
needed to override the system prompt
from orca and make it easier for a downstream
user to define their system prompt

Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-04 08:11:24 -07:00
Matt Williams
42903973b7 Added an example to generate a list of 10 tweets
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-03 17:26:05 -07:00
Matt Williams
8f2df948ab Create a sentiments example
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-08-03 16:38:31 -07:00
150 changed files with 8678 additions and 46110 deletions


@@ -1,7 +1,8 @@
build
llama/build
.venv
.vscode
ollama
app
web
dist
scripts
llm/llama.cpp/ggml
llm/llama.cpp/gguf
.env

.gitignore (3 changes)

@@ -5,5 +5,4 @@
.swp
dist
ollama
/ggml-metal.metal
build
ggml-metal.metal

.gitmodules (new file, 10 lines)

@@ -0,0 +1,10 @@
[submodule "llm/llama.cpp/ggml"]
path = llm/llama.cpp/ggml
url = https://github.com/ggerganov/llama.cpp.git
ignore = dirty
shallow = true
[submodule "llm/llama.cpp/gguf"]
path = llm/llama.cpp/gguf
url = https://github.com/ggerganov/llama.cpp.git
ignore = dirty
shallow = true


@@ -1,40 +0,0 @@
cmake_minimum_required(VERSION 3.14) # 3.11 or later for FetchContent, but some features might require newer versions
project(llama_cpp)
include(FetchContent)
FetchContent_Declare(
llama_cpp_gguf
GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
GIT_TAG 6381d4e
)
FetchContent_Declare(
llama_cpp_ggml
GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
GIT_TAG dadbed9
)
FetchContent_MakeAvailable(llama_cpp_ggml)
add_subdirectory(${llama_cpp_ggml_SOURCE_DIR}/examples EXCLUDE_FROM_ALL)
add_executable(llama_cpp ${llama_cpp_ggml_SOURCE_DIR}/examples/server/server.cpp)
include_directories(${llama_cpp_ggml_SOURCE_DIR})
include_directories(${llama_cpp_ggml_SOURCE_DIR}/examples)
target_compile_features(llama_cpp PRIVATE cxx_std_11)
target_link_libraries(llama_cpp PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
if (APPLE)
add_executable(llama_cpp_metal ${llama_cpp_ggml_SOURCE_DIR}/examples/server/server.cpp)
target_compile_options(llama_cpp_metal PRIVATE -DLLAMA_STATIC=ON -DLLAMA_METAL=ON -DGGML_USE_METAL=1)
target_compile_features(llama_cpp_metal PRIVATE cxx_std_11)
target_link_libraries(llama_cpp_metal PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
configure_file(${llama_cpp_SOURCE_DIR}/ggml-metal.metal ${CMAKE_BINARY_DIR}/ggml-metal.metal COPYONLY)
else()
add_executable(llama_cpp_cublas ${llama_cpp_ggml_SOURCE_DIR}/examples/server/server.cpp)
target_compile_definitions(llama_cpp_cublas PRIVATE -DLLAMA_STATIC=ON -DLLAMA_CUBLAS=ON)
target_compile_options(llama_cpp_cublas PRIVATE -DLLAMA_CUBLAS=ON -DLLAMA_STATIC=ON)
target_compile_features(llama_cpp_cublas PRIVATE cxx_std_11)
target_link_libraries(llama_cpp_cublas PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
endif()


@@ -1,15 +1,23 @@
FROM golang:1.20
WORKDIR /go/src/github.com/jmorganca/ollama
COPY . .
RUN CGO_ENABLED=1 go build -ldflags '-linkmode external -extldflags "-static"' .
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
FROM alpine
ARG TARGETARCH
ARG GOFLAGS="'-ldflags=-w -s'"
WORKDIR /go/src/github.com/jmorganca/ollama
RUN apt-get update && apt-get install -y git build-essential cmake
ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
COPY . .
ENV GOARCH=$TARGETARCH
ENV GOFLAGS=$GOFLAGS
RUN /usr/local/go/bin/go generate ./... \
&& /usr/local/go/bin/go build .
FROM ubuntu:22.04
RUN apt-get update && apt-get install -y ca-certificates
COPY --from=0 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
EXPOSE 11434
ARG USER=ollama
ARG GROUP=ollama
RUN addgroup -g 1000 $GROUP && adduser -u 1000 -DG $GROUP $USER
USER $USER:$GROUP
ENTRYPOINT ["/bin/ollama"]
ENV OLLAMA_HOST 0.0.0.0
ENTRYPOINT ["/bin/ollama"]
CMD ["serve"]

Dockerfile.build (new file, 31 lines)

@@ -0,0 +1,31 @@
# centos7 amd64 dependencies
FROM --platform=linux/amd64 nvidia/cuda:11.3.1-devel-centos7 AS base-amd64
RUN yum install -y https://repo.ius.io/ius-release-el7.rpm centos-release-scl && \
yum update -y && \
yum install -y devtoolset-10-gcc devtoolset-10-gcc-c++ git236 wget
RUN wget "https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-x86_64.sh" -O cmake-installer.sh && chmod +x cmake-installer.sh && ./cmake-installer.sh --skip-license --prefix=/usr/local
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
# centos8 arm64 dependencies
FROM --platform=linux/arm64 nvidia/cuda-arm64:11.3.1-devel-centos8 AS base-arm64
RUN sed -i -e 's/mirrorlist/#mirrorlist/g' -e 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
RUN yum install -y git cmake
FROM base-${TARGETARCH}
ARG TARGETARCH
ARG GOFLAGS="'-ldflags -w -s'"
# install go
ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
# build the final binary
WORKDIR /go/src/github.com/jmorganca/ollama
COPY . .
ENV GOOS=linux
ENV GOARCH=$TARGETARCH
ENV GOFLAGS=$GOFLAGS
RUN /usr/local/go/bin/go generate ./... && \
/usr/local/go/bin/go build .

README.md (201 changes)

@@ -9,19 +9,32 @@
[![Discord](https://dcbadge.vercel.app/api/server/ollama?style=flat&compact=true)](https://discord.gg/ollama)
Run, create, and share large language models (LLMs).
Get up and running with large language models locally.
> Note: Ollama is in early preview. Please report any issues you find.
### macOS
## Download
[Download](https://ollama.ai/download/Ollama-darwin.zip)
- [Download](https://ollama.ai/download) for macOS
- Download for Windows and Linux (coming soon)
- Build [from source](#building)
### Windows
Coming soon!
### Linux & WSL2
```
curl https://ollama.ai/install.sh | sh
```
[Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
### Docker
The official [Ollama Docker image `ollama/ollama`](https://hub.docker.com/r/ollama/ollama)
is available on Docker Hub.
## Quickstart
To run and chat with [Llama 2](https://ai.meta.com/llama), the new model by Meta:
To run and chat with [Llama 2](https://ollama.ai/library/llama2):
```
ollama run llama2
@@ -29,53 +42,59 @@ ollama run llama2
## Model library
Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library "ollama model library")
Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library 'ollama model library')
Here are some example open-source models that can be downloaded:
Here are some example open-source models that can be downloaded:
| Model | Parameters | Size | Download |
| ------------------------ | ---------- | ----- | ------------------------------- |
| Llama2 | 7B | 3.8GB | `ollama pull llama2` |
| Llama2 13B | 13B | 7.3GB | `ollama pull llama2:13b` |
| Llama2 70B | 70B | 39GB | `ollama pull llama2:70b` |
| Llama2 Uncensored | 7B | 3.8GB | `ollama pull llama2-uncensored` |
| Orca Mini | 3B | 1.9GB | `ollama pull orca-mini` |
| Vicuna | 7B | 3.8GB | `ollama pull vicuna` |
| Nous-Hermes | 7B | 3.8GB | `ollama pull nous-hermes` |
| Nous-Hermes 13B | 13B | 7.3GB | `ollama pull nous-hermes:13b` |
| Wizard Vicuna Uncensored | 13B | 7.3GB | `ollama pull wizard-vicuna` |
| Model | Parameters | Size | Download |
| ------------------ | ---------- | ----- | ------------------------------ |
| Mistral | 7B | 4.1GB | `ollama run mistral` |
| Llama 2 | 7B | 3.8GB | `ollama run llama2` |
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
| Llama 2 13B | 13B | 7.3GB | `ollama run llama2:13b` |
| Llama 2 70B | 70B | 39GB | `ollama run llama2:70b` |
| Orca Mini | 3B | 1.9GB | `ollama run orca-mini` |
| Vicuna | 7B | 3.8GB | `ollama run vicuna` |
> Note: You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.
## Examples
## Customize your own model
### Run a model
### Import from GGUF
```
ollama run llama2
>>> hi
Hello! How can I help you today?
```
Ollama supports importing GGUF models in the Modelfile:
For multiline input, you can wrap text with `"""`:
1. Create a file named `Modelfile`, with a `FROM` instruction with the local filepath to the model you want to import.
```
>>> """Hello,
... world!
... """
I'm a basic program that prints the famous "Hello, world!" message to the console.
```
```
FROM ./vicuna-33b.Q4_0.gguf
```
### Create a custom model
2. Create the model in Ollama
Pull a base model:
```
ollama create example -f Modelfile
```
3. Run the model
```
ollama run example
```
### Import from PyTorch or Safetensors
See the [guide](docs/import.md) on importing models for more information.
### Customize a prompt
Models from the Ollama library can be customized with a prompt. For example, to customize the `llama2` model:
```
ollama pull llama2
```
> To update a model to the latest version, run `ollama pull llama2` again. The model will be updated (if necessary).
Create a `Modelfile`:
```
@@ -99,44 +118,84 @@ ollama run mario
Hello! It's your friend Mario.
```
For more examples, see the [examples](./examples) directory. For more information on creating a Modelfile, see the [Modelfile](./docs/modelfile.md) documentation.
For more examples, see the [examples](examples) directory. For more information on working with a Modelfile, see the [Modelfile](docs/modelfile.md) documentation.
### Pull a model from the registry
## CLI Reference
### Create a model
`ollama create` is used to create a model from a Modelfile.
### Pull a model
```
ollama pull orca
ollama pull llama2
```
### Listing local models
> This command can also be used to update a local model. Only the diff will be pulled.
### Remove a model
```
ollama rm llama2
```
### Copy a model
```
ollama cp llama2 my-llama2
```
### Multiline input
For multiline input, you can wrap text with `"""`:
```
>>> """Hello,
... world!
... """
I'm a basic program that prints the famous "Hello, world!" message to the console.
```
### Pass in prompt as arguments
```
$ ollama run llama2 "summarize this file:" "$(cat README.md)"
Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
```
### List models on your computer
```
ollama list
```
## Model packages
### Start Ollama
### Overview
Ollama bundles model weights, configuration, and data into a single package, defined by a [Modelfile](./docs/modelfile.md).
<picture>
<source media="(prefers-color-scheme: dark)" height="480" srcset="https://github.com/jmorganca/ollama/assets/251292/2fd96b5f-191b-45c1-9668-941cfad4eb70">
<img alt="logo" height="480" src="https://github.com/jmorganca/ollama/assets/251292/2fd96b5f-191b-45c1-9668-941cfad4eb70">
</picture>
`ollama serve` is used when you want to start ollama without running the desktop application.
## Building
Install `cmake` and `go`:
```
brew install cmake go
```
Then generate dependencies and build:
```
go generate ./...
go build .
```
To run it start the server:
Next, start the server:
```
./ollama serve &
./ollama serve
```
Finally, run a model!
Finally, in a separate shell, run a model:
```
./ollama run llama2
@@ -144,9 +203,8 @@ Finally, run a model!
## REST API
> See the [API documentation](./docs/api.md) for all endpoints.
Ollama has an API for running and managing models. For example to generate text from a model:
Ollama has a REST API for running and managing models.
For example, to generate text from a model:
```
curl -X POST http://localhost:11434/api/generate -d '{
@@ -155,12 +213,25 @@ curl -X POST http://localhost:11434/api/generate -d '{
}'
```
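The same request can be made from Go using the `api` package in this repository. A minimal sketch (the model name and prompt are placeholders, and a locally running server is assumed):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// Build a client from OLLAMA_HOST (defaults to http://127.0.0.1:11434).
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := api.GenerateRequest{Model: "llama2", Prompt: "Why is the sky blue?"}
	// Generate streams the response; the callback runs once per JSON chunk.
	err = client.Generate(context.Background(), &req, func(resp api.GenerateResponse) error {
		fmt.Print(resp.Response)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println()
}
```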
## Tools using Ollama
See the [API documentation](./docs/api.md) for all endpoints.
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with a question-answering [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa).
- [Continue](https://github.com/continuedev/continue) - embeds Ollama inside Visual Studio Code. The extension lets you highlight code to add to the prompt, ask questions in the sidebar, and generate code inline.
- [LiteLLM](https://github.com/BerriAI/litellm) a lightweight python package to simplify LLM API calls
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) - interact with Ollama as a chatbot on Discord.
- [Raycast Ollama](https://github.com/MassimilianoPasquini97/raycast_ollama) - Raycast extension to use Ollama for local llama inference on Raycast.
- [Simple HTML UI for Ollama](https://github.com/rtcfirefly/ollama-ui)
- [Emacs client](https://github.com/zweifisch/ollama) for Ollama
## Community Integrations
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
- [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama)
- [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel)
- [Continue](https://github.com/continuedev/continue)
- [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)
- [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)
- [LiteLLM](https://github.com/BerriAI/litellm)
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
- [Dumbar](https://github.com/JerrySievert/Dumbar)
- [Emacs client](https://github.com/zweifisch/ollama)
- [oterm](https://github.com/ggozad/oterm)
- [Ellama Emacs client](https://github.com/s-kostyaev/ellama)
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)

api/client.go

@@ -7,26 +7,24 @@ import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
)
const DefaultHost = "localhost:11434"
var (
envHost = os.Getenv("OLLAMA_HOST")
"github.com/jmorganca/ollama/format"
"github.com/jmorganca/ollama/version"
)
type Client struct {
Base url.URL
HTTP http.Client
Headers http.Header
base *url.URL
http http.Client
}
func checkError(resp *http.Response, body []byte) error {
if resp.StatusCode >= 200 && resp.StatusCode < 400 {
if resp.StatusCode < http.StatusBadRequest {
return nil
}
@@ -41,34 +39,56 @@ func checkError(resp *http.Response, body []byte) error {
return apiError
}
// Host returns the default host to use for the client. It is determined in the following order:
// 1. The OLLAMA_HOST environment variable
// 2. The default host (localhost:11434)
func Host() string {
if envHost != "" {
return envHost
}
return DefaultHost
}
func ClientFromEnvironment() (*Client, error) {
defaultPort := "11434"
// FromEnv creates a new client using Host() as the host. An error is returned
// if the host is invalid.
func FromEnv() (*Client, error) {
h := Host()
if !strings.HasPrefix(h, "http://") && !strings.HasPrefix(h, "https://") {
h = "http://" + h
scheme, hostport, ok := strings.Cut(os.Getenv("OLLAMA_HOST"), "://")
switch {
case !ok:
scheme, hostport = "http", os.Getenv("OLLAMA_HOST")
case scheme == "http":
defaultPort = "80"
case scheme == "https":
defaultPort = "443"
}
u, err := url.Parse(h)
// trim trailing slashes
hostport = strings.TrimRight(hostport, "/")
host, port, err := net.SplitHostPort(hostport)
if err != nil {
return nil, fmt.Errorf("could not parse host: %w", err)
host, port = "127.0.0.1", defaultPort
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
host = ip.String()
} else if hostport != "" {
host = hostport
}
}
if u.Port() == "" {
u.Host += ":11434"
client := Client{
base: &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(host, port),
},
}
return &Client{Base: *u, HTTP: http.Client{}}, nil
mockRequest, err := http.NewRequest("HEAD", client.base.String(), nil)
if err != nil {
return nil, err
}
proxyURL, err := http.ProxyFromEnvironment(mockRequest)
if err != nil {
return nil, err
}
client.http = http.Client{
Transport: &http.Transport{
Proxy: http.ProxyURL(proxyURL),
},
}
return &client, nil
}
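For reference, a minimal sketch of how the renamed constructor is typically used together with `Heartbeat` (a locally running server is assumed):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// OLLAMA_HOST may be "1.2.3.4", "example.com:1234", or "https://example.com";
	// missing pieces fall back to http, 127.0.0.1, and port 11434.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// Heartbeat issues HEAD / and fails if the server is unreachable.
	if err := client.Heartbeat(context.Background()); err != nil {
		log.Fatalf("could not connect to ollama: %v", err)
	}
	fmt.Println("ollama server is reachable")
}
```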
func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
@@ -83,21 +103,17 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
reqBody = bytes.NewReader(data)
}
url := c.Base.JoinPath(path).String()
req, err := http.NewRequestWithContext(ctx, method, url, reqBody)
requestURL := c.base.JoinPath(path)
request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), reqBody)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
request.Header.Set("Content-Type", "application/json")
request.Header.Set("Accept", "application/json")
request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
for k, v := range c.Headers {
req.Header[k] = v
}
respObj, err := c.HTTP.Do(req)
respObj, err := c.http.Do(request)
if err != nil {
return err
}
@@ -120,6 +136,8 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
return nil
}
const maxBufferSize = 512 * format.KiloByte
func (c *Client) stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error {
var buf *bytes.Buffer
if data != nil {
@@ -131,21 +149,26 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
buf = bytes.NewBuffer(bts)
}
request, err := http.NewRequestWithContext(ctx, method, c.Base.JoinPath(path).String(), buf)
requestURL := c.base.JoinPath(path)
request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), buf)
if err != nil {
return err
}
request.Header.Set("Content-Type", "application/json")
request.Header.Set("Accept", "application/json")
request.Header.Set("Accept", "application/x-ndjson")
request.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
response, err := http.DefaultClient.Do(request)
response, err := c.http.Do(request)
if err != nil {
return err
}
defer response.Body.Close()
scanner := bufio.NewScanner(response.Body)
// increase the buffer size to avoid running out of space
scanBuf := make([]byte, 0, maxBufferSize)
scanner.Buffer(scanBuf, maxBufferSize)
for scanner.Scan() {
var errorResponse struct {
Error string `json:"error,omitempty"`
@@ -160,7 +183,7 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
return fmt.Errorf(errorResponse.Error)
}
if response.StatusCode >= 400 {
if response.StatusCode >= http.StatusBadRequest {
return StatusError{
StatusCode: response.StatusCode,
Status: response.Status,
@@ -250,6 +273,14 @@ func (c *Client) Delete(ctx context.Context, req *DeleteRequest) error {
return nil
}
func (c *Client) Show(ctx context.Context, req *ShowRequest) (*ShowResponse, error) {
var resp ShowResponse
if err := c.do(ctx, http.MethodPost, "/api/show", req, &resp); err != nil {
return nil, err
}
return &resp, nil
}
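A short usage sketch for the new `Show` method (the model name is a placeholder for any locally available model):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	resp, err := client.Show(context.Background(), &api.ShowRequest{Name: "llama2"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Template)   // prompt template
	fmt.Println(resp.Parameters) // parameters from the Modelfile
}
```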
func (c *Client) Heartbeat(ctx context.Context) error {
if err := c.do(ctx, http.MethodHead, "/", nil, nil); err != nil {
return err

api/client.py (new file)

@@ -0,0 +1,225 @@
import os
import json
import requests
BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
# Generate a response for a given prompt with a provided model. This is a streaming endpoint, so the result will be a series of responses.
# The final response object will include statistics and additional data from the request. Use the callback function to override
# the default handler.
def generate(model_name, prompt, system=None, template=None, context=None, options=None, callback=None):
try:
url = f"{BASE_URL}/api/generate"
payload = {
"model": model_name,
"prompt": prompt,
"system": system,
"template": template,
"context": context,
"options": options
}
# Remove keys with None values
payload = {k: v for k, v in payload.items() if v is not None}
with requests.post(url, json=payload, stream=True) as response:
response.raise_for_status()
# Creating a variable to hold the context history of the final chunk
final_context = None
# Variable to hold concatenated response strings if no callback is provided
full_response = ""
# Iterating over the response line by line and displaying the details
for line in response.iter_lines():
if line:
# Parsing each line (JSON chunk) and extracting the details
chunk = json.loads(line)
# If a callback function is provided, call it with the chunk
if callback:
callback(chunk)
else:
# If this is not the last chunk, add the "response" field value to full_response and print it
if not chunk.get("done"):
response_piece = chunk.get("response", "")
full_response += response_piece
print(response_piece, end="", flush=True)
# Check if it's the last chunk (done is true)
if chunk.get("done"):
final_context = chunk.get("context")
# Return the full response and the final context
return full_response, final_context
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
return None, None
# Create a model from a Modelfile. Use the callback function to override the default handler.
def create(model_name, model_path, callback=None):
try:
url = f"{BASE_URL}/api/create"
payload = {"name": model_name, "path": model_path}
# Making a POST request with the stream parameter set to True to handle streaming responses
with requests.post(url, json=payload, stream=True) as response:
response.raise_for_status()
# Iterating over the response line by line and displaying the status
for line in response.iter_lines():
if line:
# Parsing each line (JSON chunk) and extracting the status
chunk = json.loads(line)
if callback:
callback(chunk)
else:
print(f"Status: {chunk.get('status')}")
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
# Pull a model from the model registry. Cancelled pulls are resumed from where they left off, and multiple
# calls will share the same download progress. Use the callback function to override the default handler.
def pull(model_name, insecure=False, callback=None):
try:
url = f"{BASE_URL}/api/pull"
payload = {
"name": model_name,
"insecure": insecure
}
# Making a POST request with the stream parameter set to True to handle streaming responses
with requests.post(url, json=payload, stream=True) as response:
response.raise_for_status()
# Iterating over the response line by line and displaying the details
for line in response.iter_lines():
if line:
# Parsing each line (JSON chunk) and extracting the details
chunk = json.loads(line)
# If a callback function is provided, call it with the chunk
if callback:
callback(chunk)
else:
# Print the status message directly to the console
print(chunk.get('status', ''), end='', flush=True)
# If there's layer data, you might also want to print that (adjust as necessary)
if 'digest' in chunk:
print(f" - Digest: {chunk['digest']}", end='', flush=True)
print(f" - Total: {chunk['total']}", end='', flush=True)
print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
else:
print()
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
# Push a model to the model registry. Use the callback function to override the default handler.
def push(model_name, insecure=False, callback=None):
try:
url = f"{BASE_URL}/api/push"
payload = {
"name": model_name,
"insecure": insecure
}
# Making a POST request with the stream parameter set to True to handle streaming responses
with requests.post(url, json=payload, stream=True) as response:
response.raise_for_status()
# Iterating over the response line by line and displaying the details
for line in response.iter_lines():
if line:
# Parsing each line (JSON chunk) and extracting the details
chunk = json.loads(line)
# If a callback function is provided, call it with the chunk
if callback:
callback(chunk)
else:
# Print the status message directly to the console
print(chunk.get('status', ''), end='', flush=True)
# If there's layer data, you might also want to print that (adjust as necessary)
if 'digest' in chunk:
print(f" - Digest: {chunk['digest']}", end='', flush=True)
print(f" - Total: {chunk['total']}", end='', flush=True)
print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
else:
print()
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
# List models that are available locally.
def list():
try:
response = requests.get(f"{BASE_URL}/api/tags")
response.raise_for_status()
data = response.json()
models = data.get('models', [])
return models
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
return None
# Copy a model. Creates a model with another name from an existing model.
def copy(source, destination):
try:
# Create the JSON payload
payload = {
"source": source,
"destination": destination
}
response = requests.post(f"{BASE_URL}/api/copy", json=payload)
response.raise_for_status()
# If the request was successful, return a message indicating that the copy was successful
return "Copy successful"
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
return None
# Delete a model and its data.
def delete(model_name):
try:
url = f"{BASE_URL}/api/delete"
payload = {"name": model_name}
response = requests.delete(url, json=payload)
response.raise_for_status()
return "Delete successful"
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
return None
# Show info about a model.
def show(model_name):
try:
url = f"{BASE_URL}/api/show"
payload = {"name": model_name}
response = requests.post(url, json=payload)
response.raise_for_status()
# Parse the JSON response and return it
data = response.json()
return data
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
return None
def heartbeat():
try:
url = f"{BASE_URL}/"
response = requests.head(url)
response.raise_for_status()
return "Ollama is running"
except requests.exceptions.RequestException as e:
print(f"An error occurred: {e}")
return "Ollama is not running"

api/client_test.go (new file)

@@ -0,0 +1,43 @@
package api
import "testing"
func TestClientFromEnvironment(t *testing.T) {
type testCase struct {
value string
expect string
err error
}
testCases := map[string]*testCase{
"empty": {value: "", expect: "http://127.0.0.1:11434"},
"only address": {value: "1.2.3.4", expect: "http://1.2.3.4:11434"},
"only port": {value: ":1234", expect: "http://:1234"},
"address and port": {value: "1.2.3.4:1234", expect: "http://1.2.3.4:1234"},
"scheme http and address": {value: "http://1.2.3.4", expect: "http://1.2.3.4:80"},
"scheme https and address": {value: "https://1.2.3.4", expect: "https://1.2.3.4:443"},
"scheme, address, and port": {value: "https://1.2.3.4:1234", expect: "https://1.2.3.4:1234"},
"hostname": {value: "example.com", expect: "http://example.com:11434"},
"hostname and port": {value: "example.com:1234", expect: "http://example.com:1234"},
"scheme http and hostname": {value: "http://example.com", expect: "http://example.com:80"},
"scheme https and hostname": {value: "https://example.com", expect: "https://example.com:443"},
"scheme, hostname, and port": {value: "https://example.com:1234", expect: "https://example.com:1234"},
"trailing slash": {value: "example.com/", expect: "http://example.com:11434"},
"trailing slash port": {value: "example.com:1234/", expect: "http://example.com:1234"},
}
for k, v := range testCases {
t.Run(k, func(t *testing.T) {
t.Setenv("OLLAMA_HOST", v.value)
client, err := ClientFromEnvironment()
if err != v.err {
t.Fatalf("expected %s, got %s", v.err, err)
}
if client.base.String() != v.expect {
t.Fatalf("expected %s, got %s", v.expect, client.base.String())
}
})
}
}

api/types.go

@@ -3,11 +3,9 @@ package api
import (
"encoding/json"
"fmt"
"log"
"math"
"os"
"reflect"
"runtime"
"strings"
"time"
)
@@ -38,6 +36,7 @@ type GenerateRequest struct {
System string `json:"system"`
Template string `json:"template"`
Context []int `json:"context,omitempty"`
Stream *bool `json:"stream,omitempty"`
Options map[string]interface{} `json:"options"`
}
@@ -54,14 +53,27 @@ type EmbeddingResponse struct {
}
type CreateRequest struct {
Name string `json:"name"`
Path string `json:"path"`
Name string `json:"name"`
Path string `json:"path"`
Stream *bool `json:"stream,omitempty"`
}
type DeleteRequest struct {
Name string `json:"name"`
}
type ShowRequest struct {
Name string `json:"name"`
}
type ShowResponse struct {
License string `json:"license,omitempty"`
Modelfile string `json:"modelfile,omitempty"`
Parameters string `json:"parameters,omitempty"`
Template string `json:"template,omitempty"`
System string `json:"system,omitempty"`
}
type CopyRequest struct {
Source string `json:"source"`
Destination string `json:"destination"`
@@ -72,13 +84,14 @@ type PullRequest struct {
Insecure bool `json:"insecure,omitempty"`
Username string `json:"username"`
Password string `json:"password"`
Stream *bool `json:"stream,omitempty"`
}
type ProgressResponse struct {
Status string `json:"status"`
Digest string `json:"digest,omitempty"`
Total int `json:"total,omitempty"`
Completed int `json:"completed,omitempty"`
Total int64 `json:"total,omitempty"`
Completed int64 `json:"completed,omitempty"`
}
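To illustrate how the widened `Total`/`Completed` fields are consumed, a minimal pull with a progress callback; this assumes `Client.Pull` takes a per-chunk `ProgressResponse` callback in the same shape as the CLI handlers shown further down:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := api.PullRequest{Name: "llama2"} // placeholder model name
	// Total and Completed are byte counts; int64 keeps large layers from overflowing.
	err = client.Pull(context.Background(), &req, func(resp api.ProgressResponse) error {
		if resp.Total > 0 {
			fmt.Printf("\r%s: %d/%d bytes", resp.Status, resp.Completed, resp.Total)
		} else {
			fmt.Printf("\r%s", resp.Status)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println()
}
```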
type PushRequest struct {
@@ -86,16 +99,18 @@ type PushRequest struct {
Insecure bool `json:"insecure,omitempty"`
Username string `json:"username"`
Password string `json:"password"`
Stream *bool `json:"stream,omitempty"`
}
type ListResponse struct {
Models []ListResponseModel `json:"models"`
Models []ModelResponse `json:"models"`
}
type ListResponseModel struct {
type ModelResponse struct {
Name string `json:"name"`
ModifiedAt time.Time `json:"modified_at"`
Size int `json:"size"`
Size int64 `json:"size"`
Digest string `json:"digest"`
}
type TokenResponse struct {
@@ -105,15 +120,13 @@ type TokenResponse struct {
type GenerateResponse struct {
Model string `json:"model"`
CreatedAt time.Time `json:"created_at"`
Response string `json:"response,omitempty"`
Response string `json:"response"`
Done bool `json:"done"`
Context []int `json:"context,omitempty"`
TotalDuration time.Duration `json:"total_duration,omitempty"`
LoadDuration time.Duration `json:"load_duration,omitempty"`
SampleCount int `json:"sample_count,omitempty"`
SampleDuration time.Duration `json:"sample_duration,omitempty"`
PromptEvalCount int `json:"prompt_eval_count,omitempty"`
PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"`
EvalCount int `json:"eval_count,omitempty"`
@@ -129,15 +142,6 @@ func (r *GenerateResponse) Summary() {
fmt.Fprintf(os.Stderr, "load duration: %v\n", r.LoadDuration)
}
if r.SampleCount > 0 {
fmt.Fprintf(os.Stderr, "sample count: %d token(s)\n", r.SampleCount)
}
if r.SampleDuration > 0 {
fmt.Fprintf(os.Stderr, "sample duration: %s\n", r.SampleDuration)
fmt.Fprintf(os.Stderr, "sample rate: %.2f tokens/s\n", float64(r.SampleCount)/r.SampleDuration.Seconds())
}
if r.PromptEvalCount > 0 {
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
}
@@ -157,15 +161,10 @@ func (r *GenerateResponse) Summary() {
}
}
type Options struct {
Seed int `json:"seed,omitempty"`
// Backend options
UseNUMA bool `json:"numa,omitempty"`
// Model options
// Runner options which must be set when the model is loaded into memory
type Runner struct {
UseNUMA bool `json:"numa,omitempty"`
NumCtx int `json:"num_ctx,omitempty"`
NumKeep int `json:"num_keep,omitempty"`
NumBatch int `json:"num_batch,omitempty"`
NumGQA int `json:"num_gqa,omitempty"`
NumGPU int `json:"num_gpu,omitempty"`
@@ -179,26 +178,34 @@ type Options struct {
EmbeddingOnly bool `json:"embedding_only,omitempty"`
RopeFrequencyBase float32 `json:"rope_frequency_base,omitempty"`
RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
NumThread int `json:"num_thread,omitempty"`
}
// Predict options
RepeatLastN int `json:"repeat_last_n,omitempty"`
RepeatPenalty float32 `json:"repeat_penalty,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
type Options struct {
Runner
// Predict options used at runtime
NumKeep int `json:"num_keep,omitempty"`
Seed int `json:"seed,omitempty"`
NumPredict int `json:"num_predict,omitempty"`
TopK int `json:"top_k,omitempty"`
TopP float32 `json:"top_p,omitempty"`
TFSZ float32 `json:"tfs_z,omitempty"`
TypicalP float32 `json:"typical_p,omitempty"`
RepeatLastN int `json:"repeat_last_n,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
RepeatPenalty float32 `json:"repeat_penalty,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
Mirostat int `json:"mirostat,omitempty"`
MirostatTau float32 `json:"mirostat_tau,omitempty"`
MirostatEta float32 `json:"mirostat_eta,omitempty"`
PenalizeNewline bool `json:"penalize_newline,omitempty"`
Stop []string `json:"stop,omitempty"`
NumThread int `json:"num_thread,omitempty"`
}
var ErrInvalidOpts = fmt.Errorf("invalid options")
func (opts *Options) FromMap(m map[string]interface{}) error {
valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
typeOpts := reflect.TypeOf(opts).Elem() // types of the fields in the options struct
@@ -212,6 +219,7 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
}
}
invalidOpts := []string{}
for key, val := range m {
if opt, ok := jsonOpts[key]; ok {
field := valueOpts.FieldByName(opt.Name)
@@ -229,44 +237,39 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
// when JSON unmarshals numbers, it uses float64, not int
field.SetInt(int64(t))
default:
log.Printf("could not convert model parameter %v to int, skipped", key)
return fmt.Errorf("option %q must be of type integer", key)
}
case reflect.Bool:
val, ok := val.(bool)
if !ok {
log.Printf("could not convert model parameter %v to bool, skipped", key)
continue
return fmt.Errorf("option %q must be of type boolean", key)
}
field.SetBool(val)
case reflect.Float32:
// JSON unmarshals to float64
val, ok := val.(float64)
if !ok {
log.Printf("could not convert model parameter %v to float32, skipped", key)
continue
return fmt.Errorf("option %q must be of type float32", key)
}
field.SetFloat(val)
case reflect.String:
val, ok := val.(string)
if !ok {
log.Printf("could not convert model parameter %v to string, skipped", key)
continue
return fmt.Errorf("option %q must be of type string", key)
}
field.SetString(val)
case reflect.Slice:
// JSON unmarshals to []interface{}, not []string
val, ok := val.([]interface{})
if !ok {
log.Printf("could not convert model parameter %v to slice, skipped", key)
continue
return fmt.Errorf("option %q must be of type array", key)
}
// convert []interface{} to []string
slice := make([]string, len(val))
for i, item := range val {
str, ok := item.(string)
if !ok {
log.Printf("could not convert model parameter %v to slice of strings, skipped", key)
continue
return fmt.Errorf("option %q must be of an array of strings", key)
}
slice[i] = str
}
@@ -275,45 +278,53 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
return fmt.Errorf("unknown type loading config params: %v", field.Kind())
}
}
} else {
invalidOpts = append(invalidOpts, key)
}
}
if len(invalidOpts) > 0 {
return fmt.Errorf("%w: %v", ErrInvalidOpts, strings.Join(invalidOpts, ", "))
}
return nil
}
func DefaultOptions() Options {
return Options{
Seed: -1,
UseNUMA: false,
NumCtx: 2048,
NumKeep: -1,
NumBatch: 512,
NumGPU: 1,
NumGQA: 1,
LowVRAM: false,
F16KV: true,
UseMMap: true,
UseMLock: false,
RopeFrequencyBase: 10000.0,
RopeFrequencyScale: 1.0,
EmbeddingOnly: true,
RepeatLastN: 64,
RepeatPenalty: 1.1,
FrequencyPenalty: 0.0,
PresencePenalty: 0.0,
// options set on request to runner
NumPredict: -1,
NumKeep: -1,
Temperature: 0.8,
TopK: 40,
TopP: 0.9,
TFSZ: 1.0,
TypicalP: 1.0,
RepeatLastN: 64,
RepeatPenalty: 1.1,
PresencePenalty: 0.0,
FrequencyPenalty: 0.0,
Mirostat: 0,
MirostatTau: 5.0,
MirostatEta: 0.1,
PenalizeNewline: true,
Seed: -1,
NumThread: runtime.NumCPU(),
Runner: Runner{
// options set when the model is loaded
NumCtx: 2048,
RopeFrequencyBase: 10000.0,
RopeFrequencyScale: 1.0,
NumBatch: 512,
NumGPU: -1, // -1 here indicates that NumGPU should be set dynamically
NumGQA: 1,
NumThread: 0, // let the runtime decide
LowVRAM: false,
F16KV: true,
UseMLock: false,
UseMMap: true,
UseNUMA: false,
EmbeddingOnly: true,
},
}
}
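A brief sketch of how the new defaults interact with the stricter `FromMap` validation (the option values are arbitrary examples):

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	opts := api.DefaultOptions()

	// Values arrive the way encoding/json delivers them: numbers as float64,
	// string lists as []interface{}.
	err := opts.FromMap(map[string]interface{}{
		"temperature": 0.2,
		"seed":        42.0,
		"stop":        []interface{}{"User:"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(opts.Temperature, opts.Seed, opts.Stop)

	// Unrecognized keys are now rejected instead of being logged and skipped.
	if err := opts.FromMap(map[string]interface{}{"no_such_option": true}); err != nil {
		fmt.Println("rejected:", err) // wraps ErrInvalidOpts
	}
}
```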

app/forge.config.ts

@@ -27,7 +27,6 @@ const config: ForgeConfig = {
path.join(__dirname, './assets/iconDarkTemplate@2x.png'),
path.join(__dirname, './assets/iconDarkUpdateTemplate.png'),
path.join(__dirname, './assets/iconDarkUpdateTemplate@2x.png'),
...(process.platform === 'darwin' ? ['../llm/ggml-metal.metal'] : []),
],
...(process.env.SIGN
? {
@@ -48,16 +47,6 @@ const config: ForgeConfig = {
},
rebuildConfig: {},
makers: [new MakerSquirrel({}), new MakerZIP({}, ['darwin'])],
publishers: [
new PublisherGithub({
repository: {
name: 'ollama',
owner: 'jmorganca',
},
draft: false,
prerelease: true,
}),
],
hooks: {
readPackageJson: async (_, packageJson) => {
return { ...packageJson, version: process.env.VERSION || packageJson.version }

app/package-lock.json (generated; diff suppressed because it is too large)

app/package.json

@@ -46,7 +46,7 @@
"chmodr": "^1.2.0",
"copy-webpack-plugin": "^11.0.0",
"css-loader": "^6.8.1",
"electron": "25.2.0",
"electron": "25.9.2",
"eslint": "^8.43.0",
"eslint-plugin-import": "^2.27.5",
"fork-ts-checker-webpack-plugin": "^7.3.0",

app/src/index.ts

@@ -5,7 +5,7 @@ import winston from 'winston'
import 'winston-daily-rotate-file'
import * as path from 'path'
import { analytics, id } from './telemetry'
import { v4 as uuidv4 } from 'uuid'
import { installed } from './install'
require('@electron/remote/main').initialize()
@@ -158,17 +158,60 @@ function restart() {
app.on('before-quit', () => {
if (proc) {
proc.off('exit', restart)
proc.kill()
proc.kill('SIGINT') // send SIGINT signal to the server, which also stops any loaded llms
}
})
const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${
process.arch
}&version=${app.getVersion()}&id=${id()}`
let latest = ''
async function isNewReleaseAvailable() {
try {
const response = await fetch(updateURL)
if (!response.ok) {
return false
}
if (response.status === 204) {
return false
}
const data = await response.json()
const url = data?.url
if (!url) {
return false
}
if (latest === url) {
return false
}
latest = url
return true
} catch (error) {
logger.error(`update check failed - ${error}`)
return false
}
}
async function checkUpdate() {
const available = await isNewReleaseAvailable()
if (available) {
logger.info('checking for update')
autoUpdater.checkForUpdates()
}
}
function init() {
if (app.isPackaged) {
heartbeat()
autoUpdater.checkForUpdates()
checkUpdate()
setInterval(() => {
heartbeat()
autoUpdater.checkForUpdates()
checkUpdate()
}, 60 * 60 * 1000)
}
@@ -234,28 +277,22 @@ app.on('window-all-closed', () => {
}
})
// In this file you can include the rest of your app's specific main process
// code. You can also put them in separate files and import them here.
let aid = ''
try {
aid = id()
} catch (e) {}
function id(): string {
const id = store.get('id') as string
autoUpdater.setFeedURL({
url: `https://ollama.ai/api/update?os=${process.platform}&arch=${process.arch}&version=${app.getVersion()}&id=${aid}`,
})
if (id) {
return id
}
async function heartbeat() {
analytics.track({
anonymousId: aid,
event: 'heartbeat',
properties: {
version: app.getVersion(),
},
})
const uuid = uuidv4()
store.set('id', uuid)
return uuid
}
autoUpdater.setFeedURL({ url: updateURL })
autoUpdater.on('error', e => {
logger.error(`update check failed - ${e.message}`)
console.error(`update check failed - ${e.message}`)
})

app/src/telemetry.ts (deleted)

@@ -1,19 +0,0 @@
import { Analytics } from '@segment/analytics-node'
import { v4 as uuidv4 } from 'uuid'
import Store from 'electron-store'
const store = new Store()
export const analytics = new Analytics({ writeKey: process.env.TELEMETRY_WRITE_KEY || '<empty>' })
export function id(): string {
const id = store.get('id') as string
if (id) {
return id
}
const uuid = uuidv4()
store.set('id', uuid)
return uuid
}

cmd/cmd.go

@@ -11,25 +11,27 @@ import (
"io"
"log"
"net"
"net/http"
"os"
"os/exec"
"path"
"os/signal"
"path/filepath"
"runtime"
"strings"
"syscall"
"time"
"github.com/chzyer/readline"
"github.com/dustin/go-humanize"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh"
"golang.org/x/term"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/format"
"github.com/jmorganca/ollama/progressbar"
"github.com/jmorganca/ollama/readline"
"github.com/jmorganca/ollama/server"
"github.com/jmorganca/ollama/version"
)
func CreateHandler(cmd *cobra.Command, args []string) error {
@@ -39,7 +41,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
return err
}
client, err := api.FromEnv()
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
@@ -56,20 +58,14 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
spinner.Stop()
}
currentDigest = resp.Digest
switch {
case strings.Contains(resp.Status, "embeddings"):
bar = progressbar.Default(int64(resp.Total), resp.Status)
bar.Set(resp.Completed)
default:
// pulling
bar = progressbar.DefaultBytes(
int64(resp.Total),
resp.Status,
)
bar.Set(resp.Completed)
}
// pulling
bar = progressbar.DefaultBytes(
resp.Total,
resp.Status,
)
bar.Set64(resp.Completed)
} else if resp.Digest == currentDigest && resp.Digest != "" {
bar.Set(resp.Completed)
bar.Set64(resp.Completed)
} else {
currentDigest = ""
if spinner != nil {
@@ -97,26 +93,24 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
}
func RunHandler(cmd *cobra.Command, args []string) error {
mp := server.ParseModelPath(args[0])
fp, err := mp.GetManifestPath(false)
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
_, err = os.Stat(fp)
switch {
case errors.Is(err, os.ErrNotExist):
if err := pull(args[0], false); err != nil {
var apiStatusError api.StatusError
if !errors.As(err, &apiStatusError) {
return err
}
models, err := client.List(context.Background())
if err != nil {
return err
}
if apiStatusError.StatusCode != http.StatusBadGateway {
return err
}
canonicalModelPath := server.ParseModelPath(args[0])
for _, model := range models.Models {
if model.Name == canonicalModelPath.GetShortTagname() {
return RunGenerate(cmd, args)
}
case err != nil:
}
if err := PullHandler(cmd, args); err != nil {
return err
}
@@ -124,7 +118,7 @@ func RunHandler(cmd *cobra.Command, args []string) error {
}
func PushHandler(cmd *cobra.Command, args []string) error {
client, err := api.FromEnv()
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
@@ -142,13 +136,13 @@ func PushHandler(cmd *cobra.Command, args []string) error {
if resp.Digest != currentDigest && resp.Digest != "" {
currentDigest = resp.Digest
bar = progressbar.DefaultBytes(
int64(resp.Total),
resp.Total,
fmt.Sprintf("pushing %s...", resp.Digest[7:19]),
)
bar.Set(resp.Completed)
bar.Set64(resp.Completed)
} else if resp.Digest == currentDigest && resp.Digest != "" {
bar.Set(resp.Completed)
bar.Set64(resp.Completed)
} else {
currentDigest = ""
fmt.Println(resp.Status)
@@ -168,7 +162,7 @@ func PushHandler(cmd *cobra.Command, args []string) error {
}
func ListHandler(cmd *cobra.Command, args []string) error {
client, err := api.FromEnv()
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
@@ -182,12 +176,12 @@ func ListHandler(cmd *cobra.Command, args []string) error {
for _, m := range models.Models {
if len(args) == 0 || strings.HasPrefix(m.Name, args[0]) {
data = append(data, []string{m.Name, humanize.Bytes(uint64(m.Size)), format.HumanTime(m.ModifiedAt, "Never")})
data = append(data, []string{m.Name, m.Digest[:12], humanize.Bytes(uint64(m.Size)), format.HumanTime(m.ModifiedAt, "Never")})
}
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"NAME", "SIZE", "MODIFIED"})
table.SetHeader([]string{"NAME", "ID", "SIZE", "MODIFIED"})
table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetHeaderLine(false)
@@ -201,21 +195,101 @@ func ListHandler(cmd *cobra.Command, args []string) error {
}
func DeleteHandler(cmd *cobra.Command, args []string) error {
client, err := api.FromEnv()
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
req := api.DeleteRequest{Name: args[0]}
if err := client.Delete(context.Background(), &req); err != nil {
for _, name := range args {
req := api.DeleteRequest{Name: name}
if err := client.Delete(context.Background(), &req); err != nil {
return err
}
fmt.Printf("deleted '%s'\n", name)
}
return nil
}
func ShowHandler(cmd *cobra.Command, args []string) error {
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
fmt.Printf("deleted '%s'\n", args[0])
if len(args) != 1 {
return errors.New("missing model name")
}
license, errLicense := cmd.Flags().GetBool("license")
modelfile, errModelfile := cmd.Flags().GetBool("modelfile")
parameters, errParams := cmd.Flags().GetBool("parameters")
system, errSystem := cmd.Flags().GetBool("system")
template, errTemplate := cmd.Flags().GetBool("template")
for _, boolErr := range []error{errLicense, errModelfile, errParams, errSystem, errTemplate} {
if boolErr != nil {
return errors.New("error retrieving flags")
}
}
flagsSet := 0
showType := ""
if license {
flagsSet++
showType = "license"
}
if modelfile {
flagsSet++
showType = "modelfile"
}
if parameters {
flagsSet++
showType = "parameters"
}
if system {
flagsSet++
showType = "system"
}
if template {
flagsSet++
showType = "template"
}
if flagsSet > 1 {
return errors.New("only one of '--license', '--modelfile', '--parameters', '--system', or '--template' can be specified")
} else if flagsSet == 0 {
return errors.New("one of '--license', '--modelfile', '--parameters', '--system', or '--template' must be specified")
}
req := api.ShowRequest{Name: args[0]}
resp, err := client.Show(context.Background(), &req)
if err != nil {
return err
}
switch showType {
case "license":
fmt.Println(resp.License)
case "modelfile":
fmt.Println(resp.Modelfile)
case "parameters":
fmt.Println(resp.Parameters)
case "system":
fmt.Println(resp.System)
case "template":
fmt.Println(resp.Template)
}
return nil
}
func CopyHandler(cmd *cobra.Command, args []string) error {
client, err := api.FromEnv()
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
@@ -238,7 +312,7 @@ func PullHandler(cmd *cobra.Command, args []string) error {
}
func pull(model string, insecure bool) error {
client, err := api.FromEnv()
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
@@ -251,13 +325,13 @@ func pull(model string, insecure bool) error {
if resp.Digest != currentDigest && resp.Digest != "" {
currentDigest = resp.Digest
bar = progressbar.DefaultBytes(
int64(resp.Total),
resp.Total,
fmt.Sprintf("pulling %s...", resp.Digest[7:19]),
)
bar.Set(resp.Completed)
bar.Set64(resp.Completed)
} else if resp.Digest == currentDigest && resp.Digest != "" {
bar.Set(resp.Completed)
bar.Set64(resp.Completed)
} else {
currentDigest = ""
fmt.Println(resp.Status)
@@ -280,7 +354,20 @@ func pull(model string, insecure bool) error {
func RunGenerate(cmd *cobra.Command, args []string) error {
if len(args) > 1 {
// join all args into a single prompt
return generate(cmd, args[0], strings.Join(args[1:], " "))
wordWrap := false
if term.IsTerminal(int(os.Stdout.Fd())) {
wordWrap = true
}
nowrap, err := cmd.Flags().GetBool("nowordwrap")
if err != nil {
return err
}
if nowrap {
wordWrap = false
}
return generate(cmd, args[0], strings.Join(args[1:], " "), wordWrap)
}
if readline.IsTerminal(int(os.Stdin.Fd())) {
@@ -292,146 +379,193 @@ func RunGenerate(cmd *cobra.Command, args []string) error {
type generateContextKey string
func generate(cmd *cobra.Command, model, prompt string) error {
if len(strings.TrimSpace(prompt)) > 0 {
client, err := api.FromEnv()
if err != nil {
return err
func generate(cmd *cobra.Command, model, prompt string, wordWrap bool) error {
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
spinner := NewSpinner("")
go spinner.Spin(60 * time.Millisecond)
var latest api.GenerateResponse
generateContext, ok := cmd.Context().Value(generateContextKey("context")).([]int)
if !ok {
generateContext = []int{}
}
termWidth, _, err := term.GetSize(int(0))
if err != nil {
wordWrap = false
}
cancelCtx, cancel := context.WithCancel(context.Background())
defer cancel()
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT)
var abort bool
go func() {
<-sigChan
cancel()
abort = true
}()
var currentLineLength int
var wordBuffer string
request := api.GenerateRequest{Model: model, Prompt: prompt, Context: generateContext}
fn := func(response api.GenerateResponse) error {
if !spinner.IsFinished() {
spinner.Finish()
}
spinner := NewSpinner("")
go spinner.Spin(60 * time.Millisecond)
latest = response
var latest api.GenerateResponse
if wordWrap {
for _, ch := range response.Response {
if currentLineLength+1 > termWidth-5 {
// backtrack the length of the last word and clear to the end of the line
fmt.Printf("\x1b[%dD\x1b[K\n", len(wordBuffer))
fmt.Printf("%s%c", wordBuffer, ch)
currentLineLength = len(wordBuffer) + 1
} else {
fmt.Print(string(ch))
currentLineLength += 1
generateContext, ok := cmd.Context().Value(generateContextKey("context")).([]int)
if !ok {
generateContext = []int{}
}
request := api.GenerateRequest{Model: model, Prompt: prompt, Context: generateContext}
fn := func(response api.GenerateResponse) error {
if !spinner.IsFinished() {
spinner.Finish()
switch ch {
case ' ':
wordBuffer = ""
case '\n':
currentLineLength = 0
default:
wordBuffer += string(ch)
}
}
}
latest = response
} else {
fmt.Print(response.Response)
}
return nil
}
if err := client.Generate(cancelCtx, &request, fn); err != nil {
if strings.Contains(err.Error(), "context canceled") && abort {
spinner.Finish()
return nil
}
if err := client.Generate(context.Background(), &request, fn); err != nil {
if strings.Contains(err.Error(), "failed to load model") {
// tell the user to check the server log, if it exists locally
home, nestedErr := os.UserHomeDir()
if nestedErr != nil {
// return the original error
return err
}
logPath := filepath.Join(home, ".ollama", "logs", "server.log")
if _, nestedErr := os.Stat(logPath); nestedErr == nil {
err = fmt.Errorf("%w\nFor more details, check the error logs at %s", err, logPath)
}
}
return err
}
fmt.Println()
fmt.Println()
if !latest.Done {
return errors.New("unexpected end of response")
}
verbose, err := cmd.Flags().GetBool("verbose")
if err != nil {
return err
}
if verbose {
latest.Summary()
}
ctx := cmd.Context()
ctx = context.WithValue(ctx, generateContextKey("context"), latest.Context)
cmd.SetContext(ctx)
return err
}
if prompt != "" {
fmt.Println()
fmt.Println()
}
if !latest.Done {
if abort {
return nil
}
return errors.New("unexpected end of response")
}
verbose, err := cmd.Flags().GetBool("verbose")
if err != nil {
return err
}
if verbose {
latest.Summary()
}
ctx := cmd.Context()
ctx = context.WithValue(ctx, generateContextKey("context"), latest.Context)
cmd.SetContext(ctx)
return nil
}
func showLayer(l *server.Layer) {
filename, err := server.GetBlobsPath(l.Digest)
if err != nil {
fmt.Println("Couldn't get layer's path")
return
}
bts, err := os.ReadFile(filename)
if err != nil {
fmt.Println("Couldn't read layer")
return
}
fmt.Println(string(bts))
}
func generateInteractive(cmd *cobra.Command, model string) error {
home, err := os.UserHomeDir()
if err != nil {
// load the model
if err := generate(cmd, model, "", false); err != nil {
return err
}
completer := readline.NewPrefixCompleter(
readline.PcItem("/help"),
readline.PcItem("/list"),
readline.PcItem("/set",
readline.PcItem("history"),
readline.PcItem("nohistory"),
readline.PcItem("verbose"),
readline.PcItem("quiet"),
readline.PcItem("mode",
readline.PcItem("vim"),
readline.PcItem("emacs"),
readline.PcItem("default"),
),
),
readline.PcItem("/show",
readline.PcItem("license"),
readline.PcItem("system"),
readline.PcItem("template"),
),
readline.PcItem("/exit"),
readline.PcItem("/bye"),
)
usage := func() {
fmt.Fprintln(os.Stderr, "commands:")
fmt.Fprintln(os.Stderr, completer.Tree(" "))
fmt.Fprintln(os.Stderr, "Available Commands:")
fmt.Fprintln(os.Stderr, " /set Set session variables")
fmt.Fprintln(os.Stderr, " /show Show model information")
fmt.Fprintln(os.Stderr, " /bye Exit")
fmt.Fprintln(os.Stderr, " /?, /help Help for a command")
fmt.Fprintln(os.Stderr, "")
fmt.Fprintln(os.Stderr, "Use \"\"\" to begin a multi-line message.")
fmt.Fprintln(os.Stderr, "")
}
config := readline.Config{
Prompt: ">>> ",
HistoryFile: filepath.Join(home, ".ollama", "history"),
AutoComplete: completer,
usageSet := func() {
fmt.Fprintln(os.Stderr, "Available Commands:")
fmt.Fprintln(os.Stderr, " /set history Enable history")
fmt.Fprintln(os.Stderr, " /set nohistory Disable history")
fmt.Fprintln(os.Stderr, " /set wordwrap Enable wordwrap")
fmt.Fprintln(os.Stderr, " /set nowordwrap Disable wordwrap")
fmt.Fprintln(os.Stderr, " /set verbose Show LLM stats")
fmt.Fprintln(os.Stderr, " /set quiet Disable LLM stats")
fmt.Fprintln(os.Stderr, "")
}
scanner, err := readline.NewEx(&config)
usageShow := func() {
fmt.Fprintln(os.Stderr, "Available Commands:")
fmt.Fprintln(os.Stderr, " /show license Show model license")
fmt.Fprintln(os.Stderr, " /show modelfile Show Modelfile for this model")
fmt.Fprintln(os.Stderr, " /show parameters Show parameters for this model")
fmt.Fprintln(os.Stderr, " /show system Show system prompt")
fmt.Fprintln(os.Stderr, " /show template Show prompt template")
fmt.Fprintln(os.Stderr, "")
}
prompt := readline.Prompt{
Prompt: ">>> ",
AltPrompt: "... ",
Placeholder: "Send a message (/? for help)",
AltPlaceholder: `Use """ to end multi-line input`,
}
scanner, err := readline.New(prompt)
if err != nil {
return err
}
defer scanner.Close()
var wordWrap bool
termType := os.Getenv("TERM")
if termType == "xterm-256color" {
wordWrap = true
}
// override wrapping if the user turned it off
nowrap, err := cmd.Flags().GetBool("nowordwrap")
if err != nil {
return err
}
if nowrap {
wordWrap = false
}
fmt.Print(readline.StartBracketedPaste)
defer fmt.Printf(readline.EndBracketedPaste)
var multiLineBuffer string
var isMultiLine bool
for {
line, err := scanner.Readline()
switch {
case errors.Is(err, io.EOF):
fmt.Println()
return nil
case errors.Is(err, readline.ErrInterrupt):
if line == "" {
return nil
fmt.Println("\nUse Ctrl-D or /bye to exit.")
}
continue
@@ -442,115 +576,121 @@ func generateInteractive(cmd *cobra.Command, model string) error {
line = strings.TrimSpace(line)
switch {
case isMultiLine:
case scanner.Prompt.UseAlt:
if strings.HasSuffix(line, `"""`) {
isMultiLine = false
scanner.Prompt.UseAlt = false
multiLineBuffer += strings.TrimSuffix(line, `"""`)
line = multiLineBuffer
multiLineBuffer = ""
scanner.SetPrompt(">>> ")
} else {
multiLineBuffer += line + " "
continue
}
case strings.HasPrefix(line, `"""`):
isMultiLine = true
scanner.Prompt.UseAlt = true
multiLineBuffer = strings.TrimPrefix(line, `"""`) + " "
scanner.SetPrompt("... ")
continue
case strings.HasPrefix(line, "/list"):
args := strings.Fields(line)
if err := ListHandler(cmd, args[1:]); err != nil {
return err
}
continue
case strings.HasPrefix(line, "/set"):
args := strings.Fields(line)
if len(args) > 1 {
switch args[1] {
case "history":
scanner.HistoryEnable()
continue
case "nohistory":
scanner.HistoryDisable()
continue
case "wordwrap":
wordWrap = true
fmt.Println("Set 'wordwrap' mode.")
case "nowordwrap":
wordWrap = false
fmt.Println("Set 'nowordwrap' mode.")
case "verbose":
cmd.Flags().Set("verbose", "true")
continue
fmt.Println("Set 'verbose' mode.")
case "quiet":
cmd.Flags().Set("verbose", "false")
continue
case "mode":
if len(args) > 2 {
switch args[2] {
case "vim":
scanner.SetVimMode(true)
continue
case "emacs", "default":
scanner.SetVimMode(false)
continue
default:
usage()
continue
}
} else {
usage()
continue
}
fmt.Println("Set 'quiet' mode.")
default:
fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
}
} else {
usage()
continue
usageSet()
}
case strings.HasPrefix(line, "/show"):
args := strings.Fields(line)
if len(args) > 1 {
mp := server.ParseModelPath(model)
manifest, err := server.GetManifest(mp)
client, err := api.ClientFromEnvironment()
if err != nil {
fmt.Println("error: couldn't get a manifest for this model")
continue
fmt.Println("error: couldn't connect to ollama server")
return err
}
resp, err := client.Show(cmd.Context(), &api.ShowRequest{Name: model})
if err != nil {
fmt.Println("error: couldn't get model")
return err
}
switch args[1] {
case "license":
for _, l := range manifest.Layers {
if l.MediaType == "application/vnd.ollama.image.license" {
showLayer(l)
}
if resp.License == "" {
fmt.Print("No license was specified for this model.\n\n")
} else {
fmt.Println(resp.License)
}
case "modelfile":
fmt.Println(resp.Modelfile)
case "parameters":
if resp.Parameters == "" {
fmt.Print("No parameters were specified for this model.\n\n")
} else {
fmt.Println(resp.Parameters)
}
continue
case "system":
for _, l := range manifest.Layers {
if l.MediaType == "application/vnd.ollama.image.system" {
showLayer(l)
}
if resp.System == "" {
fmt.Print("No system prompt was specified for this model.\n\n")
} else {
fmt.Println(resp.System)
}
continue
case "template":
for _, l := range manifest.Layers {
if l.MediaType == "application/vnd.ollama.image.template" {
showLayer(l)
}
if resp.Template == "" {
fmt.Print("No prompt template was specified for this model.\n\n")
} else {
fmt.Println(resp.Template)
}
continue
default:
usage()
continue
fmt.Printf("Unknown command '/show %s'. Type /? for help\n", args[1])
}
} else {
usageShow()
}
case strings.HasPrefix(line, "/help"), strings.HasPrefix(line, "/?"):
args := strings.Fields(line)
if len(args) > 1 {
switch args[1] {
case "set", "/set":
usageSet()
case "show", "/show":
usageShow()
}
} else {
usage()
continue
}
case line == "/help", line == "/?":
usage()
continue
case line == "/exit", line == "/bye":
return nil
case strings.HasPrefix(line, "/"):
args := strings.Fields(line)
fmt.Printf("Unknown command '%s'. Type /? for help\n", args[0])
}
if err := generate(cmd, model, line); err != nil {
return err
if len(line) > 0 && line[0] != '/' {
if err := generate(cmd, model, line, wordWrap); err != nil {
return err
}
}
}
}
@@ -560,7 +700,7 @@ func generateBatch(cmd *cobra.Command, model string) error {
for scanner.Scan() {
prompt := scanner.Text()
fmt.Printf(">>> %s\n", prompt)
if err := generate(cmd, model, prompt); err != nil {
if err := generate(cmd, model, prompt, false); err != nil {
return err
}
}
@@ -569,28 +709,19 @@ func generateBatch(cmd *cobra.Command, model string) error {
}
func RunServer(cmd *cobra.Command, _ []string) error {
var host, port = "127.0.0.1", "11434"
parts := strings.Split(os.Getenv("OLLAMA_HOST"), ":")
if ip := net.ParseIP(parts[0]); ip != nil {
host = ip.String()
}
if len(parts) > 1 {
port = parts[1]
}
// deprecated: include port in OLLAMA_HOST
if p := os.Getenv("OLLAMA_PORT"); p != "" {
port = p
}
err := initializeKeypair()
host, port, err := net.SplitHostPort(os.Getenv("OLLAMA_HOST"))
if err != nil {
host, port = "127.0.0.1", "11434"
if ip := net.ParseIP(strings.Trim(os.Getenv("OLLAMA_HOST"), "[]")); ip != nil {
host = ip.String()
}
}
if err := initializeKeypair(); err != nil {
return err
}
ln, err := net.Listen("tcp", fmt.Sprintf("%s:%s", host, port))
ln, err := net.Listen("tcp", net.JoinHostPort(host, port))
if err != nil {
return err
}
@@ -600,6 +731,21 @@ func RunServer(cmd *cobra.Command, _ []string) error {
origins = strings.Split(o, ",")
}
if noprune := os.Getenv("OLLAMA_NOPRUNE"); noprune == "" {
if err := server.PruneLayers(); err != nil {
return err
}
manifestsPath, err := server.GetManifestPath()
if err != nil {
return err
}
if err := server.PruneDirectory(manifestsPath); err != nil {
return err
}
}
return server.Serve(ln, origins)
}
@@ -625,12 +771,12 @@ func initializeKeypair() error {
return err
}
err = os.MkdirAll(path.Dir(privKeyPath), 0o700)
err = os.MkdirAll(filepath.Dir(privKeyPath), 0o755)
if err != nil {
return fmt.Errorf("could not create directory %w", err)
}
err = os.WriteFile(privKeyPath, pem.EncodeToMemory(privKeyBytes), 0600)
err = os.WriteFile(privKeyPath, pem.EncodeToMemory(privKeyBytes), 0o600)
if err != nil {
return err
}
@@ -642,7 +788,7 @@ func initializeKeypair() error {
pubKeyData := ssh.MarshalAuthorizedKey(sshPrivateKey.PublicKey())
err = os.WriteFile(pubKeyPath, pubKeyData, 0644)
err = os.WriteFile(pubKeyPath, pubKeyData, 0o644)
if err != nil {
return err
}
@@ -684,7 +830,7 @@ func startMacApp(client *api.Client) error {
}
func checkServerHeartbeat(_ *cobra.Command, _ []string) error {
client, err := api.FromEnv()
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
@@ -714,6 +860,7 @@ func NewCLI() *cobra.Command {
CompletionOptions: cobra.CompletionOptions{
DisableDefaultCmd: true,
},
Version: version.Version,
}
cobra.EnableCommandSorting = false
@@ -721,13 +868,27 @@ func NewCLI() *cobra.Command {
createCmd := &cobra.Command{
Use: "create MODEL",
Short: "Create a model from a Modelfile",
Args: cobra.MinimumNArgs(1),
Args: cobra.ExactArgs(1),
PreRunE: checkServerHeartbeat,
RunE: CreateHandler,
}
createCmd.Flags().StringP("file", "f", "Modelfile", "Name of the Modelfile (default \"Modelfile\")")
showCmd := &cobra.Command{
Use: "show MODEL",
Short: "Show information for a model",
Args: cobra.ExactArgs(1),
PreRunE: checkServerHeartbeat,
RunE: ShowHandler,
}
showCmd.Flags().Bool("license", false, "Show license of a model")
showCmd.Flags().Bool("modelfile", false, "Show Modelfile of a model")
showCmd.Flags().Bool("parameters", false, "Show parameters of a model")
showCmd.Flags().Bool("template", false, "Show template of a model")
showCmd.Flags().Bool("system", false, "Show system prompt of a model")
runCmd := &cobra.Command{
Use: "run MODEL [PROMPT]",
Short: "Run a model",
@@ -737,18 +898,21 @@ func NewCLI() *cobra.Command {
}
runCmd.Flags().Bool("verbose", false, "Show timings for response")
runCmd.Flags().Bool("insecure", false, "Use an insecure registry")
runCmd.Flags().Bool("nowordwrap", false, "Don't wrap words to the next line automatically")
serveCmd := &cobra.Command{
Use: "serve",
Aliases: []string{"start"},
Short: "Start ollama",
Args: cobra.ExactArgs(0),
RunE: RunServer,
}
pullCmd := &cobra.Command{
Use: "pull MODEL",
Short: "Pull a model from a registry",
Args: cobra.MinimumNArgs(1),
Args: cobra.ExactArgs(1),
PreRunE: checkServerHeartbeat,
RunE: PullHandler,
}
@@ -758,7 +922,7 @@ func NewCLI() *cobra.Command {
pushCmd := &cobra.Command{
Use: "push MODEL",
Short: "Push a model to a registry",
Args: cobra.MinimumNArgs(1),
Args: cobra.ExactArgs(1),
PreRunE: checkServerHeartbeat,
RunE: PushHandler,
}
@@ -774,15 +938,15 @@ func NewCLI() *cobra.Command {
}
copyCmd := &cobra.Command{
Use: "cp",
Use: "cp SOURCE TARGET",
Short: "Copy a model",
Args: cobra.MinimumNArgs(2),
Args: cobra.ExactArgs(2),
PreRunE: checkServerHeartbeat,
RunE: CopyHandler,
}
deleteCmd := &cobra.Command{
Use: "rm",
Use: "rm MODEL [MODEL...]",
Short: "Remove a model",
Args: cobra.MinimumNArgs(1),
PreRunE: checkServerHeartbeat,
@@ -792,6 +956,7 @@ func NewCLI() *cobra.Command {
rootCmd.AddCommand(
serveCmd,
createCmd,
showCmd,
runCmd,
pullCmd,
pushCmd,


@@ -1,4 +0,0 @@
#!/bin/bash
cmake -B build
make -C build

docs/api.md

@@ -3,26 +3,32 @@
## Endpoints
- [Generate a completion](#generate-a-completion)
- [Create a model](#create-a-model)
- [List local models](#list-local-models)
- [Copy a model](#copy-a-model)
- [Delete a model](#delete-a-model)
- [Pull a model](#pull-a-model)
- [Generate embeddings](#generate-embeddings)
- [Create a Model](#create-a-model)
- [List Local Models](#list-local-models)
- [Show Model Information](#show-model-information)
- [Copy a Model](#copy-a-model)
- [Delete a Model](#delete-a-model)
- [Pull a Model](#pull-a-model)
- [Push a Model](#push-a-model)
- [Generate Embeddings](#generate-embeddings)
## Conventions
### Model names
Model names follow a `model:tag` format. Some examples are `orca:3b-q4_1` and `llama2:70b`. The tag is optional and if not provided will default to `latest`. The tag is used to identify a specific version.
Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` and `llama2:70b`. The tag is optional and, if not provided, will default to `latest`. The tag is used to identify a specific version.
### Durations
All durations are returned in nanoseconds.
### Streaming responses
Certain endpoints stream responses as JSON objects delineated with the newline (`\n`) character.
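For example, a streamed response is a sequence of newline-delimited objects like the following (values abridged for illustration; the full field set is shown in the endpoint sections below):
```json
{"model":"llama2:7b","created_at":"2023-08-04T19:22:45.499127Z","response":"The","done":false}
{"model":"llama2:7b","created_at":"2023-08-04T19:22:45.499127Z","response":" sky","done":false}
```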
## Generate a completion
```
```shell
POST /api/generate
```
@@ -33,16 +39,17 @@ Generate a response for a given prompt with a provided model. This is a streamin
- `model`: (required) the [model name](#model-names)
- `prompt`: the prompt to generate a response for
Advanced parameters:
Advanced parameters (optional):
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
- `system`: system prompt (overrides what is defined in the `Modelfile`)
- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
- `context`: the context parameter returned from a previous request to `/generate`; this can be used to keep a short conversational memory
- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
### Request
```
```shell
curl -X POST http://localhost:11434/api/generate -d '{
"model": "llama2:7b",
"prompt": "Why is the sky blue?"
@@ -73,6 +80,7 @@ The final response in the stream also includes additional data about the generat
- `eval_count`: number of tokens in the response
- `eval_duration`: time in nanoseconds spent generating the response
- `context`: an encoding of the conversation used in this response; this can be sent in the next request to keep a conversational memory
- `response`: empty if the response was streamed; if not streamed, this will contain the full response
To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` by `eval_duration` and multiply by `10^9` (durations are reported in nanoseconds).
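As a sketch of how `context` is used (the model, prompt, and context values here are placeholders), the array returned in one response can be passed back in the next request to continue the conversation:
```shell
curl -X POST http://localhost:11434/api/generate -d '{
  "model": "llama2:7b",
  "prompt": "What about at sunset?",
  "context": [1, 2, 3]
}'
```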
@@ -80,6 +88,7 @@ To calculate how fast the response is generated in tokens per second (token/s),
{
"model": "llama2:7b",
"created_at": "2023-08-04T19:22:45.499127Z",
"response": "",
"context": [1, 2, 3],
"done": true,
"total_duration": 5589157167,
@@ -95,7 +104,7 @@ To calculate how fast the response is generated in tokens per second (token/s),
## Create a Model
```
```shell
POST /api/create
```
@@ -105,10 +114,11 @@ Create a model from a [`Modelfile`](./modelfile.md)
- `name`: name of the model to create
- `path`: path to the Modelfile
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects
### Request
```
```shell
curl -X POST http://localhost:11434/api/create -d '{
"name": "mario",
"path": "~/Modelfile"
@@ -117,7 +127,7 @@ curl -X POST http://localhost:11434/api/create -d '{
### Response
A stream of JSON objects. When finished, `status` is `success`
A stream of JSON objects. When finished, `status` is `success`.
```json
{
@@ -127,7 +137,7 @@ A stream of JSON objects. When finished, `status` is `success`
## List Local Models
```
```shell
GET /api/tags
```
@@ -135,7 +145,7 @@ List models that are available locally.
### Request
```
```shell
curl http://localhost:11434/api/tags
```
@@ -158,9 +168,40 @@ curl http://localhost:11434/api/tags
}
```
## Show Model Information
```shell
POST /api/show
```
Show details about a model including modelfile, template, parameters, license, and system prompt.
### Parameters
- `name`: name of the model to show
### Request
```shell
curl http://localhost:11434/api/show -d '{
"name": "llama2:7b"
}'
```
### Response
```json
{
"license": "<contents of license block>",
"modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llama2:latest\n\nFROM /Users/username/.ollama/models/blobs/sha256:8daa9615cce30c259a9555b1cc250d461d1bc69980a274b44d7eda0be78076d8\nTEMPLATE \"\"\"[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] \"\"\"\nSYSTEM \"\"\"\"\"\"\nPARAMETER stop [INST]\nPARAMETER stop [/INST]\nPARAMETER stop <<SYS>>\nPARAMETER stop <</SYS>>\n",
"parameters": "stop [INST]\nstop [/INST]\nstop <<SYS>>\nstop <</SYS>>",
"template": "[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>\n\n{{ end }}{{ .Prompt }} [/INST] "
}
```
## Copy a Model
```
```shell
POST /api/copy
```
@@ -168,7 +209,7 @@ Copy a model. Creates a model with another name from an existing model.
### Request
```
```shell
curl http://localhost:11434/api/copy -d '{
"source": "llama2:7b",
"destination": "llama2-backup"
@@ -177,7 +218,7 @@ curl http://localhost:11434/api/copy -d '{
## Delete a Model
```
```shell
DELETE /api/delete
```
@@ -189,7 +230,7 @@ Delete a model and its data.
### Request
```
```shell
curl -X DELETE http://localhost:11434/api/delete -d '{
"name": "llama2:13b"
}'
@@ -197,19 +238,21 @@ curl -X DELETE http://localhost:11434/api/delete -d '{
## Pull a Model
```
```shell
POST /api/pull
```
Download a model from a the model registry. Cancelled pulls are resumed from where they left off, and multiple calls to will share the same download progress.
Download a model from the ollama library. Cancelled pulls are resumed from where they left off, and multiple calls will share the same download progress.
### Parameters
- `name`: name of the model to pull
- `insecure`: (optional) allow insecure connections to the library. Only use this if you are pulling from your own library during development.
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects
### Request
```
```shell
curl -X POST http://localhost:11434/api/pull -d '{
"name": "llama2:7b"
}'
@@ -225,9 +268,66 @@ curl -X POST http://localhost:11434/api/pull -d '{
}
```
## Push a Model
```shell
POST /api/push
```
Upload a model to a model library. Requires registering for ollama.ai and adding a public key first.
### Parameters
- `name`: name of the model to push in the form of `<namespace>/<model>:<tag>`
- `insecure`: (optional) allow insecure connections to the library. Only use this if you are pushing to your library during development.
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects
### Request
```shell
curl -X POST http://localhost:11434/api/push -d '{
"name": "mattw/pygmalion:latest"
}'
```
### Response
Streaming response that starts with:
```json
{ "status": "retrieving manifest" }
```
and then:
```json
{
"status": "starting upload",
"digest": "sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
"total": 1928429856
}
```
Then there is a series of uploading responses:
```json
{
"status": "starting upload",
"digest": "sha256:bc07c81de745696fdf5afca05e065818a8149fb0c77266fb584d9b2cba3711ab",
"total": 1928429856
}
```
Finally, when the upload is complete:
```json
{"status":"pushing manifest"}
{"status":"success"}
```
## Generate Embeddings
```
```shell
POST /api/embeddings
```
@@ -238,9 +338,13 @@ Generate embeddings from a model
- `model`: name of model to generate embeddings from
- `prompt`: text to generate embeddings for
Advanced parameters:
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
### Request
```
```shell
curl -X POST http://localhost:11434/api/embeddings -d '{
"model": "llama2:7b",
"prompt": "Here is an article about llamas..."
@@ -251,7 +355,7 @@ curl -X POST http://localhost:11434/api/embeddings -d '{
```json
{
"embeddings": [
"embedding": [
0.5670403838157654, 0.009260174818336964, 0.23178744316101074, -0.2916173040866852, -0.8924556970596313,
0.8785552978515625, -0.34576427936553955, 0.5742510557174683, -0.04222835972905159, -0.137906014919281
]

View File

@@ -1,44 +1,39 @@
# Development
- Install cmake or (optionally, required tools for GPUs)
- run `go generate ./...`
- run `go build .`
Install required tools:
```
brew install go
- cmake version 3.24 or higher
- go version 1.20 or higher
- gcc version 11.4.0 or higher
```bash
brew install go cmake gcc
```
Enable CGO:
Get the required libraries:
```bash
go generate ./...
```
export CGO_ENABLED=1
```
You will also need a C/C++ compiler such as GCC for MacOS and Linux or Mingw-w64 GCC for Windows.
Then build ollama:
```
```bash
go build .
```
Now you can run `ollama`:
```
```bash
./ollama
```
## Releasing
## Building on Linux with GPU support
To release a new version of Ollama you'll need to set some environment variables:
- `GITHUB_TOKEN`: your GitHub token
- `APPLE_IDENTITY`: the Apple signing identity (macOS only)
- `APPLE_ID`: your Apple ID
- `APPLE_PASSWORD`: your Apple ID app-specific password
- `APPLE_TEAM_ID`: the Apple team ID for the signing identity
- `TELEMETRY_WRITE_KEY`: segment write key for telemetry
Then run the publish script with the target version:
```
VERSION=0.0.2 ./scripts/publish.sh
```
- Install cmake and nvidia-cuda-toolkit
- run `go generate ./...`
- run `go build .`

View File

@@ -1,17 +1,79 @@
# FAQ
## How can I expose the Ollama server?
## How can I view the logs?
On macOS:
```
cat ~/.ollama/logs/server.log
```
On Linux:
```
journalctl -u ollama
```
If you're running `ollama serve` directly, the logs will be printed to the console.
## How can I expose Ollama on my network?
Ollama binds to 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
On macOS:
```bash
OLLAMA_HOST=0.0.0.0:11435 ollama serve
```
By default, Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0`. To support more origins, you can use the `OLLAMA_ORIGINS` environment variable:
On Linux:
Create a `systemd` drop-in directory and set `Environment=OLLAMA_HOST`
```bash
mkdir -p /etc/systemd/system/ollama.service.d
echo "[Service]" >>/etc/systemd/system/ollama.service.d/environment.conf
```
```bash
echo "Environment=OLLAMA_HOST=0.0.0.0:11434" >>/etc/systemd/system/ollama.service.d/environment.conf
```
Reload `systemd` and restart Ollama:
```bash
systemctl daemon-reload
systemctl restart ollama
```
## How can I allow additional web origins to access Ollama?
Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0` by default. Add additional origins with the `OLLAMA_ORIGINS` environment variable:
On macOS:
```bash
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
```
On Linux:
```bash
echo "Environment=OLLAMA_ORIGINS=http://129.168.1.1:*,https://example.com" >>/etc/systemd/system/ollama.service.d/environment.conf
```
Reload `systemd` and restart Ollama:
```bash
systemctl daemon-reload
systemctl restart ollama
```
## Where are models stored?
Raw model data is stored under `~/.ollama/models`.
- macOS: Raw model data is stored under `~/.ollama/models`.
- Linux: Raw model data is stored under `/usr/share/ollama/.ollama/models`
### How can I change where Ollama stores models?
To modify where models are stored, you can use the `OLLAMA_MODELS` environment variable. Note that on Linux this means defining `OLLAMA_MODELS` in a drop-in `/etc/systemd/system/ollama.service.d` service file, reloading systemd, and restarting the ollama service.
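For example, on Linux this could look like the following, reusing the drop-in file pattern from the sections above (`/path/to/models` is a placeholder and must be readable and writable by the user the ollama service runs as):
```bash
mkdir -p /etc/systemd/system/ollama.service.d
echo "[Service]" >>/etc/systemd/system/ollama.service.d/environment.conf
echo "Environment=OLLAMA_MODELS=/path/to/models" >>/etc/systemd/system/ollama.service.d/environment.conf
systemctl daemon-reload
systemctl restart ollama
```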

198
docs/import.md Normal file
View File

@@ -0,0 +1,198 @@
# Import a model
This guide walks through importing a GGUF, PyTorch or Safetensors model.
## Importing (GGUF)
### Step 1: Write a `Modelfile`
Start by creating a `Modelfile`. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.
```
FROM ./mistral-7b-v0.1.Q4_0.gguf
```
(Optional) Many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
```
FROM ./q4_0.bin
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```
### Step 2: Create the Ollama model
Finally, create a model from your `Modelfile`:
```
ollama create example -f Modelfile
```
### Step 3: Run your model
Next, test the model with `ollama run`:
```
ollama run example "What is your favourite condiment?"
```
## Importing (PyTorch & Safetensors)
### Supported models
Ollama supports a set of model architectures, with support for more coming soon:
- Llama & Mistral
- Falcon & RW
- GPT-NeoX
- BigCode
To view a model's architecture, check the `config.json` file in its HuggingFace repo. You should see an entry under `architectures` (e.g. `LlamaForCausalLM`).
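For example, a Llama-based model's `config.json` typically contains an entry like this (abridged):
```json
{
  "architectures": [
    "LlamaForCausalLM"
  ]
}
```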
### Step 1: Clone the HuggingFace repository (optional)
If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.
```
git lfs install
git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
cd Mistral-7B-Instruct-v0.1
```
### Step 2: Convert and quantize to a `.bin` file (optional, for PyTorch and Safetensors)
If the model is in PyTorch or Safetensors format, a [Docker image](https://hub.docker.com/r/ollama/quantize) with the tooling required to convert and quantize models is available.
First, install [Docker](https://www.docker.com/get-started/).
Next, to convert and quantize your model, run:
```
docker run --rm -v .:/model ollama/quantize -q q4_0 /model
```
This will output two files into the directory:
- `f16.bin`: the model converted to GGUF
- `q4_0.bin`: the model quantized to 4-bit (we will use this file to create the Ollama model)
### Step 3: Write a `Modelfile`
Next, create a `Modelfile` for your model:
```
FROM ./q4_0.bin
```
(Optional) Many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
```
FROM ./q4_0.bin
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```
### Step 4: Create the Ollama model
Finally, create a model from your `Modelfile`:
```
ollama create example -f Modelfile
```
### Step 5: Run your model
Next, test the model with `ollama run`:
```
ollama run example "What is your favourite condiment?"
```
## Publishing your model (optional early alpha)
Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:
1. Create [an account](https://ollama.ai/signup)
2. Run `cat ~/.ollama/id_ed25519.pub` to view your Ollama public key. Copy this to the clipboard.
3. Add your public key to your [Ollama account](https://ollama.ai/settings/keys)
Next, copy your model to your username's namespace:
```
ollama cp example <your username>/example
```
Then push the model:
```
ollama push <your username>/example
```
After publishing, your model will be available at `https://ollama.ai/<your username>/example`.
## Quantization reference
The quantization options are as follows, from highest to lowest level of quantization (see the usage sketch after this list). Note: some architectures such as Falcon do not support K quants.
- `q2_K`
- `q3_K`
- `q3_K_S`
- `q3_K_M`
- `q3_K_L`
- `q4_0` (recommended)
- `q4_1`
- `q4_K`
- `q4_K_S`
- `q4_K_M`
- `q5_0`
- `q5_1`
- `q5_K`
- `q5_K_S`
- `q5_K_M`
- `q6_K`
- `q8_0`
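As a sketch (assuming the `ollama/quantize` image from Step 2 above accepts these names via the same `-q` flag), choosing a different level only changes the `-q` argument:
```shell
docker run --rm -v .:/model ollama/quantize -q q5_K_M /model
```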
## Manually converting & quantizing models
### Prerequisites
Start by cloning the `llama.cpp` repo to your machine in another directory:
```
git clone https://github.com/ggerganov/llama.cpp.git
cd llama.cpp
```
Next, install the Python dependencies:
```
pip install -r requirements.txt
```
Finally, build the `quantize` tool:
```
make quantize
```
### Convert the model
Run the correct conversion script for your model architecture:
```shell
# LlamaForCausalLM or MistralForCausalLM
python convert.py <path to model directory>
# FalconForCausalLM
python convert-falcon-hf-to-gguf.py <path to model directory>
# GPTNeoXForCausalLM
python convert-gptneox-hf-to-gguf.py <path to model directory>
# GPTBigCodeForCausalLM
python convert-starcoder-hf-to-gguf.py <path to model directory>
```
### Quantize the model
```
quantize <path to model dir>/ggml-model-f32.bin <path to model dir>/q4_0.bin q4_0
```

116
docs/linux.md Normal file
View File

@@ -0,0 +1,116 @@
# Ollama on Linux
## Install
Install Ollama by running this one-liner:
```bash
curl https://ollama.ai/install.sh | sh
```
## Manual install
### Download the `ollama` binary
Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:
```bash
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```
### Adding Ollama as a startup service (recommended)
Create a user for Ollama:
```bash
sudo useradd -r -s /bin/false -m -d /usr/share/ollama ollama
```
Create a service file in `/etc/systemd/system/ollama.service`:
```ini
[Unit]
Description=Ollama Service
After=network-online.target
[Service]
ExecStart=/usr/bin/ollama serve
User=ollama
Group=ollama
Restart=always
RestartSec=3
[Install]
WantedBy=default.target
```
Then start the service:
```bash
sudo systemctl daemon-reload
sudo systemctl enable ollama
```
### Install CUDA drivers (optional for Nvidia GPUs)
[Download and install](https://developer.nvidia.com/cuda-downloads) CUDA.
Verify that the drivers are installed by running the following command, which should print details about your GPU:
```bash
nvidia-smi
```
### Start Ollama
Start Ollama using `systemd`:
```bash
sudo systemctl start ollama
```
## Update
Update ollama by running the install script again:
```bash
curl https://ollama.ai/install.sh | sh
```
Or by downloading the ollama binary:
```bash
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```
## Viewing logs
To view logs of Ollama running as a startup service, run:
```bash
journalctl -u ollama
```
## Uninstall
Remove the ollama service:
```bash
sudo systemctl stop ollama
sudo systemctl disable ollama
sudo rm /etc/systemd/system/ollama.service
```
Remove the ollama binary from your bin directory (either `/usr/local/bin`, `/usr/bin`, or `/bin`):
```bash
sudo rm $(which ollama)
```
Remove the downloaded models and Ollama service user:
```bash
sudo rm -r /usr/share/ollama
sudo userdel ollama
```

View File

@@ -1,6 +1,6 @@
# Ollama Model File
> Note: this model file syntax is in development
> Note: this `Modelfile` syntax is in development
A model file is the blueprint to create and share models with Ollama.
@@ -12,7 +12,6 @@ A model file is the blueprint to create and share models with Ollama.
- [FROM (Required)](#from-required)
- [Build from llama2](#build-from-llama2)
- [Build from a bin file](#build-from-a-bin-file)
- [EMBED](#embed)
- [PARAMETER](#parameter)
- [Valid Parameters and Values](#valid-parameters-and-values)
- [TEMPLATE](#template)
@@ -24,7 +23,7 @@ A model file is the blueprint to create and share models with Ollama.
## Format
The format of the Modelfile:
The format of the `Modelfile`:
```modelfile
# comment
@@ -42,9 +41,9 @@ INSTRUCTION arguments
## Examples
An example of a model file creating a mario blueprint:
An example of a `Modelfile` creating a mario blueprint:
```
```modelfile
FROM llama2
# sets the temperature to 1 [higher is more creative, lower is more coherent]
PARAMETER temperature 1
@@ -57,9 +56,9 @@ SYSTEM You are Mario from super mario bros, acting as an assistant.
To use this:
1. Save it as a file (eg. `Modelfile`)
2. `ollama create NAME -f <location of the file eg. ./Modelfile>'`
3. `ollama run NAME`
1. Save it as a file (e.g. `Modelfile`)
2. `ollama create choose-a-model-name -f <location of the file e.g. ./Modelfile>`
3. `ollama run choose-a-model-name`
4. Start using the model!
More examples are available in the [examples directory](../examples).
@@ -68,44 +67,34 @@ More examples are available in the [examples directory](../examples).
### FROM (Required)
The FROM instruction defines the base model to use when creating a model.
The `FROM` instruction defines the base model to use when creating a model.
```
```modelfile
FROM <model name>:<tag>
```
#### Build from llama2
```
```modelfile
FROM llama2
```
A list of available base models:
<https://github.com/jmorganca/ollama#model-library>
#### Build from a bin file
#### Build from a `bin` file
```
```modelfile
FROM ./ollama-model.bin
```
This bin file location should be specified as an absolute path or relative to the Modelfile location.
### EMBED
The EMBED instruction is used to add embeddings of files to a model. This is useful for adding custom data that the model can reference when generating an answer. Note that currently only text files are supported, formatted with each line as one embedding.
```
FROM <model name>:<tag>
EMBED <file path>.txt
EMBED <different file path>.txt
EMBED <path to directory>/*.txt
```
This bin file location should be specified as an absolute path or relative to the `Modelfile` location.
### PARAMETER
The `PARAMETER` instruction defines a parameter that can be set when the model is run.
```
```modelfile
PARAMETER <parameter> <parametervalue>
```
@@ -118,19 +107,21 @@ PARAMETER <parameter> <parametervalue>
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
| num_gqa | The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b | int | num_gqa 1 |
| num_gpu | The number of GPUs to use. On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 1 |
| num_gpu | The number of layers to send to the GPU(s). On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 50 |
| num_thread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | num_thread 8 |
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
| stop | Sets the stop tokens to use. | string | stop "AI assistant:" |
| seed | Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) | int | seed 42 |
| stop | Sets the stop sequences to use. | string | stop "AI assistant:" |
| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 |
| num_predict | Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) | int | num_predict 42 |
| top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 |
| top_p | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) | float | top_p 0.9 |
### TEMPLATE
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific.
The `TEMPLATE` instruction defines the full prompt template to be passed into the model. It may optionally include a system prompt and the user's prompt. This is used to create a full custom prompt, and the syntax may be model specific. You can usually find the template for a given model in that model's readme.
#### Template Variables
@@ -140,7 +131,7 @@ PARAMETER <parameter> <parametervalue>
| `{{ .Prompt }}` | The incoming prompt; this is not specified in the model file and will be set based on input. |
| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |
```
```modelfile
TEMPLATE """
{{- if .First }}
### System:
@@ -160,7 +151,7 @@ SYSTEM """<system message>"""
The `SYSTEM` instruction specifies the system prompt to be used in the template, if applicable.
```
```modelfile
SYSTEM """<system message>"""
```
@@ -168,7 +159,7 @@ SYSTEM """<system message>"""
The `ADAPTER` instruction specifies the LoRA adapter to apply to the base model. The value of this instruction should be an absolute path or a path relative to the Modelfile, and the file must be in GGML format. The adapter should be tuned from the base model, otherwise the behaviour is undefined.
```
```modelfile
ADAPTER ./ollama-lora.bin
```
@@ -176,7 +167,7 @@ ADAPTER ./ollama-lora.bin
The `LICENSE` instruction allows you to specify the legal license under which the model used with this Modelfile is shared or distributed.
```
```modelfile
LICENSE """
<license text>
"""
@@ -184,5 +175,5 @@ LICENSE """
## Notes
- the **modelfile is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
- the **`Modelfile` is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
- Instructions can be in any order. In the examples, we start with the FROM instruction to keep it easily readable.
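As an illustration of these notes, the following sketch (adapted from the mario example above) is treated the same as its uppercase, `FROM`-first equivalent:
```modelfile
# instruction keywords are case insensitive and may appear in any order
parameter temperature 1
from llama2
system You are Mario from super mario bros, acting as an assistant.
```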

171
examples/.gitignore vendored Normal file
View File

@@ -0,0 +1,171 @@
node_modules
# OSX
.DS_STORE
# Models
models/
# Local Chroma db
.chroma/
db/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

View File

@@ -1,15 +1,3 @@
# Examples
This directory contains different examples of using Ollama
To create a model:
```
ollama create example -f <example file>
```
To run a model:
```
ollama run example
```
This directory contains different examples of using Ollama.

View File

View File

@@ -0,0 +1,27 @@
package main
import (
"bytes"
"fmt"
"io"
"log"
"net/http"
"os"
)
func main() {
	// Minimal request body; add a "prompt" field to generate a completion for a prompt.
	body := []byte(`{"model":"mistral"}`)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewBuffer(body))
	if err != nil {
		fmt.Print(err.Error())
		os.Exit(1)
	}
	defer resp.Body.Close()
	// Read the entire (streamed) response body and print it.
	responseData, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(responseData))
}

View File

@@ -0,0 +1,21 @@
# LangChain
This example is a basic "hello world" of using LangChain with Ollama in Node.js and TypeScript.
## Setup
```shell
npm install
```
## Run
```shell
ts-node main.ts
```
Running this example will print the response for "hello":
```plaintext
Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat?
```

View File

@@ -0,0 +1,15 @@
import { Ollama } from 'langchain/llms/ollama';

async function main() {
  const ollama = new Ollama({
    model: 'mistral'
    // other parameters can be found at https://js.langchain.com/docs/api/llms_ollama/classes/Ollama
  })

  const stream = await ollama.stream("Hello");
  for await (const chunk of stream) {
    process.stdout.write(chunk);
  }
}

main();

View File

@@ -0,0 +1,997 @@
{
"name": "with-langchain-typescript-simplegenerate",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"dependencies": {
"langchain": "^0.0.165"
},
"devDependencies": {
"typescript": "^5.2.2"
}
},
"node_modules/@anthropic-ai/sdk": {
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/@anthropic-ai/sdk/-/sdk-0.6.2.tgz",
"integrity": "sha512-fB9PUj9RFT+XjkL+E9Ol864ZIJi+1P8WnbHspN3N3/GK2uSzjd0cbVIKTGgf4v3N8MwaQu+UWnU7C4BG/fap/g==",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"digest-fetch": "^1.3.0",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
}
},
"node_modules/@types/node": {
"version": "18.18.4",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.18.4.tgz",
"integrity": "sha512-t3rNFBgJRugIhackit2mVcLfF6IRc0JE4oeizPQL8Zrm8n2WY/0wOdpOPhdtG0V9Q2TlW/axbF1MJ6z+Yj/kKQ=="
},
"node_modules/@types/node-fetch": {
"version": "2.6.6",
"resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.6.tgz",
"integrity": "sha512-95X8guJYhfqiuVVhRFxVQcf4hW/2bCuoPwDasMf/531STFoNoWTT7YDnWdXHEZKqAGUigmpG31r2FE70LwnzJw==",
"dependencies": {
"@types/node": "*",
"form-data": "^4.0.0"
}
},
"node_modules/@types/retry": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
"integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA=="
},
"node_modules/@types/uuid": {
"version": "9.0.5",
"resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.5.tgz",
"integrity": "sha512-xfHdwa1FMJ082prjSJpoEI57GZITiQz10r3vEJCHa2khEFQjKy91aWKz6+zybzssCvXUwE1LQWgWVwZ4nYUvHQ=="
},
"node_modules/abort-controller": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"dependencies": {
"event-target-shim": "^5.0.0"
},
"engines": {
"node": ">=6.5"
}
},
"node_modules/agentkeepalive": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz",
"integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==",
"dependencies": {
"humanize-ms": "^1.2.1"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/ansi-styles": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz",
"integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/argparse": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
"integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
},
"node_modules/asynckit": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
"integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q=="
},
"node_modules/base-64": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/base-64/-/base-64-0.1.0.tgz",
"integrity": "sha512-Y5gU45svrR5tI2Vt/X9GPd3L0HNIKzGu202EjxrXMpuc2V2CiKgemAbUUsqYmZJvPtCXoUKjNZwBJzsNScUbXA=="
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/binary-extensions": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
"integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
"engines": {
"node": ">=8"
}
},
"node_modules/binary-search": {
"version": "1.3.6",
"resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz",
"integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA=="
},
"node_modules/camelcase": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
"integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/charenc": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz",
"integrity": "sha512-yrLQ/yVUFXkzg7EDQsPieE/53+0RlaWTs+wBrvW36cyilJ2SaDWfl4Yj7MtLTXleV9uEKefbAGUPv2/iWSooRA==",
"engines": {
"node": "*"
}
},
"node_modules/combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
"integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
"dependencies": {
"delayed-stream": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/commander": {
"version": "10.0.1",
"resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz",
"integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==",
"engines": {
"node": ">=14"
}
},
"node_modules/crypt": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz",
"integrity": "sha512-mCxBlsHFYh9C+HVpiEacem8FEBnMXgU9gy4zmNC+SXAZNB/1idgp/aulFJ4FgCi7GPEVbfyng092GqL2k2rmow==",
"engines": {
"node": "*"
}
},
"node_modules/decamelize": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/decamelize/-/decamelize-1.2.0.tgz",
"integrity": "sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/delayed-stream": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/digest-fetch": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/digest-fetch/-/digest-fetch-1.3.0.tgz",
"integrity": "sha512-CGJuv6iKNM7QyZlM2T3sPAdZWd/p9zQiRNS9G+9COUCwzWFTs0Xp8NF5iePx7wtvhDykReiRRrSeNb4oMmB8lA==",
"dependencies": {
"base-64": "^0.1.0",
"md5": "^2.3.0"
}
},
"node_modules/event-target-shim": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"engines": {
"node": ">=6"
}
},
"node_modules/eventemitter3": {
"version": "4.0.7",
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
"integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
},
"node_modules/expr-eval": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/expr-eval/-/expr-eval-2.0.2.tgz",
"integrity": "sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg=="
},
"node_modules/flat": {
"version": "5.0.2",
"resolved": "https://registry.npmjs.org/flat/-/flat-5.0.2.tgz",
"integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==",
"bin": {
"flat": "cli.js"
}
},
"node_modules/form-data": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
"dependencies": {
"asynckit": "^0.4.0",
"combined-stream": "^1.0.8",
"mime-types": "^2.1.12"
},
"engines": {
"node": ">= 6"
}
},
"node_modules/form-data-encoder": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="
},
"node_modules/formdata-node": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"dependencies": {
"node-domexception": "1.0.0",
"web-streams-polyfill": "4.0.0-beta.3"
},
"engines": {
"node": ">= 12.20"
}
},
"node_modules/humanize-ms": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
"dependencies": {
"ms": "^2.0.0"
}
},
"node_modules/is-any-array": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz",
"integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ=="
},
"node_modules/is-buffer": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
"integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w=="
},
"node_modules/js-tiktoken": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.7.tgz",
"integrity": "sha512-biba8u/clw7iesNEWLOLwrNGoBP2lA+hTaBLs/D45pJdUPFXyxD6nhcDVtADChghv4GgyAiMKYMiRx7x6h7Biw==",
"dependencies": {
"base64-js": "^1.5.1"
}
},
"node_modules/js-yaml": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
"integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
"dependencies": {
"argparse": "^2.0.1"
},
"bin": {
"js-yaml": "bin/js-yaml.js"
}
},
"node_modules/jsonpointer": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz",
"integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/langchain": {
"version": "0.0.165",
"resolved": "https://registry.npmjs.org/langchain/-/langchain-0.0.165.tgz",
"integrity": "sha512-CpbNpjwaE+9lzjdw+pZz0VgnRrFivEgr7CVp9dDaAb5JpaJAA4V2v6uQ9ZPN+TSqupTQ79HFn2sfyZVEl2EG7Q==",
"dependencies": {
"@anthropic-ai/sdk": "^0.6.2",
"ansi-styles": "^5.0.0",
"binary-extensions": "^2.2.0",
"camelcase": "6",
"decamelize": "^1.2.0",
"expr-eval": "^2.0.2",
"flat": "^5.0.2",
"js-tiktoken": "^1.0.7",
"js-yaml": "^4.1.0",
"jsonpointer": "^5.0.1",
"langchainhub": "~0.0.6",
"langsmith": "~0.0.31",
"ml-distance": "^4.0.0",
"object-hash": "^3.0.0",
"openai": "~4.4.0",
"openapi-types": "^12.1.3",
"p-queue": "^6.6.2",
"p-retry": "4",
"uuid": "^9.0.0",
"yaml": "^2.2.1",
"zod": "^3.22.3",
"zod-to-json-schema": "^3.20.4"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"@aws-crypto/sha256-js": "^5.0.0",
"@aws-sdk/client-bedrock-runtime": "^3.422.0",
"@aws-sdk/client-dynamodb": "^3.310.0",
"@aws-sdk/client-kendra": "^3.352.0",
"@aws-sdk/client-lambda": "^3.310.0",
"@aws-sdk/client-s3": "^3.310.0",
"@aws-sdk/client-sagemaker-runtime": "^3.310.0",
"@aws-sdk/client-sfn": "^3.310.0",
"@aws-sdk/credential-provider-node": "^3.388.0",
"@azure/storage-blob": "^12.15.0",
"@clickhouse/client": "^0.0.14",
"@cloudflare/ai": "^1.0.12",
"@elastic/elasticsearch": "^8.4.0",
"@getmetal/metal-sdk": "*",
"@getzep/zep-js": "^0.7.0",
"@gomomento/sdk": "^1.23.0",
"@google-ai/generativelanguage": "^0.2.1",
"@google-cloud/storage": "^6.10.1",
"@huggingface/inference": "^1.5.1",
"@mozilla/readability": "*",
"@notionhq/client": "^2.2.10",
"@opensearch-project/opensearch": "*",
"@pinecone-database/pinecone": "^1.1.0",
"@planetscale/database": "^1.8.0",
"@qdrant/js-client-rest": "^1.2.0",
"@raycast/api": "^1.55.2",
"@smithy/eventstream-codec": "^2.0.5",
"@smithy/protocol-http": "^3.0.6",
"@smithy/signature-v4": "^2.0.10",
"@smithy/util-utf8": "^2.0.0",
"@supabase/postgrest-js": "^1.1.1",
"@supabase/supabase-js": "^2.10.0",
"@tensorflow-models/universal-sentence-encoder": "*",
"@tensorflow/tfjs-converter": "*",
"@tensorflow/tfjs-core": "*",
"@upstash/redis": "^1.20.6",
"@vercel/postgres": "^0.5.0",
"@writerai/writer-sdk": "^0.40.2",
"@xata.io/client": "^0.25.1",
"@xenova/transformers": "^2.5.4",
"@zilliz/milvus2-sdk-node": ">=2.2.7",
"apify-client": "^2.7.1",
"axios": "*",
"cassandra-driver": "^4.6.4",
"cheerio": "^1.0.0-rc.12",
"chromadb": "*",
"cohere-ai": ">=6.0.0",
"d3-dsv": "^2.0.0",
"epub2": "^3.0.1",
"faiss-node": "^0.3.0",
"fast-xml-parser": "^4.2.7",
"firebase-admin": "^11.9.0",
"google-auth-library": "^8.9.0",
"googleapis": "^126.0.1",
"hnswlib-node": "^1.4.2",
"html-to-text": "^9.0.5",
"ignore": "^5.2.0",
"ioredis": "^5.3.2",
"jsdom": "*",
"llmonitor": "*",
"lodash": "^4.17.21",
"mammoth": "*",
"mongodb": "^5.2.0",
"mysql2": "^3.3.3",
"neo4j-driver": "*",
"node-llama-cpp": "*",
"notion-to-md": "^3.1.0",
"pdf-parse": "1.1.1",
"peggy": "^3.0.2",
"pg": "^8.11.0",
"pg-copy-streams": "^6.0.5",
"pickleparser": "^0.1.0",
"playwright": "^1.32.1",
"portkey-ai": "^0.1.11",
"puppeteer": "^19.7.2",
"redis": "^4.6.4",
"replicate": "^0.18.0",
"sonix-speech-recognition": "^2.1.1",
"srt-parser-2": "^1.2.2",
"typeorm": "^0.3.12",
"typesense": "^1.5.3",
"usearch": "^1.1.1",
"vectordb": "^0.1.4",
"voy-search": "0.6.2",
"weaviate-ts-client": "^1.4.0",
"web-auth-library": "^1.0.3",
"youtube-transcript": "^1.0.6",
"youtubei.js": "^5.8.0"
},
"peerDependenciesMeta": {
"@aws-crypto/sha256-js": {
"optional": true
},
"@aws-sdk/client-bedrock-runtime": {
"optional": true
},
"@aws-sdk/client-dynamodb": {
"optional": true
},
"@aws-sdk/client-kendra": {
"optional": true
},
"@aws-sdk/client-lambda": {
"optional": true
},
"@aws-sdk/client-s3": {
"optional": true
},
"@aws-sdk/client-sagemaker-runtime": {
"optional": true
},
"@aws-sdk/client-sfn": {
"optional": true
},
"@aws-sdk/credential-provider-node": {
"optional": true
},
"@azure/storage-blob": {
"optional": true
},
"@clickhouse/client": {
"optional": true
},
"@cloudflare/ai": {
"optional": true
},
"@elastic/elasticsearch": {
"optional": true
},
"@getmetal/metal-sdk": {
"optional": true
},
"@getzep/zep-js": {
"optional": true
},
"@gomomento/sdk": {
"optional": true
},
"@google-ai/generativelanguage": {
"optional": true
},
"@google-cloud/storage": {
"optional": true
},
"@huggingface/inference": {
"optional": true
},
"@mozilla/readability": {
"optional": true
},
"@notionhq/client": {
"optional": true
},
"@opensearch-project/opensearch": {
"optional": true
},
"@pinecone-database/pinecone": {
"optional": true
},
"@planetscale/database": {
"optional": true
},
"@qdrant/js-client-rest": {
"optional": true
},
"@raycast/api": {
"optional": true
},
"@smithy/eventstream-codec": {
"optional": true
},
"@smithy/protocol-http": {
"optional": true
},
"@smithy/signature-v4": {
"optional": true
},
"@smithy/util-utf8": {
"optional": true
},
"@supabase/postgrest-js": {
"optional": true
},
"@supabase/supabase-js": {
"optional": true
},
"@tensorflow-models/universal-sentence-encoder": {
"optional": true
},
"@tensorflow/tfjs-converter": {
"optional": true
},
"@tensorflow/tfjs-core": {
"optional": true
},
"@upstash/redis": {
"optional": true
},
"@vercel/postgres": {
"optional": true
},
"@writerai/writer-sdk": {
"optional": true
},
"@xata.io/client": {
"optional": true
},
"@xenova/transformers": {
"optional": true
},
"@zilliz/milvus2-sdk-node": {
"optional": true
},
"apify-client": {
"optional": true
},
"axios": {
"optional": true
},
"cassandra-driver": {
"optional": true
},
"cheerio": {
"optional": true
},
"chromadb": {
"optional": true
},
"cohere-ai": {
"optional": true
},
"d3-dsv": {
"optional": true
},
"epub2": {
"optional": true
},
"faiss-node": {
"optional": true
},
"fast-xml-parser": {
"optional": true
},
"firebase-admin": {
"optional": true
},
"google-auth-library": {
"optional": true
},
"googleapis": {
"optional": true
},
"hnswlib-node": {
"optional": true
},
"html-to-text": {
"optional": true
},
"ignore": {
"optional": true
},
"ioredis": {
"optional": true
},
"jsdom": {
"optional": true
},
"llmonitor": {
"optional": true
},
"lodash": {
"optional": true
},
"mammoth": {
"optional": true
},
"mongodb": {
"optional": true
},
"mysql2": {
"optional": true
},
"neo4j-driver": {
"optional": true
},
"node-llama-cpp": {
"optional": true
},
"notion-to-md": {
"optional": true
},
"pdf-parse": {
"optional": true
},
"peggy": {
"optional": true
},
"pg": {
"optional": true
},
"pg-copy-streams": {
"optional": true
},
"pickleparser": {
"optional": true
},
"playwright": {
"optional": true
},
"portkey-ai": {
"optional": true
},
"puppeteer": {
"optional": true
},
"redis": {
"optional": true
},
"replicate": {
"optional": true
},
"sonix-speech-recognition": {
"optional": true
},
"srt-parser-2": {
"optional": true
},
"typeorm": {
"optional": true
},
"typesense": {
"optional": true
},
"usearch": {
"optional": true
},
"vectordb": {
"optional": true
},
"voy-search": {
"optional": true
},
"weaviate-ts-client": {
"optional": true
},
"web-auth-library": {
"optional": true
},
"youtube-transcript": {
"optional": true
},
"youtubei.js": {
"optional": true
}
}
},
"node_modules/langchainhub": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/langchainhub/-/langchainhub-0.0.6.tgz",
"integrity": "sha512-SW6105T+YP1cTe0yMf//7kyshCgvCTyFBMTgH2H3s9rTAR4e+78DA/BBrUL/Mt4Q5eMWui7iGuAYb3pgGsdQ9w=="
},
"node_modules/langsmith": {
"version": "0.0.42",
"resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.0.42.tgz",
"integrity": "sha512-sFuN+e7E+pPBIRaRgFqZh/BRBWNHTZNAwi6uj4kydQawooCZYoJmM5snOkiQrhVSvAhgu6xFhLvmfvkPcKzD7w==",
"dependencies": {
"@types/uuid": "^9.0.1",
"commander": "^10.0.1",
"p-queue": "^6.6.2",
"p-retry": "4",
"uuid": "^9.0.0"
},
"bin": {
"langsmith": "dist/cli/main.cjs"
}
},
"node_modules/md5": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/md5/-/md5-2.3.0.tgz",
"integrity": "sha512-T1GITYmFaKuO91vxyoQMFETst+O71VUPEU3ze5GNzDm0OWdP8v1ziTaAEPUr/3kLsY3Sftgz242A1SetQiDL7g==",
"dependencies": {
"charenc": "0.0.2",
"crypt": "0.0.2",
"is-buffer": "~1.1.6"
}
},
"node_modules/mime-db": {
"version": "1.52.0",
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
"integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/mime-types": {
"version": "2.1.35",
"resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
"integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
"dependencies": {
"mime-db": "1.52.0"
},
"engines": {
"node": ">= 0.6"
}
},
"node_modules/ml-array-mean": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/ml-array-mean/-/ml-array-mean-1.1.6.tgz",
"integrity": "sha512-MIdf7Zc8HznwIisyiJGRH9tRigg3Yf4FldW8DxKxpCCv/g5CafTw0RRu51nojVEOXuCQC7DRVVu5c7XXO/5joQ==",
"dependencies": {
"ml-array-sum": "^1.1.6"
}
},
"node_modules/ml-array-sum": {
"version": "1.1.6",
"resolved": "https://registry.npmjs.org/ml-array-sum/-/ml-array-sum-1.1.6.tgz",
"integrity": "sha512-29mAh2GwH7ZmiRnup4UyibQZB9+ZLyMShvt4cH4eTK+cL2oEMIZFnSyB3SS8MlsTh6q/w/yh48KmqLxmovN4Dw==",
"dependencies": {
"is-any-array": "^2.0.0"
}
},
"node_modules/ml-distance": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/ml-distance/-/ml-distance-4.0.1.tgz",
"integrity": "sha512-feZ5ziXs01zhyFUUUeZV5hwc0f5JW0Sh0ckU1koZe/wdVkJdGxcP06KNQuF0WBTj8FttQUzcvQcpcrOp/XrlEw==",
"dependencies": {
"ml-array-mean": "^1.1.6",
"ml-distance-euclidean": "^2.0.0",
"ml-tree-similarity": "^1.0.0"
}
},
"node_modules/ml-distance-euclidean": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/ml-distance-euclidean/-/ml-distance-euclidean-2.0.0.tgz",
"integrity": "sha512-yC9/2o8QF0A3m/0IXqCTXCzz2pNEzvmcE/9HFKOZGnTjatvBbsn4lWYJkxENkA4Ug2fnYl7PXQxnPi21sgMy/Q=="
},
"node_modules/ml-tree-similarity": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/ml-tree-similarity/-/ml-tree-similarity-1.0.0.tgz",
"integrity": "sha512-XJUyYqjSuUQkNQHMscr6tcjldsOoAekxADTplt40QKfwW6nd++1wHWV9AArl0Zvw/TIHgNaZZNvr8QGvE8wLRg==",
"dependencies": {
"binary-search": "^1.3.5",
"num-sort": "^2.0.0"
}
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
},
"node_modules/node-domexception": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
"integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/jimmywarting"
},
{
"type": "github",
"url": "https://paypal.me/jimmywarting"
}
],
"engines": {
"node": ">=10.5.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/num-sort": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz",
"integrity": "sha512-1MQz1Ed8z2yckoBeSfkQHHO9K1yDRxxtotKSJ9yvcTUUxSvfvzEq5GwBrjjHEpMlq/k5gvXdmJ1SbYxWtpNoVg==",
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/object-hash": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz",
"integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==",
"engines": {
"node": ">= 6"
}
},
"node_modules/openai": {
"version": "4.4.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-4.4.0.tgz",
"integrity": "sha512-JN0t628Kh95T0IrXl0HdBqnlJg+4Vq0Bnh55tio+dfCnyzHvMLiWyCM9m726MAJD2YkDU4/8RQB6rNbEq9ct2w==",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"digest-fetch": "^1.3.0",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
},
"bin": {
"openai": "bin/cli"
}
},
"node_modules/openapi-types": {
"version": "12.1.3",
"resolved": "https://registry.npmjs.org/openapi-types/-/openapi-types-12.1.3.tgz",
"integrity": "sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw=="
},
"node_modules/p-finally": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
"integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==",
"engines": {
"node": ">=4"
}
},
"node_modules/p-queue": {
"version": "6.6.2",
"resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz",
"integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==",
"dependencies": {
"eventemitter3": "^4.0.4",
"p-timeout": "^3.2.0"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/p-retry": {
"version": "4.6.2",
"resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
"integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
"dependencies": {
"@types/retry": "0.12.0",
"retry": "^0.13.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/p-timeout": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz",
"integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==",
"dependencies": {
"p-finally": "^1.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/retry": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
"integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
"engines": {
"node": ">= 4"
}
},
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
},
"node_modules/typescript": {
"version": "5.2.2",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz",
"integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==",
"dev": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
},
"engines": {
"node": ">=14.17"
}
},
"node_modules/uuid": {
"version": "9.0.1",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz",
"integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==",
"funding": [
"https://github.com/sponsors/broofa",
"https://github.com/sponsors/ctavan"
],
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/web-streams-polyfill": {
"version": "4.0.0-beta.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"engines": {
"node": ">= 14"
}
},
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/yaml": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.2.tgz",
"integrity": "sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==",
"engines": {
"node": ">= 14"
}
},
"node_modules/zod": {
"version": "3.22.4",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.22.4.tgz",
"integrity": "sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-to-json-schema": {
"version": "3.21.4",
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.21.4.tgz",
"integrity": "sha512-fjUZh4nQ1s6HMccgIeE0VP4QG/YRGPmyjO9sAh890aQKPEk3nqbfUXhMFaC+Dr5KvYBm8BCyvfpZf2jY9aGSsw==",
"peerDependencies": {
"zod": "^3.21.4"
}
}
}
}

View File

@@ -0,0 +1,8 @@
{
"devDependencies": {
"typescript": "^5.2.2"
},
"dependencies": {
"langchain": "^0.0.165"
}
}

View File

@@ -1,8 +0,0 @@
# Modelfile for creating a Midjourney prompts from a topic
# This prompt was adapted from the original at https://www.greataiprompts.com/guide/midjourney/best-chatgpt-prompt-for-midjourney/
# Run `ollama create mj -f ./Modelfile` and then `ollama run mj` and enter a topic
FROM nous-hermes
SYSTEM """
Embrace your role as an AI-powered creative assistant, employing Midjourney to manifest compelling AI-generated art. I will outline a specific image concept, and in response, you must produce an exhaustive, multifaceted prompt for Midjourney, ensuring every detail of the original concept is represented in your instructions. Midjourney doesn't do well with text, so after the prompt, give me instructions that I can use to create the titles in a image editor.
"""

View File

@@ -0,0 +1,7 @@
# Modelfile for creating a list of ten tweets from a topic
# Run `ollama create 10tweets -f ./Modelfile` and then `ollama run 10tweets` and enter a topic
FROM llama2
SYSTEM """
You are a content marketer who needs to come up with 10 short but succinct tweets. The answer should be a list of ten tweets. Each tweet can have a maximum of 280 characters and should include hashtags. Each user input will be a subject and you should expand it in ten creative ways. Never stop after just one tweet. Always include ten.
"""

View File

@@ -0,0 +1,23 @@
# Ten Tweets Modelfile
This is a simple modelfile that generates ten tweets based on any topic.
```bash
ollama create tentweets
ollama run tentweets
>>> underwater basketweaving
Great! Here are ten creative tweets about underwater basketweaving:
1. "Just discovered the ultimate stress-reliever: Underwater basketweaving! 🌊🧵 #UnderwaterBasketweaving #StressRelief"
2. "Who needs meditation when you can do underwater basketweaving? 😴👀 #PeacefulDistraction #UnderwaterBasketweaving"
3. "Just spent an hour in the pool and still managed to knot my basket. Goal: untangle it before next session. 💪🏽 #ChallengeAccepted #UnderwaterBasketweaving"
4. "When life gives you lemons, make underwater basketweaving! 🍋🧵 #LemonadeLife #UnderwaterBasketweaving"
5. "Just realized my underwater basketweaving skills could come in handy during a zombie apocalypse. 😂🧡 #SurvivalTips #UnderwaterBasketweaving"
6. "I'm not lazy, I'm just conserving energy for my next underwater basketweaving session. 😴💤 #LazyDay #UnderwaterBasketweaving"
7. "Just found my inner peace while doing underwater basketweaving. It's like meditation, but with knots! 🙏🧵 #Mindfulness #UnderwaterBasketweaving"
8. "Why study for exams when you can do underwater basketweaving and forget all your worries? 😜🧵 #ProcrastinationStation #UnderwaterBasketweaving"
9. "Just had to cut my underwater basketweaving session short due to a sudden urge to breathe. 🤯🌊 #AquaticAdventures #UnderwaterBasketweaving"
10. "I'm not sure what's more impressive: my underwater basketweaving skills or the fact that I didn't drown trying to make this tweet. 😅🧵 #Accomplishment
#UnderwaterBasketweaving"
```

View File

@@ -1,7 +1,7 @@
# Modelfile for creating a devops engineer assistant
# Run `ollama create devops-engineer -f ./Modelfile` and then `ollama run devops-engineer` and enter a topic
FROM llama2:13b
FROM mistral
PARAMETER temperature 1
SYSTEM """
You are a senior devops engineer, acting as an assistant. You offer help with cloud technologies like: Terraform, AWS, kubernetes, python. You answer with code examples when possible

View File

@@ -0,0 +1,40 @@
# DevOps Engineer Modelfile
This is a simple modelfile that will respond to any question a senior DevOps engineer should know how to solve.
````text
ollama create devopsengineer
ollama run devopsengineer
>>> how can i proxy all connections from inside the office through a single server that caches large files. Provide a config file for nginx to achieve this?
Here is an example configuration file for Nginx that can be used to proxy all connections from inside the office network to a centralized cache server:
```perl
http {
upstream cache_server {
server <cache_server_ip>;
server <cache_server_ip>;
server <cache_server_ip>;
}
server {
listen 80;
server_name localhost;
location / {
proxy_pass http://cache_server;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
}
```
In this configuration, we define an `upstream` block that lists the IP addresses of the cache servers. We then define a server block that listens on port 80 and routes all
traffic to the `/` location to the cache servers using the `proxy_pass` directive. The `proxy_set_header` directives are used to preserve the source IP address of the client
request when forwarding it to the cache server.
To use this configuration, you would need to replace the placeholder `<cache_server_ip>` with the actual IP addresses of your cache servers. You would also need to make sure
that the cache servers are configured to accept incoming connections from the Nginx server and handle requests for files.
````

View File

(binary image file changed; 446 KiB before and after)

View File

@@ -0,0 +1,11 @@
# Modelfile for creating Midjourney prompts from a topic
# This prompt was adapted from the original at https://www.greataiprompts.com/guide/midjourney/best-chatgpt-prompt-for-midjourney/
# Run `ollama create mj -f ./Modelfile` and then `ollama run mj` and enter a topic
FROM zephyr
PARAMETER temperature 0.8
PARAMETER top_k 500
PARAMETER top_p 0.9
SYSTEM """
Embrace your role as a creative illustrator. Based on a concept provided, you must produce a single paragraph with a multifaceted description of an image, ensuring significant details of the concept and more are represented in your instructions. You do not need to write complete sentences but rather short concepts with the following information: the level of detail that should be represented, an artistic style and maybe a specific name of a painter or illustrator, the ideal color palette, lighting, mood, perspective, the setting, time of day, weather, the season, the time period, location, materials, the textures, patterns, lines, brushstrokes, techniques, the medium, the genre, the rendering style. Don't include everything and keep the description length under 250 words.
"""

View File

@@ -0,0 +1,11 @@
# Midjourney Prompt Generator Modelfile
This simple modelfile will help create a prompt to feed to Midjourney.
```text
ollama create midjourney
ollama run midjourney
>>> a sports car in the mountains.
A sleek, high-performance automobile cuts through a serpentine mountain landscape. The concept is a classic illustration of speed and power, depicted in the style of pop art by Andy Warhol. The color palette is dominated by bold, primary hues of red, blue, and yellow, with striking accent colors of white, black, and metallic shades. The lighting is bright and focused, casting sharp shadows on the rugged terrain. A sense of excitement and anticipation permeates throughout the scene, as the car navigates a treacherous course through the winding road. The perspective is low, allowing for a full view of the vehicle's sleek lines and intricate details. The setting takes place in the afternoon during a sunny day in autumn, as evidenced by the vibrant foliage on the mountainside. The time period is modern, with nods to classic car design. The materials are primarily digital, allowing for smooth curves and sharp contrasts. The textures are sleek and polished, with meticulously detailed lines and brushstrokes that accentuate the car's aerodynamic design. The patterns consist of geometric shapes and bold stripes, adding to the car's dynamic appeal. The genre is modern realism, with a focus on precision and detail. The rendering style is highly technical, capturing the nuances and subtleties of the vehicle and its surroundings in breathtaking detail.
```

View File

@@ -0,0 +1,20 @@
# Recipe Maker Modelfile
Simple modelfile to generate a recipe from a short list of ingredients.
```
ollama create recipemaker
ollama run recipemaker
>>> chilli pepper, white chocolate, kale
Ingredients:
- 1 small chili pepper
- 4 squares of white chocolate
- handful of kale leaves
Instructions:
1. In a blender or food processor, puree the chilies and white chocolate until smooth.
2. Add the chopped kale leaves to the blender and pulse until well combined.
3. Serve immediately as a dip for crackers or use it as an ingredient in your favorite recipe. The mixture of spicy chili pepper with sweet white chocolate and nutritious
kale will make your taste buds dance with delight!
```

View File

@@ -0,0 +1,28 @@
# Modelfile for creating a sentiment analyzer.
# Run `ollama create sentiments -f pathtofile` and then `ollama run sentiments` and enter a topic
FROM orca
TEMPLATE """
{{- if .First }}
### System:
{{ .System }}
{{- end }}
### User:
I hate it when my phone dies
### Response:
NEGATIVE
### User:
He is awesome
### Response:
POSITIVE
### User:
This is the link to the article
### Response:
NEUTRAL
### User:
{{ .Prompt }}
### Response:
"""
SYSTEM """You are a sentiment analyzer. You will receive text and output only one word, either POSITIVE or NEGATIVE or NEUTRAL, depending on the sentiment of the text."""

View File

@@ -0,0 +1,25 @@
# Sentiments Modelfile
This is a simple sentiment analyzer using the Orca model. When you pull Orca from the registry, it already has a Template defined that looks like this:
```Modelfile
{{- if .First }}
### System:
{{ .System }}
{{- end }}
### User:
{{ .Prompt }}
### Response:
```
If we just wanted to have the text:
```Plaintext
You are a sentiment analyzer. You will receive text and output only one word, either POSITIVE or NEGATIVE or NEUTRAL, depending on the sentiment of the text.
```
then we could have put this in a SYSTEM block. But we want to provide examples, which requires updating the full Template. Any Modelfile you create inherits all the settings from the source model; in this example, we are overriding the Template.
When providing examples for the input and output, you should match the way the model usually expects to see information. Since the Orca model expects the user prompt to appear after `### User:` and the response after `### Response:`, we should format our examples the same way. If we were using the Llama 2 model, the format would be a bit different.
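For comparison, here is a rough, illustrative sketch of how the same few-shot examples might look on top of Llama 2's chat format. This assumes the `[INST]`/`<<SYS>>` prompt style; the exact template bundled with any given Llama 2 model may differ, so check the model's own template before copying this:
```Modelfile
TEMPLATE """
[INST] <<SYS>>
{{ .System }}
<</SYS>>

I hate it when my phone dies [/INST] NEGATIVE
[INST] He is awesome [/INST] POSITIVE
[INST] This is the link to the article [/INST] NEUTRAL
[INST] {{ .Prompt }} [/INST]
"""
```
Either way, the point is the same: match the prompt layout the base model was trained on when writing your examples.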

View File

@@ -1,6 +1,6 @@
FROM llama2
FROM mistral
SYSTEM """
You are an experience Devops engineer focused on docker. When given specifications for a particular need or application you know the best way to host that within a docker container. For instance if someone tells you they want an nginx server to host files located at /web you will answer as follows
You are an experienced Devops engineer focused on docker. When given specifications for a particular need or application you know the best way to host that within a docker container. For instance if someone tells you they want an nginx server to host files located at /web you will answer as follows
---start
FROM nginx:alpine
@@ -16,5 +16,5 @@ ENV POSTGRES_PASSWORD=abc123
EXPOSE 5432
---end
Again it's just the contents of the dockerfile an nothing else.
"""
Again it's just the contents of the dockerfile and nothing else.
"""

View File

@@ -0,0 +1,22 @@
# News Summarizer
This example goes through a series of steps:
1. You choose a topic area (e.g., "news", "NVidia", "music", etc.).
2. It fetches the most recent articles on that topic from various sources.
3. It uses Ollama to summarize each article.
4. It splits each article into chunks of sentences.
5. It uses Sentence Transformers to generate embeddings for each of those chunks.
6. You enter a question about the summaries shown.
7. It uses Sentence Transformers to generate an embedding for that question.
8. It uses the embedded question to find the most similar chunks.
9. It feeds all of that to Ollama to generate a good answer to your question based on these news articles.
This example lets you pick from a few different topic areas and summarizes the most recent articles for that topic; the sentence-chunk embeddings are what let it answer your follow-up questions from the article text.
You can run the example like this:
```bash
pip install -r requirements.txt
python summ.py
```

View File

@@ -0,0 +1,9 @@
beautifulsoup4==4.12.2
feedparser==6.0.10
mattsollamatools==0.0.8
newspaper3k==0.2.8
nltk==3.8.1
numpy==1.24.3
Requests==2.31.0
scikit_learn==1.3.0
sentence_transformers==2.2.2

View File

@@ -0,0 +1,86 @@
import curses
import json
from utils import get_url_for_topic, topic_urls, menu, getUrls, get_summary, getArticleText, knn_search
import requests
from sentence_transformers import SentenceTransformer
from mattsollamatools import chunker
if __name__ == "__main__":
chosen_topic = curses.wrapper(menu)
print("Here is your news summary:\n")
urls = getUrls(chosen_topic, n=5)
model = SentenceTransformer('all-MiniLM-L6-v2')
allEmbeddings = []
for url in urls:
article={}
article['embeddings'] = []
article['url'] = url
text = getArticleText(url)
summary = get_summary(text)
chunks = chunker(text)  # Split the text into sentence chunks using the chunker from mattsollamatools
embeddings = model.encode(chunks)
for (chunk, embedding) in zip(chunks, embeddings):
item = {}
item['source'] = chunk
item['embedding'] = embedding.tolist() # Convert NumPy array to list
item['sourcelength'] = len(chunk)
article['embeddings'].append(item)
allEmbeddings.append(article)
print(f"{summary}\n")
while True:
context = []
# Input a question from the user
question = input("Enter your question about the news, or type quit: ")
if question.lower() == 'quit':
break
# Embed the user's question
question_embedding = model.encode([question])
# Perform KNN search to find the best matches (indices and source text)
best_matches = knn_search(question_embedding, allEmbeddings, k=10)
sourcetext=""
for i, (index, source_text) in enumerate(best_matches, start=1):
sourcetext += f"{i}. Index: {index}, Source Text: {source_text}"
systemPrompt = f"Only use the following information to answer the question. Do not use anything else: {sourcetext}"
url = "http://localhost:11434/api/generate"
payload = {
"model": "mistral-openorca",
"prompt": question,
"system": systemPrompt,
"stream": False,
"context": context
}
# Convert the payload to a JSON string
payload_json = json.dumps(payload)
# Set the headers to specify JSON content
headers = {
"Content-Type": "application/json"
}
# Send the POST request
response = requests.post(url, data=payload_json, headers=headers)
# Check the response
if response.status_code == 200:
output = json.loads(response.text)
context = output['context']
print(output['response']+ "\n")
else:
print(f"Request failed with status code {response.status_code}")

View File

@@ -0,0 +1,108 @@
import curses
import feedparser
import requests
import unicodedata
import json
from newspaper import Article
from bs4 import BeautifulSoup
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy as np
from sklearn.neighbors import NearestNeighbors
from mattsollamatools import chunker
# Create a dictionary to store topics and their URLs
topic_urls = {
"Mac": "https://9to5mac.com/guides/mac/feed",
"News": "http://www.npr.org/rss/rss.php?id=1001",
"Nvidia": "https://nvidianews.nvidia.com/releases.xml",
"Raspberry Pi": "https://www.raspberrypi.com/news/feed/",
"Music": "https://www.billboard.com/c/music/music-news/feed/"
}
# Use curses to create a menu of topics
def menu(stdscr):
chosen_topic = get_url_for_topic(stdscr)
url = topic_urls[chosen_topic] if chosen_topic in topic_urls else "Topic not found"
stdscr.addstr(len(topic_urls) + 3, 0, f"Selected URL for {chosen_topic}: {url}")
stdscr.refresh()
return chosen_topic
# You have chosen a topic. Now return the url for that topic
def get_url_for_topic(stdscr):
curses.curs_set(0) # Hide the cursor
stdscr.clear()
stdscr.addstr(0, 0, "Choose a topic using the arrow keys (Press Enter to select):")
# Create a list of topics
topics = list(topic_urls.keys())
current_topic = 0
while True:
for i, topic in enumerate(topics):
if i == current_topic:
stdscr.addstr(i + 2, 2, f"> {topic}")
else:
stdscr.addstr(i + 2, 2, f" {topic}")
stdscr.refresh()
key = stdscr.getch()
if key == curses.KEY_DOWN and current_topic < len(topics) - 1:
current_topic += 1
elif key == curses.KEY_UP and current_topic > 0:
current_topic -= 1
elif key == 10: # Enter key
return topic_urls[topics[current_topic]]
# Get the last N URLs from an RSS feed
def getUrls(feed_url, n=20):
feed = feedparser.parse(feed_url)
entries = feed.entries[-n:]
urls = [entry.link for entry in entries]
return urls
# News article pages often include a bunch of ads and menus. This uses newspaper3k to extract just the text of the article.
def getArticleText(url):
article = Article(url)
article.download()
article.parse()
return article.text
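# Ask the local Ollama server to produce a short summary of the article text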
def get_summary(text):
systemPrompt = "Write a concise summary of the text, return your responses with 5 lines that cover the key points of the text given."
prompt = text
url = "http://localhost:11434/api/generate"
payload = {
"model": "mistral-openorca",
"prompt": prompt,
"system": systemPrompt,
"stream": False
}
payload_json = json.dumps(payload)
headers = {"Content-Type": "application/json"}
response = requests.post(url, data=payload_json, headers=headers)
return json.loads(response.text)["response"]
# Perform K-nearest neighbors (KNN) search
def knn_search(question_embedding, embeddings, k=5):
X = np.array([item['embedding'] for article in embeddings for item in article['embeddings']])
source_texts = [item['source'] for article in embeddings for item in article['embeddings']]
# Fit a KNN model on the embeddings
knn = NearestNeighbors(n_neighbors=k, metric='cosine')
knn.fit(X)
# Find the indices and distances of the k-nearest neighbors
distances, indices = knn.kneighbors(question_embedding, n_neighbors=k)
# Get the indices and source texts of the best matches
best_matches = [(indices[0][i], source_texts[indices[0][i]]) for i in range(k)]
return best_matches

View File

@@ -0,0 +1,2 @@
node_modules
package-lock.json

View File

@@ -0,0 +1,21 @@
# Ask the Mentors
This example demonstrates how one would create a set of 'mentors' you can have a conversation with. The mentors are generated using the `character-generator.ts` file, which uses **Stable Beluga 70b** to create a bio and a list of verbal tics and common phrases used by each person. Then `mentors.ts` takes a question, chooses three of the 'mentors', and starts a conversation with them. Occasionally they will talk to each other, and other times they will just deliver a set of monologues. It's fun to see what they do and say.
## Usage
```bash
ts-node ./character-generator.ts "Lorne Greene"
```
This will create `lornegreene/Modelfile`. Now you can create a model with this command:
```bash
ollama create lornegreene -f lornegreene/Modelfile
```
If you want to add your own mentors, you will have to update the code to look at your namespace instead of **mattw**, and add your mentors to the list in `mentors.ts`.
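The two places to change are the `mentors` array in `getMentors()` and the prefix returned by `getMentorFileName()`. A minimal sketch, assuming a hypothetical `yourname` namespace and an added mentor (both placeholders, not real models):
```typescript
// In getMentors(): add your own mentors to the candidate list
const mentors = ['Gary Vaynerchuk', 'Kanye West', 'Martha Stewart', 'Your New Mentor'];

// In getMentorFileName(): point at your registry namespace instead of mattw
function getMentorFileName(mentor: string): string {
  const model = mentor.toLowerCase().replace(/\s/g, '');
  return `yourname/${model}`;
}
```
Then ask a question: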
```bash
ts-node ./mentors.ts "What is a Jackalope?"
```

View File

@@ -0,0 +1,26 @@
import { Ollama } from 'ollama-node'
import fs from 'fs';
import path from 'path';
async function characterGenerator() {
const character = process.argv[2];
console.log(`You are creating a character for ${character}.`);
const foldername = character.replace(/\s/g, '').toLowerCase();
const directory = path.join(__dirname, foldername);
if (!fs.existsSync(directory)) {
fs.mkdirSync(directory, { recursive: true });
}
const ollama = new Ollama();
ollama.setModel("stablebeluga2:70b-q4_K_M");
const bio = await ollama.generate(`create a bio of ${character} in a single long paragraph. Instead of saying '${character} is...' or '${character} was...' use language like 'You are...' or 'You were...'. Then create a paragraph describing the speaking mannerisms and style of ${character}. Don't include anything about how ${character} looked or what they sounded like, just focus on the words they said. Instead of saying '${character} would say...' use language like 'You should say...'. If you use quotes, always use single quotes instead of double quotes. If there are any specific words or phrases you used a lot, show how you used them. `);
const thecontents = `FROM llama2\nSYSTEM """\n${bio.response.replace(/(\r\n|\n|\r)/gm, " ").replace('would', 'should')} All answers to questions should be related back to what you are most known for.\n"""`;
fs.writeFile(path.join(directory, 'Modelfile'), thecontents, (err: any) => {
if (err) throw err;
console.log('The file has been saved!');
});
}
characterGenerator();

View File

@@ -0,0 +1,59 @@
import { Ollama } from 'ollama-node';
const mentorCount = 3;
const ollama = new Ollama();
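// Pick mentorCount distinct mentors at random from the list below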
function getMentors(): string[] {
const mentors = ['Gary Vaynerchuk', 'Kanye West', 'Martha Stewart', 'Neil deGrasse Tyson', 'Owen Wilson', 'Ronald Reagan', 'Donald Trump', 'Barack Obama', 'Jeff Bezos'];
const chosenMentors: string[] = [];
for (let i = 0; i < mentorCount; i++) {
const mentor = mentors[Math.floor(Math.random() * mentors.length)];
chosenMentors.push(mentor);
mentors.splice(mentors.indexOf(mentor), 1);
}
return chosenMentors;
}
function getMentorFileName(mentor: string): string {
const model = mentor.toLowerCase().replace(/\s/g, '');
return `mattw/${model}`;
}
async function getSystemPrompt(mentor: string, isLast: boolean, question: string): Promise<string> {
ollama.setModel(getMentorFileName(mentor));
const info = await ollama.showModelInfo()
let SystemPrompt = info.system || '';
SystemPrompt += ` You should continue the conversation as if you were ${mentor} and acknowledge the people before you in the conversation. You should adopt their mannerisms and tone, but also not use language they wouldn't use. If they are not known to know about the concept in the question, don't offer an answer. Your answer should be no longer than 1 paragraph. And definitely try not to sound like anyone else. Don't repeat any slang or phrases already used. And if it is a question the original ${mentor} wouldn't have known the answer to, just say that you don't know, in the style of ${mentor}. And think about the time the person lived. Don't use terminology that they wouldn't have used.`
if (isLast) {
SystemPrompt += ` End your answer with something like I hope our answers help you out`;
} else {
SystemPrompt += ` Remember, this is a conversation, so you don't need a conclusion, but end your answer with a question related to the first question: "${question}".`;
}
return SystemPrompt;
}
async function main() {
const mentors = getMentors();
const question = process.argv[2];
let theConversation = `Here is the conversation so far.\nYou: ${question}\n`
for await (const mentor of mentors) {
const SystemPrompt = await getSystemPrompt(mentor, mentor === mentors[mentorCount - 1], question);
ollama.setModel(getMentorFileName(mentor));
ollama.setSystemPrompt(SystemPrompt);
let output = '';
process.stdout.write(`\n${mentor}: `);
for await (const chunk of ollama.streamingGenerate(theConversation + `Continue the conversation as if you were ${mentor} on the question "${question}".`)) {
if (chunk.response) {
output += chunk.response;
process.stdout.write(chunk.response);
} else {
process.stdout.write('\n');
}
}
theConversation += `${mentor}: ${output}\n\n`
}
}
main();

View File

@@ -0,0 +1,7 @@
{
"dependencies": {
"fs": "^0.0.1-security",
"ollama-node": "^0.0.3",
"path": "^0.12.7"
}
}

23
format/bytes.go Normal file
View File

@@ -0,0 +1,23 @@
package format
import "fmt"
const (
Byte = 1
KiloByte = Byte * 1000
MegaByte = KiloByte * 1000
GigaByte = MegaByte * 1000
)
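// HumanBytes formats a byte count as a human-readable string using decimal (SI) units.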
func HumanBytes(b int64) string {
switch {
case b > GigaByte:
return fmt.Sprintf("%d GB", b/GigaByte)
case b > MegaByte:
return fmt.Sprintf("%d MB", b/MegaByte)
case b > KiloByte:
return fmt.Sprintf("%d KB", b/KiloByte)
default:
return fmt.Sprintf("%d B", b)
}
}

View File

@@ -10,15 +10,11 @@ package format
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"encoding/binary"
"encoding/pem"
"fmt"
"math/big"
"golang.org/x/crypto/ssh"
)
@@ -41,25 +37,6 @@ type openSSHPrivateKey struct {
Rest []byte `ssh:"rest"`
}
type openSSHRSAPrivateKey struct {
N *big.Int
E *big.Int
D *big.Int
Iqmp *big.Int
P *big.Int
Q *big.Int
Comment string
Pad []byte `ssh:"rest"`
}
type openSSHECDSAPrivateKey struct {
Curve string
Pub []byte
D *big.Int
Comment string
Pad []byte `ssh:"rest"`
}
type openSSHEd25519PrivateKey struct {
Pub []byte
Priv []byte
@@ -85,64 +62,6 @@ func OpenSSHPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error
}
switch k := key.(type) {
case *rsa.PrivateKey:
e := new(big.Int).SetInt64(int64(k.E))
key := openSSHRSAPrivateKey{
N: k.N,
E: e,
D: k.D,
Iqmp: k.Precomputed.Qinv,
P: k.Primes[0],
Q: k.Primes[1],
Comment: comment,
}
pk1.Keytype = ssh.KeyAlgoRSA
pk1.Rest = ssh.Marshal(key)
w.PubKey = ssh.Marshal(struct {
KeyType string
E *big.Int
N *big.Int
}{
ssh.KeyAlgoRSA, e, k.N,
})
case *ecdsa.PrivateKey:
var curve, keytype string
switch name := k.Curve.Params().Name; name {
case "P-256":
curve = "nistp256"
keytype = ssh.KeyAlgoECDSA256
case "P-384":
curve = "nistp384"
keytype = ssh.KeyAlgoECDSA384
case "P-521":
curve = "nistp521"
keytype = ssh.KeyAlgoECDSA521
default:
return nil, fmt.Errorf("ssh: unknown curve %q", name)
}
pub := elliptic.Marshal(k.Curve, k.X, k.Y)
key := openSSHECDSAPrivateKey{
Curve: curve,
Pub: pub,
D: k.D,
Comment: comment,
}
pk1.Keytype = keytype
pk1.Rest = ssh.Marshal(key)
w.PubKey = ssh.Marshal(struct {
KeyType string
Curve string
Pub []byte
}{
keytype, curve, pub,
})
case ed25519.PrivateKey:
pub, priv := k[32:], k
key := openSSHEd25519PrivateKey{

View File

@@ -7,26 +7,14 @@ import (
"time"
)
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.).
// Modified version of github.com/docker/go-units.HumanDuration
func HumanDuration(d time.Duration) string {
return HumanDurationWithCase(d, true)
}
// HumanDurationWithCase returns a human-readable approximation of a
// duration (eg. "About a minute", "4 hours ago", etc.). but allows
// you to specify whether the first word should be capitalized
// (eg. "About" vs. "about")
func HumanDurationWithCase(d time.Duration, useCaps bool) string {
// humanDuration returns a human-readable approximation of a
// duration (eg. "About a minute", "4 hours ago", etc.).
func humanDuration(d time.Duration) string {
seconds := int(d.Seconds())
switch {
case seconds < 1:
if useCaps {
return "Less than a second"
}
return "less than a second"
return "Less than a second"
case seconds == 1:
return "1 second"
case seconds < 60:
@@ -36,10 +24,7 @@ func HumanDurationWithCase(d time.Duration, useCaps bool) string {
minutes := int(d.Minutes())
switch {
case minutes == 1:
if useCaps {
return "About a minute"
}
return "about a minute"
return "About a minute"
case minutes < 60:
return fmt.Sprintf("%d minutes", minutes)
}
@@ -47,10 +32,7 @@ func HumanDurationWithCase(d time.Duration, useCaps bool) string {
hours := int(math.Round(d.Hours()))
switch {
case hours == 1:
if useCaps {
return "About an hour"
}
return "about an hour"
return "About an hour"
case hours < 48:
return fmt.Sprintf("%d hours", hours)
case hours < 24*7*2:
@@ -65,77 +47,22 @@ func HumanDurationWithCase(d time.Duration, useCaps bool) string {
}
func HumanTime(t time.Time, zeroValue string) string {
return humanTimeWithCase(t, zeroValue, true)
return humanTime(t, zeroValue)
}
func HumanTimeLower(t time.Time, zeroValue string) string {
return humanTimeWithCase(t, zeroValue, false)
return strings.ToLower(humanTime(t, zeroValue))
}
func humanTimeWithCase(t time.Time, zeroValue string, useCaps bool) string {
func humanTime(t time.Time, zeroValue string) string {
if t.IsZero() {
return zeroValue
}
delta := time.Since(t)
if delta < 0 {
return HumanDurationWithCase(-delta, useCaps) + " from now"
return humanDuration(-delta) + " from now"
}
return HumanDurationWithCase(delta, useCaps) + " ago"
}
// ExcatDuration returns a human readable hours/minutes/seconds or milliseconds format of a duration
// the most precise level of duration is milliseconds
func ExactDuration(d time.Duration) string {
if d.Seconds() < 1 {
if d.Milliseconds() == 1 {
return fmt.Sprintf("%d millisecond", d.Milliseconds())
}
return fmt.Sprintf("%d milliseconds", d.Milliseconds())
}
var readableDur strings.Builder
dur := d.String()
// split the default duration string format of 0h0m0s into something nicer to read
h := strings.Split(dur, "h")
if len(h) > 1 {
hours := h[0]
if hours == "1" {
readableDur.WriteString(fmt.Sprintf("%s hour ", hours))
} else {
readableDur.WriteString(fmt.Sprintf("%s hours ", hours))
}
dur = h[1]
}
m := strings.Split(dur, "m")
if len(m) > 1 {
mins := m[0]
switch mins {
case "0":
// skip
case "1":
readableDur.WriteString(fmt.Sprintf("%s minute ", mins))
default:
readableDur.WriteString(fmt.Sprintf("%s minutes ", mins))
}
dur = m[1]
}
s := strings.Split(dur, "s")
if len(s) > 0 {
sec := s[0]
switch sec {
case "0":
// skip
case "1":
readableDur.WriteString(fmt.Sprintf("%s second ", sec))
default:
readableDur.WriteString(fmt.Sprintf("%s seconds ", sec))
}
}
return strings.TrimSpace(readableDur.String())
return humanDuration(delta) + " ago"
}

View File

@@ -11,92 +11,25 @@ func assertEqual(t *testing.T, a interface{}, b interface{}) {
}
}
func TestHumanDuration(t *testing.T) {
day := 24 * time.Hour
week := 7 * day
month := 30 * day
year := 365 * day
assertEqual(t, "Less than a second", HumanDuration(450*time.Millisecond))
assertEqual(t, "Less than a second", HumanDurationWithCase(450*time.Millisecond, true))
assertEqual(t, "less than a second", HumanDurationWithCase(450*time.Millisecond, false))
assertEqual(t, "1 second", HumanDuration(1*time.Second))
assertEqual(t, "45 seconds", HumanDuration(45*time.Second))
assertEqual(t, "46 seconds", HumanDuration(46*time.Second))
assertEqual(t, "59 seconds", HumanDuration(59*time.Second))
assertEqual(t, "About a minute", HumanDuration(60*time.Second))
assertEqual(t, "About a minute", HumanDurationWithCase(1*time.Minute, true))
assertEqual(t, "about a minute", HumanDurationWithCase(1*time.Minute, false))
assertEqual(t, "3 minutes", HumanDuration(3*time.Minute))
assertEqual(t, "35 minutes", HumanDuration(35*time.Minute))
assertEqual(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
assertEqual(t, "45 minutes", HumanDuration(45*time.Minute))
assertEqual(t, "45 minutes", HumanDuration(45*time.Minute+40*time.Second))
assertEqual(t, "46 minutes", HumanDuration(46*time.Minute))
assertEqual(t, "59 minutes", HumanDuration(59*time.Minute))
assertEqual(t, "About an hour", HumanDuration(1*time.Hour))
assertEqual(t, "About an hour", HumanDurationWithCase(1*time.Hour+29*time.Minute, true))
assertEqual(t, "about an hour", HumanDurationWithCase(1*time.Hour+29*time.Minute, false))
assertEqual(t, "2 hours", HumanDuration(1*time.Hour+31*time.Minute))
assertEqual(t, "2 hours", HumanDuration(1*time.Hour+59*time.Minute))
assertEqual(t, "3 hours", HumanDuration(3*time.Hour))
assertEqual(t, "3 hours", HumanDuration(3*time.Hour+29*time.Minute))
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+31*time.Minute))
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+59*time.Minute))
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
assertEqual(t, "24 hours", HumanDuration(24*time.Hour))
assertEqual(t, "36 hours", HumanDuration(1*day+12*time.Hour))
assertEqual(t, "2 days", HumanDuration(2*day))
assertEqual(t, "7 days", HumanDuration(7*day))
assertEqual(t, "13 days", HumanDuration(13*day+5*time.Hour))
assertEqual(t, "2 weeks", HumanDuration(2*week))
assertEqual(t, "2 weeks", HumanDuration(2*week+4*day))
assertEqual(t, "3 weeks", HumanDuration(3*week))
assertEqual(t, "4 weeks", HumanDuration(4*week))
assertEqual(t, "4 weeks", HumanDuration(4*week+3*day))
assertEqual(t, "4 weeks", HumanDuration(1*month))
assertEqual(t, "6 weeks", HumanDuration(1*month+2*week))
assertEqual(t, "2 months", HumanDuration(2*month))
assertEqual(t, "2 months", HumanDuration(2*month+2*week))
assertEqual(t, "3 months", HumanDuration(3*month))
assertEqual(t, "3 months", HumanDuration(3*month+1*week))
assertEqual(t, "5 months", HumanDuration(5*month+2*week))
assertEqual(t, "13 months", HumanDuration(13*month))
assertEqual(t, "23 months", HumanDuration(23*month))
assertEqual(t, "24 months", HumanDuration(24*month))
assertEqual(t, "2 years", HumanDuration(24*month+2*week))
assertEqual(t, "3 years", HumanDuration(3*year+2*month))
}
func TestHumanTime(t *testing.T) {
now := time.Now()
t.Run("zero value", func(t *testing.T) {
assertEqual(t, HumanTime(time.Time{}, "never"), "never")
})
t.Run("time in the future", func(t *testing.T) {
v := now.Add(48 * time.Hour)
assertEqual(t, HumanTime(v, ""), "2 days from now")
})
t.Run("time in the past", func(t *testing.T) {
v := now.Add(-48 * time.Hour)
assertEqual(t, HumanTime(v, ""), "2 days ago")
})
}
func TestExactDuration(t *testing.T) {
assertEqual(t, "1 millisecond", ExactDuration(1*time.Millisecond))
assertEqual(t, "10 milliseconds", ExactDuration(10*time.Millisecond))
assertEqual(t, "1 second", ExactDuration(1*time.Second))
assertEqual(t, "10 seconds", ExactDuration(10*time.Second))
assertEqual(t, "1 minute", ExactDuration(1*time.Minute))
assertEqual(t, "10 minutes", ExactDuration(10*time.Minute))
assertEqual(t, "1 hour", ExactDuration(1*time.Hour))
assertEqual(t, "10 hours", ExactDuration(10*time.Hour))
assertEqual(t, "1 hour 1 second", ExactDuration(1*time.Hour+1*time.Second))
assertEqual(t, "1 hour 10 seconds", ExactDuration(1*time.Hour+10*time.Second))
assertEqual(t, "1 hour 1 minute", ExactDuration(1*time.Hour+1*time.Minute))
assertEqual(t, "1 hour 10 minutes", ExactDuration(1*time.Hour+10*time.Minute))
assertEqual(t, "1 hour 1 minute 1 second", ExactDuration(1*time.Hour+1*time.Minute+1*time.Second))
assertEqual(t, "10 hours 10 minutes 10 seconds", ExactDuration(10*time.Hour+10*time.Minute+10*time.Second))
t.Run("soon", func(t *testing.T) {
v := now.Add(800 * time.Millisecond)
assertEqual(t, HumanTime(v, ""), "Less than a second from now")
})
}

16
go.mod
View File

@@ -4,11 +4,14 @@ go 1.20
require (
github.com/dustin/go-humanize v1.0.1
github.com/emirpasic/gods v1.18.1
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-runewidth v0.0.14
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/olekukonko/tablewriter v0.0.5
github.com/spf13/cobra v1.7.0
golang.org/x/sync v0.3.0
gonum.org/v1/gonum v0.14.0
)
require github.com/rivo/uniseg v0.2.0 // indirect
@@ -16,7 +19,6 @@ require github.com/rivo/uniseg v0.2.0 // indirect
require (
github.com/bytedance/sonic v1.9.1 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/chzyer/readline v1.5.1
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/cors v1.4.0
github.com/gin-contrib/sse v0.1.0 // indirect
@@ -38,12 +40,12 @@ require (
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.10.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0
golang.org/x/text v0.10.0 // indirect
gonum.org/v1/gonum v0.13.0
golang.org/x/crypto v0.14.0
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/term v0.13.0
golang.org/x/text v0.13.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

37
go.sum
View File

@@ -4,12 +4,6 @@ github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZX
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -17,6 +11,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gin-contrib/cors v1.4.0 h1:oJ6gwtUl3lqV0WEIwM/LxPF1QZ5qe2lGWdY2+bz7y0g=
@@ -118,31 +114,34 @@ golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUu
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM=
gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU=
gonum.org/v1/gonum v0.14.0 h1:2NiG67LD1tEH0D7kM+ps2V+fXmsAnpUeec7n8tcr4S0=
gonum.org/v1/gonum v0.14.0/go.mod h1:AoWeoz0becf9QMWtE8iWXNXc27fK4fNeHNf/oMejGfU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=

20
llm/falcon.go Normal file
View File

@@ -0,0 +1,20 @@
package llm
const (
falconModelType7B = 32
falconModelType40B = 60
falconModelType180B = 80
)
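// falconModelType maps a Falcon model's layer count to its parameter-size label.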
func falconModelType(numLayer uint32) string {
switch numLayer {
case falconModelType7B:
return "7B"
case falconModelType40B:
return "40B"
case falconModelType180B:
return "180B"
default:
return "unknown"
}
}

View File

@@ -1,575 +0,0 @@
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ggml-alloc.h"
#include "ggml.h"
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define UNUSED(x) (void)(x)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
//#define GGML_ALLOCATOR_DEBUG
//#define AT_PRINTF printf
#define AT_PRINTF(...) ((void)0)
struct hash_node {
struct ggml_tensor * t;
int n_children;
int n_views;
};
static size_t hash(void * p) {
return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
}
static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_tensor * t) {
size_t h = hash(t);
// linear probing
size_t i = h;
while (hash_table[i].t != NULL) {
if (hash_table[i].t == t) {
return &hash_table[i];
}
i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
if (i == h) {
// hash table is full
GGML_ASSERT(false);
}
}
hash_table[i].t = t;
return &hash_table[i];
}
// TODO: GGML_PAD ?
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
assert(alignment && !(alignment & (alignment - 1))); // power of 2
size_t align = (alignment - (((uintptr_t)buffer + offset) % alignment)) % alignment;
return offset + align;
}
struct free_block {
void * addr;
size_t size;
};
#define MAX_FREE_BLOCKS 128
struct ggml_allocr {
void * data;
size_t size;
size_t alignment;
int n_free_blocks;
struct free_block free_blocks[MAX_FREE_BLOCKS];
struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE];
size_t max_size;
bool measure;
#ifdef GGML_ALLOCATOR_DEBUG
struct ggml_tensor * allocated_tensors[1024];
#endif
};
#ifdef GGML_ALLOCATOR_DEBUG
static void add_allocated_tensor(struct ggml_allocator * alloc, struct ggml_tensor * tensor) {
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i] == NULL) {
alloc->allocated_tensors[i] = tensor;
return;
}
}
GGML_ASSERT(!"out of allocated_tensors");
}
static void remove_allocated_tensor(struct ggml_allocator * alloc, struct ggml_tensor * tensor) {
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i] == tensor ||
(alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
alloc->allocated_tensors[i] = NULL;
return;
}
}
printf("tried to free tensor %s not found\n", tensor->name);
GGML_ASSERT(!"tensor not found");
}
#endif
static size_t ggml_allocator_get_alloc_size(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
return ggml_nbytes(tensor);
UNUSED(alloc);
}
void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
size = aligned_offset(NULL, size, alloc->alignment);
AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
size_t max_avail = 0;
// find the best fitting free block
int best_fit_block = -1;
size_t best_fit_size = SIZE_MAX;
for (int i = 0; i < alloc->n_free_blocks; i++) {
struct free_block * block = &alloc->free_blocks[i];
max_avail = MAX(max_avail, block->size);
if (block->size >= size && block->size <= best_fit_size) {
best_fit_block = i;
best_fit_size = block->size;
}
}
AT_PRINTF("block %d\n", best_fit_block);
if (best_fit_block == -1) {
fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
__func__, size, max_avail);
GGML_ASSERT(!"not enough space in the buffer");
return;
}
struct free_block * block = &alloc->free_blocks[best_fit_block];
void * addr = block->addr;
block->addr = (char*)block->addr + size;
block->size -= size;
if (block->size == 0) {
// remove block if empty
alloc->n_free_blocks--;
for (int j = best_fit_block; j < alloc->n_free_blocks; j++) {
alloc->free_blocks[j] = alloc->free_blocks[j+1];
}
}
tensor->data = addr;
#ifdef GGML_ALLOCATOR_DEBUG
add_allocated_tensor(alloc, tensor);
size_t cur_max = (char*)addr - (char*)alloc->data + size;
if (cur_max > alloc->max_size) {
printf("max_size = %.2f MB: tensors: ", cur_max / 1024.0 / 1024.0);
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i]) {
printf("%s (%.2f MB) ", alloc->allocated_tensors[i]->name, ggml_nbytes(alloc->allocated_tensors[i]) / 1024.0 / 1024.0);
}
}
printf("\n");
}
#endif
alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
}
// this is a very naive implementation, but for our case the number of free blocks should be very small
static void ggml_allocator_free_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
void * ptr = tensor->data;
if (ptr < alloc->data || (char*)ptr >= (char*)alloc->data + alloc->max_size) {
// the tensor was not allocated in this buffer
// this can happen because the graph allocator will try to free weights and other tensors from different buffers
// the easiest way to deal with this is just to ignore it
return;
}
size_t size = ggml_allocator_get_alloc_size(alloc, tensor);
size = aligned_offset(NULL, size, alloc->alignment);
AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);
#ifdef GGML_ALLOCATOR_DEBUG
remove_allocated_tensor(alloc, tensor);
#endif
// see if we can merge with an existing block
for (int i = 0; i < alloc->n_free_blocks; i++) {
struct free_block * block = &alloc->free_blocks[i];
// check if ptr is at the end of the block
if ((char*)block->addr + block->size == ptr) {
block->size += size;
// check if we can merge with the next block
if (i < alloc->n_free_blocks - 1 && (char*)block->addr + block->size == alloc->free_blocks[i+1].addr) {
block->size += alloc->free_blocks[i+1].size;
alloc->n_free_blocks--;
for (int j = i+1; j < alloc->n_free_blocks; j++) {
alloc->free_blocks[j] = alloc->free_blocks[j+1];
}
}
return;
}
// check if ptr is at the beginning of the block
if ((char*)ptr + size == block->addr) {
block->addr = ptr;
block->size += size;
// check if we can merge with the previous block
if (i > 0 && (char*)alloc->free_blocks[i-1].addr + alloc->free_blocks[i-1].size == block->addr) {
alloc->free_blocks[i-1].size += block->size;
alloc->n_free_blocks--;
for (int j = i; j < alloc->n_free_blocks; j++) {
alloc->free_blocks[j] = alloc->free_blocks[j+1];
}
}
return;
}
}
// otherwise, add a new block
GGML_ASSERT(alloc->n_free_blocks < MAX_FREE_BLOCKS && "out of free blocks");
// insert the new block in the correct position to keep the array sorted by address (to make merging blocks faster)
int insert_pos = 0;
while (insert_pos < alloc->n_free_blocks && alloc->free_blocks[insert_pos].addr < ptr) {
insert_pos++;
}
// shift all blocks from insert_pos onward to make room for the new block
for (int i = alloc->n_free_blocks; i > insert_pos; i--) {
alloc->free_blocks[i] = alloc->free_blocks[i-1];
}
// insert the new block
alloc->free_blocks[insert_pos].addr = ptr;
alloc->free_blocks[insert_pos].size = size;
alloc->n_free_blocks++;
}
void ggml_allocr_reset(struct ggml_allocr * alloc) {
alloc->n_free_blocks = 1;
size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
alloc->free_blocks[0].size = alloc->size - align_offset;
}
struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) {
struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
*alloc = (struct ggml_allocr){
/*.data = */ data,
/*.size = */ size,
/*.alignment = */ alignment,
/*.n_free_blocks = */ 0,
/*.free_blocks = */ {{0}},
/*.hash_table = */ {{0}},
/*.max_size = */ 0,
/*.measure = */ false,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ = {0},
#endif
};
ggml_allocr_reset(alloc);
return alloc;
}
// address and size of the buffer when measuring
// it needs to be large enough to fit all the tensors, but it cannot overlap with other existing buffers
static void * const MEASURE_BASE_ADDR = (void *) 0x1000;
static const size_t MEASURE_MAX_SIZE = 1ULL<<40; // 1 TB
struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
*alloc = (struct ggml_allocr){
/*.data = */ MEASURE_BASE_ADDR,
/*.size = */ MEASURE_MAX_SIZE,
/*.alignment = */ alignment,
/*.n_free_blocks = */ 0,
/*.free_blocks = */ {{0}},
/*.hash_table = */ {{0}},
/*.max_size = */ 0,
/*.measure = */ true,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ = {0},
#endif
};
ggml_allocr_reset(alloc);
return alloc;
}
void ggml_allocr_free(struct ggml_allocr * alloc) {
free(alloc);
}
bool ggml_allocr_is_measure(struct ggml_allocr * alloc) {
return alloc->measure;
}
//////////// compute graph allocator
static bool ggml_is_view(struct ggml_tensor * t) {
return t->op == GGML_OP_RESHAPE || t->op == GGML_OP_VIEW || t->op == GGML_OP_TRANSPOSE ||
t->op == GGML_OP_PERMUTE || t->op == GGML_OP_CPY;
}
static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
if (a->type != b->type) {
return false;
}
for (int i = 0; i < GGML_MAX_DIMS; i++) {
if (a->ne[i] != b->ne[i]) {
return false;
}
if (a->nb[i] != b->nb[i]) {
return false;
}
}
return true;
}
static struct ggml_tensor * get_view_parent(struct ggml_tensor * t) {
switch (t->op) {
case GGML_OP_PERMUTE:
case GGML_OP_RESHAPE:
case GGML_OP_TRANSPOSE:
case GGML_OP_VIEW:
return t->src[0];
case GGML_OP_CPY:
return t->src[1];
default:
return NULL;
}
}
static struct ggml_tensor * get_view_source(struct ggml_tensor * t) {
struct ggml_tensor * parent = t;
do {
parent = get_view_parent(parent);
} while (ggml_is_view(parent));
return parent;
}
static bool ggml_op_can_inplace(enum ggml_op op) {
switch (op) {
case GGML_OP_SCALE:
case GGML_OP_DIAG_MASK_ZERO:
case GGML_OP_DIAG_MASK_INF:
case GGML_OP_ADD:
case GGML_OP_ADD1:
case GGML_OP_ACC:
case GGML_OP_SUB:
case GGML_OP_MUL:
case GGML_OP_DIV:
case GGML_OP_SQR:
case GGML_OP_SQRT:
case GGML_OP_LOG:
case GGML_OP_UNARY:
case GGML_OP_ROPE:
case GGML_OP_RMS_NORM:
case GGML_OP_SET:
case GGML_OP_SOFT_MAX:
case GGML_OP_CONT:
return true;
default:
return false;
}
}
static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) {
struct hash_node * ht = alloc->hash_table;
if (node->data == NULL) {
if (ggml_is_view(node)) {
size_t offset;
switch(node->op) {
case GGML_OP_VIEW:
memcpy(&offset, node->op_params, sizeof(size_t));
node->data = (char *) node->src[0]->data + offset;
break;
case GGML_OP_PERMUTE:
case GGML_OP_RESHAPE:
case GGML_OP_TRANSPOSE:
node->data = node->src[0]->data;
break;
case GGML_OP_CPY:
node->data = node->src[1]->data;
break;
default:
GGML_ASSERT(!"unknown view op");
break;
}
} else {
// see if we can reuse a parent's buffer (inplace)
if (ggml_op_can_inplace(node->op)) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
struct ggml_tensor * parent = node->src[i];
if (parent == NULL) {
break;
}
// if the node's data is external, then we cannot re-use it
if ((char *) parent->data < (char *) alloc->data ||
(char *) parent->data >= ((char *) alloc->data + alloc->size)) {
AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
continue;
}
struct hash_node * p_hn = hash_get(ht, parent);
if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) {
if (ggml_is_view(parent)) {
struct ggml_tensor * view_src = get_view_source(parent);
struct hash_node * view_src_hn = hash_get(ht, view_src);
if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
// TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
// the parent's data that it will need later (same layout requirement). the problem is that then
// we cannot free the tensor because the original address of the allocation is lost.
// adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
// for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
node->data = parent->data;
return;
}
}
else {
AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
node->data = parent->data;
}
return;
}
}
}
ggml_allocr_alloc(alloc, node);
}
}
}
static size_t ggml_allocator_alloc_graph_tensors_n(
struct ggml_allocr * alloc,
struct ggml_cgraph ** graphs, int n_graphs,
struct ggml_tensor *** inputs, struct ggml_tensor *** outputs) {
// reset hash table
struct hash_node * ht = alloc->hash_table;
memset(ht, 0, sizeof(struct hash_node) * GGML_GRAPH_HASHTABLE_SIZE);
// count number of children and views
for (int g = 0; g < n_graphs; g++) {
struct ggml_cgraph * gf = graphs[g];
for (int i = 0; i < gf->n_nodes; i++) {
struct ggml_tensor * node = gf->nodes[i];
if (ggml_is_view(node)) {
struct ggml_tensor * view_src = get_view_source(node);
hash_get(ht, view_src)->n_views += 1;
}
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
hash_get(ht, parent)->n_children += 1;
}
}
}
// allocate tensors
for (int g = 0; g < n_graphs; g++) {
struct ggml_cgraph * gf = graphs[g];
AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
// graph inputs are allocated first to ensure that they are not overwritten by each other
if (inputs != NULL && inputs[g] != NULL) {
for (int i = 0; inputs[g][i] != NULL; i++) {
struct ggml_tensor * input = inputs[g][i];
AT_PRINTF("input: %s\n", input->name);
allocate_node(alloc, input);
}
}
for (int i = 0; i < gf->n_nodes; i++) {
struct ggml_tensor * node = gf->nodes[i];
// allocate parents (leafs)
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
allocate_node(alloc, parent);
}
// allocate node
allocate_node(alloc, node);
AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
AT_PRINTF("%s", parent->name);
if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
AT_PRINTF(", ");
}
}
AT_PRINTF("\n");
// update parents
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
struct hash_node * p_hn = hash_get(ht, parent);
p_hn->n_children -= 1;
//AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
if (p_hn->n_children == 0 && p_hn->n_views == 0) {
if (ggml_is_view(parent)) {
struct ggml_tensor * view_src = get_view_source(parent);
struct hash_node * view_src_hn = hash_get(ht, view_src);
view_src_hn->n_views -= 1;
AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src->n_children, view_src->n_views);
if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
ggml_allocator_free_tensor(alloc, view_src);
}
}
else {
if (parent->data != node->data) {
ggml_allocator_free_tensor(alloc, parent);
}
}
}
}
AT_PRINTF("\n");
}
// free graph outputs here that wouldn't be freed otherwise because they have no children
if (outputs != NULL && outputs[g] != NULL) {
for (int i = 0; outputs[g][i] != NULL; i++) {
struct ggml_tensor * output = outputs[g][i];
AT_PRINTF("output: %s\n", output->name);
ggml_allocator_free_tensor(alloc, output);
}
}
}
return alloc->max_size;
}
size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph) {
return ggml_allocator_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
}
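For orientation, ggml_allocr_alloc_graph is normally run twice: once against a "measure" allocator (ggml_allocr_new_measure), which only simulates offsets and returns the worst-case buffer size, and once against a real allocator of that size, which assigns actual addresses and reuses parent buffers in place wherever ggml_op_can_inplace permits. A minimal sketch of that two-pass pattern, assuming a hypothetical build_graph() helper and an illustrative alignment value (neither comes from the code above):

#include <stdlib.h>
#include "ggml.h"
#include "ggml-alloc.h"   // header name assumed

extern struct ggml_cgraph * build_graph(struct ggml_context * ctx);  // hypothetical graph builder

static void eval_with_allocr(struct ggml_context * ctx) {
    const size_t alignment = 32;  // illustrative; the backend dictates the real value

    // pass 1: measure - nothing is written, offsets are only simulated
    struct ggml_allocr * measure = ggml_allocr_new_measure(alignment);
    size_t mem_size = ggml_allocr_alloc_graph(measure, build_graph(ctx));
    ggml_allocr_free(measure);

    // pass 2: allocate the same graph inside a buffer of the measured size
    void * buf = malloc(mem_size);
    struct ggml_allocr * alloc = ggml_allocr_new(buf, mem_size, alignment);
    struct ggml_cgraph * gf = build_graph(ctx);  // rebuilt so tensor->data starts out NULL
    ggml_allocr_alloc_graph(alloc, gf);

    // ... evaluate gf with the CPU or a GPU backend ...

    ggml_allocr_free(alloc);
    free(buf);
}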


@@ -1,48 +0,0 @@
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C" {
#endif
GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment);
GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);
GGML_API void ggml_allocr_free(struct ggml_allocr * alloc);
GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc);
GGML_API void ggml_allocr_reset(struct ggml_allocr * alloc);
GGML_API void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor);
GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph);
#ifdef __cplusplus
}
#endif
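A practical note on ggml_allocr_is_measure: during the measure pass tensor->data is only a simulated offset, so input data must not be copied in until the allocator is a real one. The usual guard, sketched as a fragment (ctx, alloc, tokens and n_tokens are hypothetical, and <string.h> is assumed for memcpy):

struct ggml_tensor * inp = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_tokens);
ggml_allocr_alloc(alloc, inp);
if (!ggml_allocr_is_measure(alloc)) {
    // only now does inp->data point into the real buffer handed to ggml_allocr_new()
    memcpy(inp->data, tokens, n_tokens * ggml_element_size(inp));
}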

File diff suppressed because it is too large.


@@ -1,63 +0,0 @@
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C" {
#endif
#define GGML_CUDA_MAX_DEVICES 16
void ggml_init_cublas(void);
void ggml_cuda_set_tensor_split(const float * tensor_split);
void ggml_cuda_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
size_t ggml_cuda_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
void ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
// TODO: export these with GGML_API
void * ggml_cuda_host_malloc(size_t size);
void ggml_cuda_host_free(void * ptr);
void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);
void ggml_cuda_free_data(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
void ggml_cuda_set_main_device(int main_device);
void ggml_cuda_set_mul_mat_q(bool mul_mat_q);
void ggml_cuda_set_scratch_size(size_t scratch_size);
void ggml_cuda_free_scratch(void);
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
#ifdef __cplusplus
}
#endif
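Of the calls above, ggml_cuda_host_malloc and ggml_cuda_host_free deal in pinned (page-locked) host memory, which speeds up host-to-device copies; the ggml_cuda_assign_buffers* family then decides which tensors live on the GPU. A small sketch of the pinned-buffer pattern, assuming the allocation returns NULL on failure (the fallback policy is illustrative, not taken from this header):

#include <stdbool.h>
#include <stdlib.h>

static void * alloc_staging_buffer(size_t buf_size, bool * pinned) {
    void * host_buf = ggml_cuda_host_malloc(buf_size);  // assumed to return NULL when pinning fails
    *pinned = host_buf != NULL;
    if (!*pinned) {
        host_buf = malloc(buf_size);  // plain memory still works, just copies more slowly
    }
    return host_buf;
}

static void free_staging_buffer(void * host_buf, bool pinned) {
    if (pinned) {
        ggml_cuda_host_free(host_buf);
    } else {
        free(host_buf);
    }
}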


@@ -1,106 +0,0 @@
//go:build darwin
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// An interface allowing to compute ggml_cgraph with Metal
//
// This is a fully functional interface that extends ggml with GPU support for Apple devices.
// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
//
// How it works?
//
// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
//
// You only need to make sure that all memory buffers that you used during the graph creation
// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
// used during the graph evaluation to determine the arguments of the compute kernels.
//
// Synchronization between device and host memory (for example for input and output tensors)
// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
//
#pragma once
#include <stddef.h>
#include <stdbool.h>
// max memory buffers that can be mapped to the device
#define GGML_METAL_MAX_BUFFERS 16
struct ggml_tensor;
struct ggml_cgraph;
#ifdef __cplusplus
extern "C" {
#endif
struct ggml_metal_context;
// number of command buffers to use
struct ggml_metal_context * ggml_metal_init(int n_cb);
void ggml_metal_free(struct ggml_metal_context * ctx);
// set the number of command buffers to use
void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb);
// creates a mapping between a host memory buffer and a device memory buffer
// - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute
// - the mapping is used during computation to determine the arguments of the compute kernels
// - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
// - max_size specifies the maximum size of a tensor and is used to create shared views such
// that it is guaranteed that the tensor will fit in at least one of the views
//
bool ggml_metal_add_buffer(
struct ggml_metal_context * ctx,
const char * name,
void * data,
size_t size,
size_t max_size);
// set data from host memory into the device
void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
// get data from the device into host memory
void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
// try to find operations that can be run concurrently in the graph
// you should run it again if the topology of your graph changes
void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
// if the graph has been optimized for concurrently dispatch
bool ggml_metal_if_optimized(struct ggml_metal_context * ctx);
// same as ggml_graph_compute but uses Metal
// creates gf->n_threads command buffers in parallel
void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
#ifdef __cplusplus
}
#endif
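Putting the comments above together, the expected call order is: create the context, map every host buffer the graph's tensors live in, push inputs, evaluate, pull outputs, free. A sketch under those assumptions; the buffer pointers, sizes and tensor arguments are hypothetical placeholders:

#include "ggml.h"
#include "ggml-metal.h"   // header name assumed

static void eval_on_metal(struct ggml_cgraph * gf,
                          void * model_buf, size_t model_buf_size, size_t max_tensor_size,
                          void * eval_buf,  size_t eval_buf_size,
                          struct ggml_tensor * inp_tokens, struct ggml_tensor * logits) {
    struct ggml_metal_context * mctx = ggml_metal_init(1);  // one command buffer

    // map every host buffer used while building the graph, as the comments above require
    ggml_metal_add_buffer(mctx, "data", model_buf, model_buf_size, max_tensor_size);
    ggml_metal_add_buffer(mctx, "eval", eval_buf,  eval_buf_size,  0);

    ggml_metal_set_tensor(mctx, inp_tokens);  // host -> device for the input
    ggml_metal_graph_compute(mctx, gf);       // evaluate the same ggml_cgraph, on the GPU
    ggml_metal_get_tensor(mctx, logits);      // device -> host for the output

    ggml_metal_free(mctx);
}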

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,244 +0,0 @@
//go:build mpi
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ggml-mpi.h"
#include "ggml.h"
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define UNUSED GGML_UNUSED
struct ggml_mpi_context {
int rank;
int size;
};
void ggml_mpi_backend_init(void) {
MPI_Init(NULL, NULL);
}
void ggml_mpi_backend_free(void) {
MPI_Finalize();
}
struct ggml_mpi_context * ggml_mpi_init(void) {
struct ggml_mpi_context * ctx = calloc(1, sizeof(struct ggml_mpi_context));
MPI_Comm_rank(MPI_COMM_WORLD, &ctx->rank);
MPI_Comm_size(MPI_COMM_WORLD, &ctx->size);
return ctx;
}
void ggml_mpi_free(struct ggml_mpi_context * ctx) {
free(ctx);
}
int ggml_mpi_rank(struct ggml_mpi_context * ctx) {
return ctx->rank;
}
void ggml_mpi_eval_init(
struct ggml_mpi_context * ctx_mpi,
int * n_tokens,
int * n_past,
int * n_threads) {
UNUSED(ctx_mpi);
// synchronize the worker node parameters with the root node
MPI_Barrier(MPI_COMM_WORLD);
MPI_Bcast(n_tokens, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(n_past, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(n_threads, 1, MPI_INT, 0, MPI_COMM_WORLD);
}
static int ggml_graph_get_node_idx(struct ggml_cgraph * gf, const char * name) {
struct ggml_tensor * t = ggml_graph_get_tensor(gf, name);
if (t == NULL) {
fprintf(stderr, "%s: tensor %s not found\n", __func__, name);
return -1;
}
for (int i = 0; i < gf->n_nodes; i++) {
if (gf->nodes[i] == t) {
return i;
}
}
fprintf(stderr, "%s: tensor %s not found in graph (should not happen)\n", __func__, name);
return -1;
}
static void ggml_mpi_tensor_send(struct ggml_tensor * t, int mpi_rank_dst) {
MPI_Datatype mpi_type;
switch (t->type) {
case GGML_TYPE_I32: mpi_type = MPI_INT32_T; break;
case GGML_TYPE_F32: mpi_type = MPI_FLOAT; break;
default: GGML_ASSERT(false && "not implemented");
}
const int retval = MPI_Send(t->data, ggml_nelements(t), mpi_type, mpi_rank_dst, 0, MPI_COMM_WORLD);
GGML_ASSERT(retval == MPI_SUCCESS);
}
static void ggml_mpi_tensor_recv(struct ggml_tensor * t, int mpi_rank_src) {
MPI_Datatype mpi_type;
switch (t->type) {
case GGML_TYPE_I32: mpi_type = MPI_INT32_T; break;
case GGML_TYPE_F32: mpi_type = MPI_FLOAT; break;
default: GGML_ASSERT(false && "not implemented");
}
MPI_Status status; UNUSED(status);
const int retval = MPI_Recv(t->data, ggml_nelements(t), mpi_type, mpi_rank_src, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
GGML_ASSERT(retval == MPI_SUCCESS);
}
// TODO: there are many improvements that can be done to this implementation
void ggml_mpi_graph_compute_pre(
struct ggml_mpi_context * ctx_mpi,
struct ggml_cgraph * gf,
int n_layers) {
const int mpi_rank = ctx_mpi->rank;
const int mpi_size = ctx_mpi->size;
struct ggml_tensor * inp_tokens = ggml_graph_get_tensor(gf, "inp_tokens");
if (inp_tokens == NULL) {
fprintf(stderr, "%s: tensor 'inp_tokens' not found\n", __func__);
return;
}
struct ggml_tensor * inp0 = ggml_graph_get_tensor(gf, "layer_inp_0");
if (inp0 == NULL) {
fprintf(stderr, "%s: tensor 'inp0' not found\n", __func__);
return;
}
GGML_ASSERT(inp0 == gf->nodes[0]);
// distribute the compute graph into slices across the MPI nodes
//
// the main node (0) processes the last layers + the remainder of the compute graph
// and is responsible to pass the input tokens to the first node (1)
//
// node 1: [( 0) * n_per_node, ( 1) * n_per_node)
// node 2: [( 1) * n_per_node, ( 2) * n_per_node)
// ...
// node n-1: [(n-2) * n_per_node, (n-1) * n_per_node)
// node 0: [(n-1) * n_per_node, n_nodes)
//
if (mpi_rank > 0) {
if (mpi_rank == 1) {
// the first node (1) receives the input tokens from the main node (0)
ggml_mpi_tensor_recv(inp_tokens, 0);
} else {
// recv input data for each node into the "inp0" tensor (i.e. the first node in the compute graph)
ggml_mpi_tensor_recv(inp0, mpi_rank - 1);
}
} else if (mpi_size > 1) {
// node 0 sends the input tokens to node 1
ggml_mpi_tensor_send(inp_tokens, 1);
// recv the output data from the last node
ggml_mpi_tensor_recv(inp0, mpi_size - 1);
}
{
const int n_per_node = (n_layers + (mpi_size - 1)) / mpi_size;
const int mpi_idx = mpi_rank > 0 ? mpi_rank - 1 : mpi_size - 1;
const int il0 = (mpi_idx + 0) * n_per_node;
const int il1 = MIN(n_layers, (mpi_idx + 1) * n_per_node);
char name_l0[GGML_MAX_NAME];
char name_l1[GGML_MAX_NAME];
snprintf(name_l0, sizeof(name_l0), "layer_inp_%d", il0);
snprintf(name_l1, sizeof(name_l1), "layer_inp_%d", il1);
const int idx_l0 = ggml_graph_get_node_idx(gf, name_l0);
const int idx_l1 = mpi_rank > 0 ? ggml_graph_get_node_idx(gf, name_l1) + 1 : gf->n_nodes;
if (idx_l0 < 0 || idx_l1 < 0) {
fprintf(stderr, "%s: layer input nodes not found\n", __func__);
return;
}
// attach the input data to all nodes that need it
// TODO: not great - should be able to do this without modifying the compute graph (see next TODO below)
for (int i = idx_l0; i < idx_l1; i++) {
if (gf->nodes[i]->src[0] == gf->nodes[idx_l0]) {
gf->nodes[i]->src[0] = inp0;
}
if (gf->nodes[i]->src[1] == gf->nodes[idx_l0]) {
gf->nodes[i]->src[1] = inp0;
}
}
// TODO: instead of rearranging the nodes, we should be able to execute a subset of the compute graph
for (int i = 1; i < idx_l1 - idx_l0; i++) {
gf->nodes[i] = gf->nodes[idx_l0 + i];
gf->grads[i] = gf->grads[idx_l0 + i];
}
// the first node performs the "get_rows" operation, the rest of the nodes get the data from the previous node
if (mpi_idx != 0) {
gf->nodes[0]->op = GGML_OP_NONE;
}
gf->n_nodes = idx_l1 - idx_l0;
//fprintf(stderr, "%s: node %d: processing %d nodes [%d, %d)\n", __func__, mpi_rank, gf->n_nodes, il0, il1);
}
}
void ggml_mpi_graph_compute_post(
struct ggml_mpi_context * ctx_mpi,
struct ggml_cgraph * gf,
int n_layers) {
UNUSED(n_layers);
const int mpi_rank = ctx_mpi->rank;
const int mpi_size = ctx_mpi->size;
// send the output data to the next node
if (mpi_rank > 0) {
ggml_mpi_tensor_send(gf->nodes[gf->n_nodes - 1], (mpi_rank + 1) % mpi_size);
}
}
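In this scheme every rank builds the full compute graph each step: ggml_mpi_graph_compute_pre then trims it to that rank's slice of layers and receives the incoming activations, and ggml_mpi_graph_compute_post forwards the slice's output to the next rank, with rank 0 handling the input tokens and the final layers. A sketch of one evaluation step; build_llama_graph, ctx and n_layers are hypothetical, and the CPU call assumes ggml_graph_compute_with_ctx is available in this ggml version:

extern struct ggml_cgraph * build_llama_graph(struct ggml_context * ctx, int n_tokens, int n_past);  // hypothetical

static void mpi_eval_once(struct ggml_context * ctx, int n_layers) {
    ggml_mpi_backend_init();                                  // once per process; wraps MPI_Init
    struct ggml_mpi_context * mpi = ggml_mpi_init();

    int n_tokens = 8, n_past = 0, n_threads = 4;              // rank 0's values; workers receive them
    ggml_mpi_eval_init(mpi, &n_tokens, &n_past, &n_threads);  // broadcast from rank 0, as above

    struct ggml_cgraph * gf = build_llama_graph(ctx, n_tokens, n_past);  // full graph on every rank
    ggml_mpi_graph_compute_pre(mpi, gf, n_layers);            // keep only this rank's layer slice, recv activations
    ggml_graph_compute_with_ctx(ctx, gf, n_threads);          // ordinary CPU evaluation of that slice (assumed API)
    ggml_mpi_graph_compute_post(mpi, gf, n_layers);           // forward the slice's output to the next rank

    ggml_mpi_free(mpi);
    ggml_mpi_backend_free();                                  // wraps MPI_Finalize
}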


@@ -1,67 +0,0 @@
//go:build mpi
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
struct ggml_context;
struct ggml_tensor;
struct ggml_cgraph;
#ifdef __cplusplus
extern "C" {
#endif
struct ggml_mpi_context;
void ggml_mpi_backend_init(void);
void ggml_mpi_backend_free(void);
struct ggml_mpi_context * ggml_mpi_init(void);
void ggml_mpi_free(struct ggml_mpi_context * ctx);
int ggml_mpi_rank(struct ggml_mpi_context * ctx);
void ggml_mpi_eval_init(
struct ggml_mpi_context * ctx_mpi,
int * n_tokens,
int * n_past,
int * n_threads);
void ggml_mpi_graph_compute_pre(
struct ggml_mpi_context * ctx_mpi,
struct ggml_cgraph * gf,
int n_layers);
void ggml_mpi_graph_compute_post(
struct ggml_mpi_context * ctx_mpi,
struct ggml_cgraph * gf,
int n_layers);
#ifdef __cplusplus
}
#endif

File diff suppressed because it is too large.


@@ -1,53 +0,0 @@
//go:build opencl
/**
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C" {
#endif
void ggml_cl_init(void);
void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
void * ggml_cl_host_malloc(size_t size);
void ggml_cl_host_free(void * ptr);
void ggml_cl_free_data(const struct ggml_tensor* tensor);
void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);
#ifdef __cplusplus
}
#endif
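These hooks mirror the CUDA offload pattern: the CPU matmul path asks ggml_cl_can_mul_mat whether a tensor pair is worth sending to the GPU, sizes a work area with ggml_cl_mul_mat_get_wsize, and hands it to ggml_cl_mul_mat. A sketch of that dispatch; src0, src1 and dst are hypothetical tensors, and real code would reuse a persistent scratch buffer instead of calling malloc per operation:

#include <stdlib.h>

static void mul_mat_maybe_offload(const struct ggml_tensor * src0,
                                  const struct ggml_tensor * src1,
                                  struct ggml_tensor * dst) {
    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
        size_t wsize = ggml_cl_mul_mat_get_wsize(src0, src1, dst);
        void * wdata = wsize > 0 ? malloc(wsize) : NULL;  // illustrative; see the scratch-buffer note above
        ggml_cl_mul_mat(src0, src1, dst, wdata, wsize);
        free(wdata);
    } else {
        // fall back to the CPU matmul path
    }
}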

llm/ggml.c (18722 lines changed)

File diff suppressed because it is too large.


@@ -3,69 +3,96 @@ package llm
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
type ModelFamily string
type ModelType uint32
const (
ModelType3B ModelType = 26
ModelType7B ModelType = 32
ModelType13B ModelType = 40
ModelType30B ModelType = 60
ModelType65B ModelType = 80
)
func (mt ModelType) String() string {
switch mt {
case ModelType3B:
return "3B"
case ModelType7B:
return "7B"
case ModelType13B:
return "13B"
case ModelType30B:
return "30B"
case ModelType65B:
return "65B"
default:
return "Unknown"
}
}
type FileType interface {
String() string
}
type GGML struct {
magic uint32
container
model
}
const (
fileTypeF32 uint32 = iota
fileTypeF16
fileTypeQ4_0
fileTypeQ4_1
fileTypeQ4_1_F16
fileTypeQ8_0 uint32 = iota + 2
fileTypeQ5_0
fileTypeQ5_1
fileTypeQ2_K
fileTypeQ3_K_S
fileTypeQ3_K_M
fileTypeQ3_K_L
fileTypeQ4_K_S
fileTypeQ4_K_M
fileTypeQ5_K_S
fileTypeQ5_K_M
fileTypeQ6_K
)
func fileType(fileType uint32) string {
switch fileType {
case fileTypeF32:
return "F32"
case fileTypeF16:
return "F16"
case fileTypeQ4_0:
return "Q4_0"
case fileTypeQ4_1:
return "Q4_1"
case fileTypeQ4_1_F16:
return "Q4_1_F16"
case fileTypeQ8_0:
return "Q8_0"
case fileTypeQ5_0:
return "Q5_0"
case fileTypeQ5_1:
return "Q5_1"
case fileTypeQ2_K:
return "Q2_K"
case fileTypeQ3_K_S:
return "Q3_K_S"
case fileTypeQ3_K_M:
return "Q3_K_M"
case fileTypeQ3_K_L:
return "Q3_K_L"
case fileTypeQ4_K_S:
return "Q4_K_S"
case fileTypeQ4_K_M:
return "Q4_K_M"
case fileTypeQ5_K_S:
return "Q5_K_S"
case fileTypeQ5_K_M:
return "Q5_K_M"
case fileTypeQ6_K:
return "Q6_K"
default:
return "unknown"
}
}
type model interface {
ModelFamily() ModelFamily
ModelType() ModelType
FileType() FileType
ModelFamily() string
ModelType() string
FileType() string
NumLayers() int64
}
type container interface {
Name() string
Decode(io.Reader) error
Decode(io.Reader) (model, error)
}
type containerGGML struct {
}
type containerGGML struct{}
func (c *containerGGML) Name() string {
return "ggml"
}
func (c *containerGGML) Decode(r io.Reader) error {
return nil
func (c *containerGGML) Decode(r io.Reader) (model, error) {
return nil, nil
}
type containerGGMF struct {
@@ -76,18 +103,18 @@ func (c *containerGGMF) Name() string {
return "ggmf"
}
func (c *containerGGMF) Decode(r io.Reader) error {
func (c *containerGGMF) Decode(r io.Reader) (model, error) {
var version uint32
binary.Read(r, binary.LittleEndian, &version)
switch version {
case 1:
default:
return errors.New("invalid version")
return nil, errors.New("invalid version")
}
c.version = version
return nil
return nil, nil
}
type containerGGJT struct {
@@ -98,18 +125,22 @@ func (c *containerGGJT) Name() string {
return "ggjt"
}
func (c *containerGGJT) Decode(r io.Reader) error {
func (c *containerGGJT) Decode(r io.Reader) (model, error) {
var version uint32
binary.Read(r, binary.LittleEndian, &version)
switch version {
case 1, 2, 3:
default:
return errors.New("invalid version")
return nil, errors.New("invalid version")
}
c.version = version
return nil
// different model types may have different layouts for hyperparameters
var llama llamaModel
binary.Read(r, binary.LittleEndian, &llama.hyperparameters)
return &llama, nil
}
type containerLORA struct {
@@ -120,32 +151,35 @@ func (c *containerLORA) Name() string {
return "ggla"
}
func (c *containerLORA) Decode(r io.Reader) error {
func (c *containerLORA) Decode(r io.Reader) (model, error) {
var version uint32
binary.Read(r, binary.LittleEndian, &version)
switch version {
case 1:
default:
return errors.New("invalid version")
return nil, errors.New("invalid version")
}
c.version = version
return nil
return nil, nil
}
const (
// / Magic constant for `ggml` files (unversioned).
// Magic constant for `ggml` files (unversioned).
FILE_MAGIC_GGML = 0x67676d6c
// / Magic constant for `ggml` files (versioned, ggmf).
// Magic constant for `ggml` files (versioned, ggmf).
FILE_MAGIC_GGMF = 0x67676d66
// / Magic constant for `ggml` files (versioned, ggjt).
// Magic constant for `ggml` files (versioned, ggjt).
FILE_MAGIC_GGJT = 0x67676a74
// / Magic constant for `ggla` files (LoRA adapter).
// Magic constant for `ggla` files (LoRA adapter).
FILE_MAGIC_GGLA = 0x67676C61
// Magic constant for `gguf` files (versioned, gguf)
FILE_MAGIC_GGUF_LE = 0x46554747
FILE_MAGIC_GGUF_BE = 0x47475546
)
func DecodeGGML(r io.ReadSeeker, hint ModelFamily) (*GGML, error) {
func DecodeGGML(r io.ReadSeeker) (*GGML, error) {
var ggml GGML
binary.Read(r, binary.LittleEndian, &ggml.magic)
@@ -158,24 +192,20 @@ func DecodeGGML(r io.ReadSeeker, hint ModelFamily) (*GGML, error) {
ggml.container = &containerGGJT{}
case FILE_MAGIC_GGLA:
ggml.container = &containerLORA{}
case FILE_MAGIC_GGUF_LE:
ggml.container = &containerGGUF{bo: binary.LittleEndian}
case FILE_MAGIC_GGUF_BE:
ggml.container = &containerGGUF{bo: binary.BigEndian}
default:
return nil, errors.New("invalid file magic")
}
if err := ggml.Decode(r); err != nil {
model, err := ggml.Decode(r)
if err != nil {
return nil, err
}
// different model types may have different layouts for hyperparameters
switch hint {
case ModelFamilyLlama:
var llama llamaModel
binary.Read(r, binary.LittleEndian, &llama.hyperparameters)
ggml.model = &llama
// TODO: sanity check hyperparameters
default:
return nil, fmt.Errorf("unsupported model type: %s", hint)
}
ggml.model = model
// final model type
return &ggml, nil
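DecodeGGML dispatches purely on the leading 4-byte magic, so the container family of a model file can be identified without the Go decoder at all. A standalone C sketch using only the constants listed above; it assumes a little-endian host so that a raw 4-byte read matches the binary.LittleEndian read in DecodeGGML:

#include <stdint.h>
#include <stdio.h>

// map the leading magic to a container name, mirroring the switch in DecodeGGML
static const char * container_name(uint32_t magic) {
    switch (magic) {
        case 0x67676d6c: return "ggml (unversioned)";
        case 0x67676d66: return "ggmf";
        case 0x67676a74: return "ggjt";
        case 0x67676C61: return "ggla (LoRA adapter)";
        case 0x46554747: return "gguf (little endian)";
        case 0x47475546: return "gguf (big endian)";
        default:         return "unknown";
    }
}

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model-file>\n", argv[0]);
        return 1;
    }
    FILE * f = fopen(argv[1], "rb");
    if (f == NULL) {
        perror("fopen");
        return 1;
    }
    uint32_t magic = 0;
    if (fread(&magic, sizeof(magic), 1, f) != 1) {  // first four bytes of the file
        fprintf(stderr, "failed to read magic\n");
        fclose(f);
        return 1;
    }
    fclose(f);
    printf("container: %s\n", container_name(magic));
    return 0;
}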

llm/ggml.h (1780 lines changed)

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.