Compare commits

...

154 Commits

Author SHA1 Message Date
Patrick Devine
08b900250f vendor in progress bar and change to bytes instead of bibytes 2023-07-19 15:53:26 -07:00
Michael Chiang
d53988f619 Merge pull request #128 from jmorganca/mchiang0610-patch-1
Update modelfile.md
2023-07-19 13:40:39 -07:00
Michael Chiang
ac88ab48d9 update 2023-07-19 13:37:21 -07:00
Michael Yang
84c6ee8cc6 Merge pull request #104 from jmorganca/interactive-readline
use readline
2023-07-19 13:36:24 -07:00
Michael Yang
dbc90576b8 add verbose/quiet commands 2023-07-19 13:34:56 -07:00
Michael Yang
84200dcde6 use readline 2023-07-19 13:34:56 -07:00
Michael Chiang
e54c08da89 updating prompt 2023-07-19 13:34:40 -07:00
Michael Chiang
31413857ea organizing examples 2023-07-19 13:25:14 -07:00
Michael Chiang
25f874c030 Update modelfile.md 2023-07-19 12:48:57 -07:00
Jeffrey Morgan
10d502611f fix discord link in README.md 2023-07-19 12:31:48 -07:00
Jeffrey Morgan
7fe4103b94 add discord link, remove repeated text 2023-07-19 12:28:50 -07:00
Michael Chiang
7fbdc8e2c1 Update modelfile.md 2023-07-19 11:38:06 -07:00
Eva Ho
9c5572d51f add discord link back 2023-07-19 13:03:26 -04:00
Matt Williams
75eb28f574 Merge pull request #125 from jmorganca/matt/addlicensetomodelfiledoc
Updated modelfile doc to include license
2023-07-19 08:57:06 -07:00
Patrick Devine
56b6a1720f add llama2:13b model to the readme (#126) 2023-07-19 08:21:28 -07:00
Eva Ho
dfceca48a7 update icons to have different images for bright and dark mode 2023-07-19 11:14:43 -04:00
Matt Williams
bbb67002c3 get rid of latest
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-07-19 07:40:40 -07:00
Michael Chiang
0294216ea9 Merge pull request #124 from DavidZirinsky/patch-1
Update README.md
2023-07-19 07:40:24 -07:00
Matt Williams
7a62b2d2ab Update the FROM instructions
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-07-19 07:39:40 -07:00
Eva Ho
f08c050e57 fix page transitions flickering 2023-07-19 10:19:24 -04:00
Matt Williams
67c8d49757 Updated modelfile doc to include license
and attributed midjourneyprompt

Signed-off-by: Matt Williams <m@technovangelist.com>
2023-07-19 07:16:38 -07:00
DavidZirinsky
ffcd90e8a7 Update README.md
I needed to do this to run the project
2023-07-19 08:14:44 -06:00
Jeffrey Morgan
4ca7c4be1f dont consume reader when calculating digest 2023-07-19 00:47:55 -07:00
Michael Chiang
17b7af78f0 Merge pull request #115 from jmorganca/Add-wizard-vicuna-uncensored-model-link
Add wizard vicuna uncensored model link
2023-07-18 22:58:07 -07:00
Jeffrey Morgan
4c1dc52083 app: create /usr/local/bin/ if it does not exist 2023-07-18 22:50:52 -07:00
Patrick Devine
572fc9099f add license layers to the parser (#116) 2023-07-18 22:49:38 -07:00
Michael Chiang
3020f29041 Add wizard vicuna uncensored model link 2023-07-18 22:19:12 -07:00
Michael Yang
a6d03dd510 Merge pull request #110 from jmorganca/fix-pull-0-bytes
fix pull 0 bytes on completed layer
2023-07-18 19:38:59 -07:00
Michael Yang
68df36ae50 fix pull 0 bytes on completed layer 2023-07-18 19:38:11 -07:00
Michael Yang
5540305293 Merge pull request #112 from jmorganca/fix-relative-modelfile
resolve modelfile before passing to server
2023-07-18 19:36:24 -07:00
Michael Yang
d4cfee79d5 resolve modelfile before passing to server 2023-07-18 19:34:05 -07:00
Michael Yang
6e36f948df Merge pull request #109 from jmorganca/fix-create-memory
fix memory leak in create
2023-07-18 17:25:19 -07:00
Michael Yang
553fa39fe8 fix memory leak in create 2023-07-18 17:14:17 -07:00
Jeffrey Morgan
820e581ad8 web: fix typos and add link to discord 2023-07-18 17:03:40 -07:00
Isaac McFadyen
d14785738e README typo fix (#106)
* Fixed typo in README
2023-07-18 16:24:57 -07:00
Patrick Devine
9e15635c2d attempt two for skipping files in the file walk (#105) 2023-07-18 15:37:01 -07:00
Jeffrey Morgan
3e10f902f5 add mario example 2023-07-18 14:27:36 -07:00
Jeffrey Morgan
aa6714f25c fix typo in README.md 2023-07-18 14:03:11 -07:00
Jeffrey Morgan
7f3a37aed4 fix typo 2023-07-18 13:32:06 -07:00
Jeffrey Morgan
7b08280355 move download to the top of README.md 2023-07-18 13:31:25 -07:00
Jeffrey Morgan
e3cc4d5eac update README.md with new syntax 2023-07-18 13:22:46 -07:00
Jeffrey Morgan
8c85dfb735 Add README.md for examples 2023-07-18 13:22:46 -07:00
hoyyeva
ac62a413e5 Merge pull request #103 from jmorganca/web-update
website content and design update
2023-07-18 16:18:04 -04:00
Eva Ho
d1f89778e9 fix css on smaller screen 2023-07-18 16:17:42 -04:00
Eva Ho
df67a90e64 fix css 2023-07-18 16:02:45 -04:00
Eva Ho
576ae644de enable downloader 2023-07-18 15:57:39 -04:00
Eva Ho
7e52e51db1 update website text and design 2023-07-18 15:56:43 -04:00
Michael Chiang
f12df8d79a Merge pull request #101 from jmorganca/adding-logo
add logo
2023-07-18 12:47:20 -07:00
Michael Chiang
65de730bdb Update README.md
add logo
2023-07-18 12:45:38 -07:00
Patrick Devine
9658a5043b skip files in the list if we can't get the correct model path (#100) 2023-07-18 12:39:08 -07:00
Jeffrey Morgan
280fbe8019 app: use llama2 instead of orca 2023-07-18 12:36:03 -07:00
Jeffrey Morgan
2e339c2bab flatten examples 2023-07-18 12:33:50 -07:00
Michael Yang
38f0c54c64 Merge pull request #99 from jmorganca/mkdir-blobs
fix mkdir blob path
2023-07-18 11:29:05 -07:00
Michael Yang
f20426a768 fix mkdir blob path 2023-07-18 11:24:19 -07:00
Michael Yang
885f67a471 Merge pull request #92 from jmorganca/create-model-spinner
Create model spinner
2023-07-18 11:15:45 -07:00
Eva Ho
a9cc270b4d icon update 2023-07-18 13:33:26 -04:00
Eva Ho
aa281a30e5 updating icons 2023-07-18 13:33:26 -04:00
Matt Williams
760bc3366b Merge pull request #98 from jmorganca/matt/modelfiledoc
First stab at a modelfile doc
2023-07-18 09:16:01 -07:00
Patrick Devine
5bea29f610 add new list command (#97) 2023-07-18 09:09:45 -07:00
Matt Williams
9310ee3967 First stab at a modelfile doc
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-07-18 08:22:17 -07:00
Matt Williams
da7ddbb4dc Merge pull request #95 from jmorganca/matt/examplemodelfiles 2023-07-18 05:32:38 -07:00
Patrick Devine
4a28a2f093 add modelpaths (#96) 2023-07-17 22:44:21 -07:00
Matt Williams
3d9498dc95 Some simple modelfile examples
Signed-off-by: Matt Williams <m@technovangelist.com>
2023-07-17 17:16:59 -07:00
Jeffrey Morgan
1f45f7bb52 convert commands to uppercase in parser 2023-07-17 15:34:08 -07:00
Michael Yang
2e6c64a8f9 Merge pull request #88 from jmorganca/modelfile-params
modelfile params
2023-07-17 14:18:56 -07:00
Michael Yang
c7dd52271c remove debugging messages 2023-07-17 14:17:34 -07:00
Michael Yang
e4300e1eb7 add spinner to create 2023-07-17 14:15:42 -07:00
Michael Yang
aba706ea2d remove unused persistent pre run 2023-07-17 14:14:57 -07:00
Michael Yang
53d0052c6c unavoid unnecessary type conversion 2023-07-17 12:35:03 -07:00
Michael Yang
28a136e9a3 modelfile params 2023-07-17 12:35:03 -07:00
Jeffrey Morgan
529ff9ab6d Add note to README.md about Apple Silicon support 2023-07-17 11:22:34 -07:00
Michael Yang
41aca47d43 Merge pull request #87 from jmorganca/windows
fix file paths for windows
2023-07-17 11:21:25 -07:00
Michael Yang
3862a51a6a create directories if they do not exist 2023-07-17 11:18:48 -07:00
Michael Yang
bcb612a30a fix file paths for windows 2023-07-17 10:47:47 -07:00
hoyyeva
c05219aa0d Merge pull request #86 from jmorganca/welcome-screen-improve
welcome screen improvements
2023-07-17 13:44:53 -04:00
Eva Ho
508ffbbb15 improve the copy command experience 2023-07-17 13:17:52 -04:00
Jeffrey Morgan
59fa93cdd4 app: simpler winston settings 2023-07-16 20:26:12 -07:00
Jeffrey Morgan
952abe029b app: remove unused import 2023-07-16 20:25:50 -07:00
Jeffrey Morgan
f923855906 app: keep installer in foreground 2023-07-16 20:25:11 -07:00
Jeffrey Morgan
9386073e96 app: dont listen for disconnect events 2023-07-16 19:21:50 -07:00
Jeffrey Morgan
52ea4d4bb2 app: use app.on('before-quit') to detect app closing 2023-07-16 19:18:12 -07:00
Jeffrey Morgan
c4ba192187 app: use enum for steps 2023-07-16 18:47:23 -07:00
Jeffrey Morgan
fe758ca319 app: do not restart the server if app is closing 2023-07-16 18:41:43 -07:00
Jeffrey Morgan
08b933cc10 app: use async and `await` instead of callbacks 2023-07-16 18:38:37 -07:00
Jeffrey Morgan
6746a00af8 app: format app.tsx 2023-07-16 18:29:11 -07:00
Patrick Devine
2fb52261ad basic distribution w/ push/pull (#78)
* basic distribution w/ push/pull

* add the parser

* add create, pull, and push

* changes to the parser, FROM line, and fix commands

* mkdirp new manifest directories

* make `blobs` directory if it does not exist

* fix go warnings

* add progressbar for model pulls

* move model struct

---------

Co-authored-by: Jeffrey Morgan <jmorganca@gmail.com>
2023-07-16 17:02:22 -07:00
Jeffrey Morgan
6fdea03049 docs: remove python.md 2023-07-14 21:41:46 -07:00
Michael Yang
38021ba494 Merge pull request #83 from jmorganca/multibyte-responses
fix multibyte responses
2023-07-14 20:12:12 -07:00
Michael Yang
6c9fa573ae Merge pull request #82 from jmorganca/filepath
windows build
2023-07-14 20:11:55 -07:00
Michael Yang
40c9dc0a31 fix multibyte responses 2023-07-14 20:11:44 -07:00
Michael Yang
0142660bd4 size_t 2023-07-14 17:29:16 -07:00
Michael Yang
743e957d88 use filepath for os compat 2023-07-14 17:27:14 -07:00
Jeffrey Morgan
560f36e6c8 app: set first-time-run to true instead of false 2023-07-14 16:50:12 -07:00
hoyyeva
e88dd25bab ollama app welcome screen for first time run (#80) 2023-07-14 16:34:24 -07:00
Michael Yang
567e74e7d7 Merge pull request #81 from jmorganca/fix-race-2
fix race
2023-07-14 15:12:01 -07:00
Michael Yang
5ade3db040 fix race
block on write which only returns when the channel is closed. this is
contrary to the previous arrangement where the handler may return but
the stream hasn't finished writing. it can lead to the client receiving
unexpected responses (since the request has been handled) or worst case
a nil-pointer dereference as the stream tries to flush a nil writer
2023-07-14 15:10:46 -07:00
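The pattern described in this commit message can be sketched with a small, self-contained Go example (illustrative only, not code from this change): the generation work runs in a goroutine that sends chunks on a channel, while the handler ranges over that channel and writes each chunk out, so the handler can only return once the producer has closed the channel and the stream is fully flushed.

```
package main

import (
	"fmt"
	"net/http"
	"time"
)

func generateHandler(w http.ResponseWriter, r *http.Request) {
	ch := make(chan string)

	go func() {
		// Closing the channel is the only thing that lets the handler return.
		defer close(ch)
		for i := 0; i < 3; i++ {
			ch <- fmt.Sprintf(`{"response":"token %d","done":false}`, i)
			time.Sleep(10 * time.Millisecond)
		}
		ch <- `{"done":true}`
	}()

	// Block on writes until the producer closes the channel, so the handler
	// never returns while the stream is still being written.
	for line := range ch {
		fmt.Fprintln(w, line)
		if f, ok := w.(http.Flusher); ok {
			f.Flush()
		}
	}
}

func main() {
	http.HandleFunc("/api/generate", generateHandler)
	fmt.Println(http.ListenAndServe("127.0.0.1:8080", nil))
}
```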
Michael Yang
965f9ad033 Merge pull request #77 from jmorganca/mem
continue conversation
2023-07-14 14:57:42 -07:00
Michael Yang
5d1c6b7499 Merge pull request #79 from jmorganca/fix-typo
fix typo
2023-07-14 10:50:44 -07:00
Michael Yang
5fefaa5d4d fix typo 2023-07-14 10:47:18 -07:00
Michael Yang
1775647f76 continue conversation
feed responses back into the llm
2023-07-13 17:13:00 -07:00
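The mechanism behind this change is visible in the API types later in this diff: `GenerateResponse` carries a `Context` field that the caller passes back in the next `GenerateRequest`. A minimal sketch of threading it through, assuming the `api` client and import path shown in the diffs below; the model name and prompts are placeholders:

```
package main

import (
	"context"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client := api.NewClient()
	var conversation []int // token context returned by the previous response

	for _, prompt := range []string{"hi", "what did I just say?"} {
		req := api.GenerateRequest{Model: "llama2", Prompt: prompt, Context: conversation}
		err := client.Generate(context.Background(), &req, func(resp api.GenerateResponse) error {
			fmt.Print(resp.Response)
			if resp.Done {
				conversation = resp.Context // feed the response context back into the next turn
			}
			return nil
		})
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		fmt.Println()
	}
}
```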
Michael Yang
77dc1a6d74 Merge pull request #74 from jmorganca/timings
Timings
2023-07-13 10:17:13 -07:00
Michael Yang
05e08d2310 return more info in generate response 2023-07-13 09:37:32 -07:00
Michael Yang
31590284a7 fix route 2023-07-12 19:21:49 -07:00
Michael Yang
f2863cc7f8 Merge pull request #76 from jmorganca/fix-pull
fix pull race
2023-07-12 19:21:13 -07:00
Jeffrey Morgan
4dd296e155 build app in publish script 2023-07-12 19:16:39 -07:00
Jeffrey Morgan
304f419429 update README.md API reference 2023-07-12 19:16:28 -07:00
Michael Yang
2666d3c206 fix pull race 2023-07-12 19:07:23 -07:00
Jeffrey Morgan
787d965331 web: disable signup button while submitting 2023-07-12 17:32:27 -07:00
Jeffrey Morgan
e6eee0732c web: fix npm build 2023-07-12 17:28:00 -07:00
Jeffrey Morgan
4c2b4589ac web: newsletter signup on download page 2023-07-12 17:26:20 -07:00
Michael Yang
5571ed5248 Merge pull request #73 from jmorganca/generate-eof
fix eof error in generate
2023-07-12 11:09:23 -07:00
Michael Yang
0944b01e7d pull fixes 2023-07-12 09:55:07 -07:00
Jeffrey Morgan
5028de2901 update vicuna model 2023-07-12 09:42:26 -07:00
Michael Yang
e1f0a0dc74 fix eof error in generate 2023-07-12 09:36:16 -07:00
Michael Yang
b227261f21 Merge pull request #71 from jmorganca/llama-errors
error checking new model
2023-07-12 09:20:33 -07:00
Jeffrey Morgan
c63f811909 return error if model fails to load 2023-07-11 20:32:26 -07:00
Jeffrey Morgan
7c71c10d4f fix compilation issue in Dockerfile, remove from README.md until ready 2023-07-11 19:51:08 -07:00
Michael Yang
c5f7eadd87 error checking new model 2023-07-11 17:07:41 -07:00
Jeffrey Morgan
dcb6ba389a app: trim server lines before logging 2023-07-11 16:43:19 -07:00
Jeffrey Morgan
ed6abba75a app: bundle real ggml-metal.metal instead of symlink 2023-07-11 16:36:39 -07:00
Jeffrey Morgan
b52a400cdf use go build on publish 2023-07-11 16:17:45 -07:00
Jeffrey Morgan
2ed26f0047 tweak logging 2023-07-11 16:16:38 -07:00
Jeffrey Morgan
e64ef69e34 look for ggml-metal in the same directory as the binary 2023-07-11 15:58:56 -07:00
Jeffrey Morgan
3d0a9b477b log to console as well as file 2023-07-11 15:52:22 -07:00
Michael Yang
7226980fb6 Merge pull request #70 from jmorganca/offline-fixes
offline fixes
2023-07-11 15:50:19 -07:00
Michael Yang
a806b03f62 no errgroup 2023-07-11 14:58:10 -07:00
Michael Yang
948323fa78 rename partial file 2023-07-11 13:50:26 -07:00
Michael Yang
e243329e2e check api status 2023-07-11 13:42:05 -07:00
Michael Yang
2a66a1164a common stream producer 2023-07-11 13:42:05 -07:00
Michael Yang
62620914e9 Merge pull request #65 from jmorganca/bindings
call llama.cpp directly from go
2023-07-11 12:01:03 -07:00
Michael Yang
442dec1c6f vendor llama.cpp 2023-07-11 11:59:18 -07:00
Michael Yang
fd4792ec56 call llama.cpp directly from go 2023-07-11 11:59:18 -07:00
hoyyeva
abaf7d3bda Merge pull request #67 from jmorganca/log
writing logs to `./ollama/logs`
2023-07-11 14:45:21 -04:00
Eva Ho
7762584fb1 address comments 2023-07-11 14:38:38 -04:00
Jeffrey Morgan
317615fd5c web: remove unused code 2023-07-11 11:05:45 -07:00
Eva Ho
acc31427dd add logs to ~/.ollama/logs folder 2023-07-11 13:33:32 -04:00
Jeffrey Morgan
a3ec1ec2a0 consistent error handling for pull and generate 2023-07-10 21:34:15 -07:00
Eva Ho
407a5cabf4 when app is running, server restarts when it exits or disconnects 2023-07-10 17:14:25 -04:00
Michael Yang
0859d50942 Merge pull request #58 from jmorganca/generate-errors
return error in generate response
2023-07-10 14:03:47 -07:00
Jeffrey Morgan
66bbf05918 start server in both dev and when packaged 2023-07-10 13:46:31 -07:00
Michael Yang
edba935d67 return error in generate response 2023-07-10 13:30:10 -07:00
Bruce MacDonald
2d49197b3b increase default model size to 512 2023-07-10 21:24:41 +02:00
Bruce MacDonald
f5e2e150b8 allow overriding default generate options 2023-07-10 20:58:02 +02:00
Jeffrey Morgan
268e362fa7 fix binding build 2023-07-10 11:33:43 -07:00
Bruce MacDonald
07a4c1e3fb take all args as one prompt 2023-07-10 06:05:09 -04:00
Jeffrey Morgan
20dae6b38f add vercel.json to silence PR comments 2023-07-09 20:11:37 -07:00
Jeffrey Morgan
a18e6b3a40 llama: remove unnecessary std::vector 2023-07-09 10:51:45 -04:00
Jeffrey Morgan
5fb96255dc llama: remove unused helper functions 2023-07-09 10:25:07 -04:00
Jeffrey Morgan
b43ddd84be update README.md instructions section 2023-07-08 19:19:31 -04:00
Jeffrey Morgan
993cb9fad6 examples: add basic python example 2023-07-08 17:40:05 -04:00
Jeffrey Morgan
a8dc0c9b5f web: use proper caching for autoupdate endpoint 2023-07-08 16:48:02 -04:00
Jeffrey Morgan
1e97807808 web: revalidate download every minute 2023-07-08 13:45:47 -04:00
Jeffrey Morgan
840f87430a remove double heartbeat 2023-07-08 13:30:27 -04:00
Bruce MacDonald
4d8b0414f7 take all args as one prompt
- parse all run arguments into one prompt
- do not echo prompt back on one-shot
- example of summarizing a document
2023-07-07 16:14:58 -04:00
89 changed files with 42574 additions and 2519 deletions

4
.gitignore vendored

@@ -2,9 +2,5 @@
.vscode
.env
.venv
*.spec
build
dist
__pycache__
ollama
ggml-metal.metal


@@ -1,8 +1,6 @@
FROM golang:1.20
RUN apt-get update && apt-get install -y cmake
WORKDIR /go/src/github.com/jmorganca/ollama
COPY . .
RUN cmake -S llama -B llama/build && cmake --build llama/build
RUN CGO_ENABLED=1 go build -ldflags '-linkmode external -extldflags "-static"' .
FROM alpine


@@ -1,19 +0,0 @@
default: ollama
.PHONY: llama
llama:
cmake -S llama -B llama/build -DLLAMA_METAL=on
cmake --build llama/build
.PHONY: ollama
ollama: llama
go build .
.PHONY: app
app: ollama
npm install --prefix app
npm run --prefix app make:sign
clean:
go clean
rm -rf llama/build

116
README.md

@@ -1,101 +1,109 @@
![ollama](https://github.com/jmorganca/ollama/assets/251292/961f99bb-251a-4eec-897d-1ba99997ad0f)
<div align="center">
<picture>
<source media="(prefers-color-scheme: dark)" height="200px" srcset="https://github.com/jmorganca/ollama/assets/3325447/318048d2-b2dd-459c-925a-ac8449d5f02c">
<img alt="logo" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/c7d6e15f-7f4d-4776-b568-c084afa297c2">
</picture>
</div>
# Ollama
Run large language models with `llama.cpp`.
[![Discord](https://dcbadge.vercel.app/api/server/ollama?style=flat&compact=true)](https://discord.gg/ollama)
> Note: certain models that can be run with Ollama are intended for research and/or non-commercial use only.
Create, run, and share large language models (LLMs). Ollama bundles a model's weights, configuration, prompts, and more into self-contained packages that can run on any machine.
### Features
> Note: Ollama is in early preview. Please report any issues you find.
- Download and run popular large language models
- Switch between multiple models on the fly
- Hardware acceleration where available (Metal, CUDA)
- Fast inference server written in Go, powered by [llama.cpp](https://github.com/ggerganov/llama.cpp)
- REST API to use with your application (python, typescript SDKs coming soon)
## Download
## Install
- [Download](https://ollama.ai/download) for macOS
- Download for Windows (coming soon)
- Docker: `docker run -p 11434:11434 ollama/ollama`
You can also build the [binary from source](#building).
- [Download](https://ollama.ai/download) for macOS on Apple Silicon (Intel coming soon)
- Download for Windows and Linux (coming soon)
- Build [from source](#building)
## Quickstart
Run a fast and simple model.
To run and chat with [Llama 2](https://ai.meta.com/llama), the new model by Meta:
```
ollama run orca
ollama run llama2
```
## Example models
## Model library
### 💬 Chat
Ollama includes a library of open-source, pre-trained models. More models are coming soon. You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.
Have a conversation.
| Model | Parameters | Size | Download |
| ------------------------ | ---------- | ----- | --------------------------- |
| Llama2 | 7B | 3.8GB | `ollama pull llama2` |
| Llama2 13B | 13B | 7.3GB | `ollama pull llama2:13b` |
| Orca Mini | 3B | 1.9GB | `ollama pull orca` |
| Vicuna | 7B | 3.8GB | `ollama pull vicuna` |
| Nous-Hermes | 13B | 7.3GB | `ollama pull nous-hermes` |
| Wizard Vicuna Uncensored | 13B | 7.3GB | `ollama pull wizard-vicuna` |
## Examples
### Run a model
```
ollama run vicuna "Why is the sky blue?"
ollama run llama2
>>> hi
Hello! How can I help you today?
```
### 🗺️ Instructions
### Create a custom character model
Ask questions. Get answers.
Pull a base model:
```
ollama run orca "Write an email to my boss."
ollama pull orca
```
### 📖 Storytelling
Venture into the unknown.
Create a `Modelfile`:
```
ollama run nous-hermes "Once upon a time"
FROM orca
PROMPT """
### System:
You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
### User:
{{ .Prompt }}
### Response:
"""
```
## Advanced usage
### Run a local model
Next, create and run the model:
```
ollama run ~/Downloads/vicuna-7b-v1.3.ggmlv3.q4_1.bin
ollama create mario -f ./Modelfile
ollama run mario
>>> hi
Hello! It's your friend Mario.
```
For more info on `Modelfile` syntax see [this doc](./docs/modelfile).
### Pull a model from the registry
```
ollama pull nous-hermes
```
## Building
```
make
go build .
```
To run it start the server:
```
./ollama server &
./ollama serve &
```
Finally, run a model!
```
./ollama run ~/Downloads/vicuna-7b-v1.3.ggmlv3.q4_1.bin
```
## API Reference
### `POST /api/pull`
Download a model
```
curl -X POST http://localhost:11343/api/pull -d '{"model": "orca"}'
```
### `POST /api/generate`
Complete a prompt
```
curl -X POST http://localhost:11434/api/generate -d '{"model": "orca", "prompt": "hello!", "stream": true}'
./ollama run llama2
```
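As a reference for the streaming format above, a minimal Go sketch that consumes `/api/generate`; the URL and the `response`/`done` field names come from the API in this diff, while the prompt and error handling are illustrative only:

```
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body := []byte(`{"model": "llama2", "prompt": "hello!"}`)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The server streams one JSON object per line until "done" is true.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		var chunk struct {
			Response string `json:"response"`
			Done     bool   `json:"done"`
		}
		if err := json.Unmarshal(scanner.Bytes(), &chunk); err != nil {
			panic(err)
		}
		fmt.Print(chunk.Response)
		if chunk.Done {
			break
		}
	}
	fmt.Println()
}
```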


@@ -5,13 +5,32 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
)
type Client struct {
base url.URL
base url.URL
HTTP http.Client
Headers http.Header
}
func checkError(resp *http.Response, body []byte) error {
if resp.StatusCode >= 200 && resp.StatusCode < 400 {
return nil
}
apiError := StatusError{StatusCode: resp.StatusCode}
err := json.Unmarshal(body, &apiError)
if err != nil {
// Use the full body as the message if we fail to decode a response.
apiError.Message = string(body)
}
return apiError
}
func NewClient(hosts ...string) *Client {
@@ -22,38 +41,72 @@ func NewClient(hosts ...string) *Client {
return &Client{
base: url.URL{Scheme: "http", Host: host},
HTTP: http.Client{},
}
}
type options struct {
requestBody io.Reader
responseFunc func(bts []byte) error
}
func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
var reqBody io.Reader
var data []byte
var err error
if reqData != nil {
data, err = json.Marshal(reqData)
if err != nil {
return err
}
reqBody = bytes.NewReader(data)
}
func OptionRequestBody(data any) func(*options) {
bts, err := json.Marshal(data)
url := c.base.JoinPath(path).String()
req, err := http.NewRequestWithContext(ctx, method, url, reqBody)
if err != nil {
panic(err)
return err
}
return func(opts *options) {
opts.requestBody = bytes.NewReader(bts)
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "application/json")
for k, v := range c.Headers {
req.Header[k] = v
}
respObj, err := c.HTTP.Do(req)
if err != nil {
return err
}
defer respObj.Body.Close()
respBody, err := io.ReadAll(respObj.Body)
if err != nil {
return err
}
if err := checkError(respObj, respBody); err != nil {
return err
}
if len(respBody) > 0 && respData != nil {
if err := json.Unmarshal(respBody, respData); err != nil {
return err
}
}
return nil
}
func OptionResponseFunc(fn func([]byte) error) func(*options) {
return func(opts *options) {
opts.responseFunc = fn
}
}
func (c *Client) stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error {
var buf *bytes.Buffer
if data != nil {
bts, err := json.Marshal(data)
if err != nil {
return err
}
func (c *Client) stream(ctx context.Context, method, path string, fns ...func(*options)) error {
var opts options
for _, fn := range fns {
fn(&opts)
buf = bytes.NewBuffer(bts)
}
request, err := http.NewRequestWithContext(ctx, method, c.base.JoinPath(path).String(), opts.requestBody)
request, err := http.NewRequestWithContext(ctx, method, c.base.JoinPath(path).String(), buf)
if err != nil {
return err
}
@@ -67,13 +120,28 @@ func (c *Client) stream(ctx context.Context, method, path string, fns ...func(*o
}
defer response.Body.Close()
if opts.responseFunc != nil {
scanner := bufio.NewScanner(response.Body)
for scanner.Scan() {
if err := opts.responseFunc(scanner.Bytes()); err != nil {
return err
scanner := bufio.NewScanner(response.Body)
for scanner.Scan() {
var errorResponse struct {
Error string `json:"error,omitempty"`
}
bts := scanner.Bytes()
if err := json.Unmarshal(bts, &errorResponse); err != nil {
return fmt.Errorf("unmarshal: %w", err)
}
if response.StatusCode >= 400 {
return StatusError{
StatusCode: response.StatusCode,
Status: response.Status,
Message: errorResponse.Error,
}
}
if err := fn(bts); err != nil {
return err
}
}
return nil
@@ -82,36 +150,59 @@ func (c *Client) stream(ctx context.Context, method, path string, fns ...func(*o
type GenerateResponseFunc func(GenerateResponse) error
func (c *Client) Generate(ctx context.Context, req *GenerateRequest, fn GenerateResponseFunc) error {
return c.stream(ctx, http.MethodPost, "/api/generate",
OptionRequestBody(req),
OptionResponseFunc(func(bts []byte) error {
var resp GenerateResponse
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
return c.stream(ctx, http.MethodPost, "/api/generate", req, func(bts []byte) error {
var resp GenerateResponse
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
return fn(resp)
}),
)
return fn(resp)
})
}
type PullProgressFunc func(PullProgress) error
type PullProgressFunc func(ProgressResponse) error
func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc) error {
return c.stream(ctx, http.MethodPost, "/api/pull",
OptionRequestBody(req),
OptionResponseFunc(func(bts []byte) error {
var resp PullProgress
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
return c.stream(ctx, http.MethodPost, "/api/pull", req, func(bts []byte) error {
var resp ProgressResponse
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
if resp.Error.Message != "" {
// couldn't pull the model from the directory, proceed anyway
return nil
}
return fn(resp)
}),
)
return fn(resp)
})
}
type PushProgressFunc func(ProgressResponse) error
func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc) error {
return c.stream(ctx, http.MethodPost, "/api/push", req, func(bts []byte) error {
var resp ProgressResponse
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
return fn(resp)
})
}
type CreateProgressFunc func(CreateProgress) error
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
var resp CreateProgress
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
return fn(resp)
})
}
func (c *Client) List(ctx context.Context) (*ListResponse, error) {
var lr ListResponse
if err := c.do(ctx, http.MethodGet, "/api/tags", nil, &lr); err != nil {
return nil, err
}
return &lr, nil
}
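A short usage sketch of the client above, assuming the `github.com/jmorganca/ollama/api` import path used elsewhere in this repository: `Pull` streams `ProgressResponse` values to its callback, while `List` goes through the plain request/response `do` helper.

```
package main

import (
	"context"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client := api.NewClient()

	// Pull streams ProgressResponse values to the callback as layers download.
	err := client.Pull(context.Background(), &api.PullRequest{Name: "llama2"}, func(p api.ProgressResponse) error {
		fmt.Printf("%s %d/%d\n", p.Status, p.Completed, p.Total)
		return nil
	})
	if err != nil {
		fmt.Println("pull failed:", err)
		return
	}

	// List uses the non-streaming request/response path.
	models, err := client.List(context.Background())
	if err != nil {
		fmt.Println("list failed:", err)
		return
	}
	for _, m := range models.Models {
		fmt.Println(m.Name, m.Size)
	}
}
```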


@@ -2,124 +2,172 @@ package api
import (
"fmt"
"net/http"
"strings"
"os"
"runtime"
"time"
)
type Error struct {
Code int32 `json:"code"`
Message string `json:"message"`
type StatusError struct {
StatusCode int
Status string
Message string
}
func (e Error) Error() string {
if e.Message == "" {
return fmt.Sprintf("%d %v", e.Code, strings.ToLower(http.StatusText(int(e.Code))))
func (e StatusError) Error() string {
if e.Message != "" {
return fmt.Sprintf("%s: %s", e.Status, e.Message)
}
return e.Message
}
type PullRequest struct {
Model string `json:"model"`
}
type PullProgress struct {
Total int64 `json:"total"`
Completed int64 `json:"completed"`
Percent float64 `json:"percent"`
Error Error `json:"error"`
return e.Status
}
type GenerateRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
Model string `json:"model"`
Prompt string `json:"prompt"`
Context []int `json:"context,omitempty"`
ModelOptions `json:"model_opts,omitempty"`
PredictOptions `json:"predict_opts,omitempty"`
Options `json:"options"`
}
type ModelOptions struct {
ContextSize int `json:"context_size,omitempty"`
Seed int `json:"seed,omitempty"`
NBatch int `json:"n_batch,omitempty"`
F16Memory bool `json:"memory_f16,omitempty"`
MLock bool `json:"mlock,omitempty"`
MMap bool `json:"mmap,omitempty"`
VocabOnly bool `json:"vocab_only,omitempty"`
LowVRAM bool `json:"low_vram,omitempty"`
Embeddings bool `json:"embeddings,omitempty"`
NUMA bool `json:"numa,omitempty"`
NGPULayers int `json:"gpu_layers,omitempty"`
MainGPU string `json:"main_gpu,omitempty"`
TensorSplit string `json:"tensor_split,omitempty"`
type CreateRequest struct {
Name string `json:"name"`
Path string `json:"path"`
}
type PredictOptions struct {
Seed int `json:"seed,omitempty"`
Threads int `json:"threads,omitempty"`
Tokens int `json:"tokens,omitempty"`
TopK int `json:"top_k,omitempty"`
Repeat int `json:"repeat,omitempty"`
Batch int `json:"batch,omitempty"`
NKeep int `json:"nkeep,omitempty"`
TopP float64 `json:"top_p,omitempty"`
Temperature float64 `json:"temp,omitempty"`
Penalty float64 `json:"penalty,omitempty"`
F16KV bool
DebugMode bool
StopPrompts []string
IgnoreEOS bool `json:"ignore_eos,omitempty"`
TailFreeSamplingZ float64 `json:"tfs_z,omitempty"`
TypicalP float64 `json:"typical_p,omitempty"`
FrequencyPenalty float64 `json:"freq_penalty,omitempty"`
PresencePenalty float64 `json:"pres_penalty,omitempty"`
Mirostat int `json:"mirostat,omitempty"`
MirostatETA float64 `json:"mirostat_lr,omitempty"`
MirostatTAU float64 `json:"mirostat_ent,omitempty"`
PenalizeNL bool `json:"penalize_nl,omitempty"`
LogitBias string `json:"logit_bias,omitempty"`
PathPromptCache string
MLock bool `json:"mlock,omitempty"`
MMap bool `json:"mmap,omitempty"`
PromptCacheAll bool
PromptCacheRO bool
MainGPU string
TensorSplit string
type CreateProgress struct {
Status string `json:"status"`
}
var DefaultModelOptions ModelOptions = ModelOptions{
ContextSize: 128,
Seed: 0,
F16Memory: true,
MLock: false,
Embeddings: true,
MMap: true,
LowVRAM: false,
type PullRequest struct {
Name string `json:"name"`
Username string `json:"username"`
Password string `json:"password"`
}
var DefaultPredictOptions PredictOptions = PredictOptions{
Seed: -1,
Threads: -1,
Tokens: 512,
Penalty: 1.1,
Repeat: 64,
Batch: 512,
NKeep: 64,
TopK: 90,
TopP: 0.86,
TailFreeSamplingZ: 1.0,
TypicalP: 1.0,
Temperature: 0.8,
FrequencyPenalty: 0.0,
PresencePenalty: 0.0,
Mirostat: 0,
MirostatTAU: 5.0,
MirostatETA: 0.1,
MMap: true,
StopPrompts: []string{"llama"},
type ProgressResponse struct {
Status string `json:"status"`
Digest string `json:"digest,omitempty"`
Total int `json:"total,omitempty"`
Completed int `json:"completed,omitempty"`
}
type PushRequest struct {
Name string `json:"name"`
Username string `json:"username"`
Password string `json:"password"`
}
type ListResponse struct {
Models []ListResponseModel `json:"models"`
}
type ListResponseModel struct {
Name string `json:"name"`
ModifiedAt time.Time `json:"modified_at"`
Size int `json:"size"`
}
type GenerateResponse struct {
Response string `json:"response"`
Model string `json:"model"`
CreatedAt time.Time `json:"created_at"`
Response string `json:"response,omitempty"`
Done bool `json:"done"`
Context []int `json:"context,omitempty"`
TotalDuration time.Duration `json:"total_duration,omitempty"`
PromptEvalCount int `json:"prompt_eval_count,omitempty"`
PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"`
EvalCount int `json:"eval_count,omitempty"`
EvalDuration time.Duration `json:"eval_duration,omitempty"`
}
func (r *GenerateResponse) Summary() {
if r.TotalDuration > 0 {
fmt.Fprintf(os.Stderr, "total duration: %v\n", r.TotalDuration)
}
if r.PromptEvalCount > 0 {
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
}
if r.PromptEvalDuration > 0 {
fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", r.PromptEvalDuration)
fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(r.PromptEvalCount)/r.PromptEvalDuration.Seconds())
}
if r.EvalCount > 0 {
fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", r.EvalCount)
}
if r.EvalDuration > 0 {
fmt.Fprintf(os.Stderr, "eval duration: %s\n", r.EvalDuration)
fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(r.EvalCount)/r.EvalDuration.Seconds())
}
}
type Options struct {
Seed int `json:"seed,omitempty"`
// Backend options
UseNUMA bool `json:"numa,omitempty"`
// Model options
NumCtx int `json:"num_ctx,omitempty"`
NumBatch int `json:"num_batch,omitempty"`
NumGPU int `json:"num_gpu,omitempty"`
MainGPU int `json:"main_gpu,omitempty"`
LowVRAM bool `json:"low_vram,omitempty"`
F16KV bool `json:"f16_kv,omitempty"`
LogitsAll bool `json:"logits_all,omitempty"`
VocabOnly bool `json:"vocab_only,omitempty"`
UseMMap bool `json:"use_mmap,omitempty"`
UseMLock bool `json:"use_mlock,omitempty"`
EmbeddingOnly bool `json:"embedding_only,omitempty"`
// Predict options
RepeatLastN int `json:"repeat_last_n,omitempty"`
RepeatPenalty float32 `json:"repeat_penalty,omitempty"`
FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
PresencePenalty float32 `json:"presence_penalty,omitempty"`
Temperature float32 `json:"temperature,omitempty"`
TopK int `json:"top_k,omitempty"`
TopP float32 `json:"top_p,omitempty"`
TFSZ float32 `json:"tfs_z,omitempty"`
TypicalP float32 `json:"typical_p,omitempty"`
Mirostat int `json:"mirostat,omitempty"`
MirostatTau float32 `json:"mirostat_tau,omitempty"`
MirostatEta float32 `json:"mirostat_eta,omitempty"`
NumThread int `json:"num_thread,omitempty"`
}
func DefaultOptions() Options {
return Options{
Seed: -1,
UseNUMA: false,
NumCtx: 2048,
NumBatch: 512,
NumGPU: 1,
LowVRAM: false,
F16KV: true,
UseMMap: true,
UseMLock: false,
RepeatLastN: 512,
RepeatPenalty: 1.1,
FrequencyPenalty: 0.0,
PresencePenalty: 0.0,
Temperature: 0.8,
TopK: 40,
TopP: 0.9,
TFSZ: 1.0,
TypicalP: 1.0,
Mirostat: 0,
MirostatTau: 5.0,
MirostatEta: 0.1,
NumThread: runtime.NumCPU(),
}
}
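These defaults can be overridden per request: in the new `GenerateRequest` above, `Options` is embedded and serialized under the `options` key. A hedged sketch that lowers the temperature and raises the context window while keeping every other default:

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	opts := api.DefaultOptions()
	opts.Temperature = 0.2 // override a single knob; the rest stay at their defaults
	opts.NumCtx = 4096

	req := api.GenerateRequest{Model: "llama2", Prompt: "hello!", Options: opts}

	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out)) // the overridden values appear under "options"
}
```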

Binary file not shown (size before: 442 B, after: 403 B).

Binary file not shown (size before: 889 B, after: 741 B).

Binary file not shown (size after: 445 B).

Binary file not shown (size after: 891 B).


@@ -1,4 +1,4 @@
import type { ForgeConfig, ResolvedForgeConfig, ForgeMakeResult } from '@electron-forge/shared-types'
import type { ForgeConfig } from '@electron-forge/shared-types'
import { MakerSquirrel } from '@electron-forge/maker-squirrel'
import { MakerZIP } from '@electron-forge/maker-zip'
import { PublisherGithub } from '@electron-forge/publisher-github'
@@ -21,7 +21,9 @@ const config: ForgeConfig = {
'../ollama',
path.join(__dirname, './assets/ollama_icon_16x16Template.png'),
path.join(__dirname, './assets/ollama_icon_16x16Template@2x.png'),
...(process.platform === 'darwin' ? ['../ggml-metal.metal'] : []),
path.join(__dirname, './assets/ollama_outline_icon_16x16Template.png'),
path.join(__dirname, './assets/ollama_outline_icon_16x16Template@2x.png'),
...(process.platform === 'darwin' ? ['../llama/ggml-metal.metal'] : []),
],
...(process.env.SIGN
? {
@@ -58,7 +60,7 @@ const config: ForgeConfig = {
new AutoUnpackNativesPlugin({}),
new WebpackPlugin({
mainConfig,
devContentSecurityPolicy: `default-src * 'unsafe-eval' 'unsafe-inline'`,
devContentSecurityPolicy: `default-src * 'unsafe-eval' 'unsafe-inline'; img-src data: 'self'`,
renderer: {
config: rendererConfig,
nodeIntegration: true,

2517
app/package-lock.json generated

File diff suppressed because it is too large.


@@ -30,6 +30,7 @@
"@electron-forge/plugin-auto-unpack-natives": "^6.2.1",
"@electron-forge/plugin-webpack": "^6.2.1",
"@electron-forge/publisher-github": "^6.2.1",
"@svgr/webpack": "^8.0.1",
"@types/chmodr": "^1.0.0",
"@types/node": "^20.4.0",
"@types/react": "^18.2.14",
@@ -54,21 +55,27 @@
"prettier": "^2.8.8",
"prettier-plugin-tailwindcss": "^0.3.0",
"style-loader": "^3.3.3",
"svg-inline-loader": "^0.8.2",
"tailwindcss": "^3.3.2",
"ts-loader": "^9.4.3",
"ts-node": "^10.9.1",
"typescript": "~4.5.4",
"url-loader": "^4.1.1",
"webpack": "^5.88.0",
"webpack-cli": "^5.1.4",
"webpack-dev-server": "^4.15.1"
},
"dependencies": {
"@electron/remote": "^2.0.10",
"@heroicons/react": "^2.0.18",
"@segment/analytics-node": "^1.0.0",
"copy-to-clipboard": "^3.3.3",
"electron-squirrel-startup": "^1.0.0",
"electron-store": "^8.1.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"uuid": "^9.0.0"
"uuid": "^9.0.0",
"winston": "^3.10.0",
"winston-daily-rotate-file": "^4.7.1"
}
}


@@ -11,6 +11,10 @@ body {
-webkit-app-region: drag;
}
.no-drag {
-webkit-app-region: no-drag;
}
.blink {
-webkit-animation: 1s blink step-end infinite;
-moz-animation: 1s blink step-end infinite;


@@ -1,158 +1,115 @@
import { useState } from 'react'
import path from 'path'
import os from 'os'
import { dialog, getCurrentWindow } from '@electron/remote'
import copy from 'copy-to-clipboard'
import { CheckIcon, DocumentDuplicateIcon } from '@heroicons/react/24/outline'
import Store from 'electron-store'
import { getCurrentWindow } from '@electron/remote'
const API_URL = 'http://127.0.0.1:7734'
import { install } from './install'
import OllamaIcon from './ollama.svg'
type Message = {
sender: 'bot' | 'human'
content: string
}
const store = new Store()
const userInfo = os.userInfo()
async function generate(prompt: string, model: string, callback: (res: string) => void) {
const result = await fetch(`${API_URL}/generate`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
prompt,
model,
}),
})
if (!result.ok) {
return
}
let reader = result.body.getReader()
while (true) {
const { done, value } = await reader.read()
if (done) {
break
}
let decoder = new TextDecoder()
let str = decoder.decode(value)
let re = /}\s*{/g
str = '[' + str.replace(re, '},{') + ']'
let messages = JSON.parse(str)
for (const message of messages) {
const choice = message.choices[0]
callback(choice.text)
if (choice.finish_reason === 'stop') {
break
}
}
}
return
enum Step {
WELCOME = 0,
CLI,
FINISH,
}
export default function () {
const [prompt, setPrompt] = useState('')
const [messages, setMessages] = useState<Message[]>([])
const [model, setModel] = useState('')
const [generating, setGenerating] = useState(false)
const [step, setStep] = useState<Step>(Step.WELCOME)
const [commandCopied, setCommandCopied] = useState<boolean>(false)
const command = 'ollama run llama2'
return (
<div className='flex min-h-screen flex-1 flex-col justify-between bg-white'>
<header className='drag sticky top-0 z-50 flex h-14 w-full flex-row items-center border-b border-black/10 bg-white/75 backdrop-blur-md'>
<div className='mx-auto w-full max-w-xl leading-none'>
<h1 className='text-sm font-medium'>{path.basename(model).replace('.bin', '')}</h1>
</div>
</header>
{model ? (
<section className='mx-auto mb-10 w-full max-w-xl flex-1 break-words'>
{messages.map((m, i) => (
<div className='my-4 flex gap-4' key={i}>
<div className='flex-none pr-1 text-lg'>
{m.sender === 'human' ? (
<div className='mt-px flex h-6 w-6 items-center justify-center rounded-md bg-neutral-200 text-sm text-neutral-700'>
{userInfo.username[0].toUpperCase()}
</div>
) : (
<div className='mt-0.5 flex h-6 w-6 items-center justify-center rounded-md bg-blue-600 text-sm text-white'>
{path.basename(model)[0].toUpperCase()}
</div>
)}
</div>
<div className='flex-1 text-gray-800'>
{m.content}
{m.sender === 'bot' && generating && i === messages.length - 1 && (
<span className='blink relative -top-[3px] left-1 text-[10px]'></span>
)}
<div className='drag'>
<div className='mx-auto flex min-h-screen w-full flex-col justify-between bg-white px-4 pt-16'>
{step === Step.WELCOME && (
<>
<div className='mx-auto text-center'>
<h1 className='mb-6 mt-4 text-2xl tracking-tight text-gray-900'>Welcome to Ollama</h1>
<p className='mx-auto w-[65%] text-sm text-gray-400'>
Let's get you up and running with your own large language models.
</p>
<button
onClick={() => setStep(Step.CLI)}
className='no-drag rounded-dm mx-auto my-8 w-[40%] rounded-md bg-black px-4 py-2 text-sm text-white hover:brightness-110'
>
Next
</button>
</div>
<div className='mx-auto'>
<OllamaIcon />
</div>
</>
)}
{step === Step.CLI && (
<>
<div className='mx-auto flex flex-col space-y-28 text-center'>
<h1 className='mt-4 text-2xl tracking-tight text-gray-900'>Install the command line</h1>
<pre className='mx-auto text-4xl text-gray-400'>&gt; ollama</pre>
<div className='mx-auto'>
<button
onClick={async () => {
await install()
getCurrentWindow().show()
getCurrentWindow().focus()
setStep(Step.FINISH)
}}
className='no-drag rounded-dm mx-auto w-[60%] rounded-md bg-black px-4 py-2 text-sm text-white hover:brightness-110'
>
Install
</button>
<p className='mx-auto my-4 w-[70%] text-xs text-gray-400'>
You will be prompted for administrator access
</p>
</div>
</div>
))}
</section>
) : (
<section className='flex flex-1 select-none flex-col items-center justify-center pb-20'>
<h2 className='text-3xl font-light text-neutral-400'>No model selected</h2>
<button
onClick={async () => {
const res = await dialog.showOpenDialog(getCurrentWindow(), {
properties: ['openFile', 'multiSelections'],
})
if (res.canceled) {
return
}
setModel(res.filePaths[0])
}}
className='rounded-dm my-8 rounded-md bg-blue-600 px-4 py-2 text-sm text-white hover:brightness-110'
>
Open file...
</button>
</section>
)}
<div className='sticky bottom-0 bg-gradient-to-b from-transparent to-white'>
{model && (
<textarea
autoFocus
rows={1}
value={prompt}
placeholder='Send a message...'
onChange={e => setPrompt(e.target.value)}
className='mx-auto my-4 block w-full max-w-xl resize-none rounded-xl border border-gray-200 px-5 py-3.5 text-[15px] shadow-lg shadow-black/5 focus:outline-none'
onKeyDownCapture={async e => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault()
if (generating) {
return
}
if (!prompt) {
return
}
await setMessages(messages => {
return [...messages, { sender: 'human', content: prompt }, { sender: 'bot', content: '' }]
})
setPrompt('')
setGenerating(true)
await generate(prompt, model, res => {
setMessages(messages => {
let last = messages[messages.length - 1]
return [...messages.slice(0, messages.length - 1), { ...last, content: last.content + res }]
})
})
setGenerating(false)
}
}}
></textarea>
</>
)}
{step === Step.FINISH && (
<>
<div className='mx-auto flex flex-col space-y-20 text-center'>
<h1 className='mt-4 text-2xl tracking-tight text-gray-900'>Run your first model</h1>
<div className='flex flex-col'>
<div className='group relative flex items-center'>
<pre className='language-none text-2xs w-full rounded-md bg-gray-100 px-4 py-3 text-start leading-normal'>
{command}
</pre>
<button
className={`no-drag absolute right-[5px] px-2 py-2 ${
commandCopied
? 'text-gray-900 opacity-100 hover:cursor-auto'
: 'text-gray-200 opacity-50 hover:cursor-pointer'
} hover:font-bold hover:text-gray-900 group-hover:opacity-100`}
onClick={() => {
copy(command)
setCommandCopied(true)
setTimeout(() => setCommandCopied(false), 3000)
}}
>
{commandCopied ? (
<CheckIcon className='h-4 w-4 font-bold text-gray-500' />
) : (
<DocumentDuplicateIcon className='h-4 w-4 text-gray-500' />
)}
</button>
</div>
<p className='mx-auto my-4 w-[70%] text-xs text-gray-400'>
Run this command in your favorite terminal.
</p>
</div>
<button
onClick={() => {
store.set('first-time-run', true)
window.close()
}}
className='no-drag rounded-dm mx-auto w-[60%] rounded-md bg-black px-4 py-2 text-sm text-white hover:brightness-110'
>
Finish
</button>
</div>
</>
)}
</div>
</div>

4
app/src/declarations.d.ts vendored Normal file

@@ -0,0 +1,4 @@
declare module '*.svg' {
const content: string;
export default content;
}


@@ -1,161 +1,184 @@
import { spawn, exec } from 'child_process'
import { app, autoUpdater, dialog, Tray, Menu } from 'electron'
import { spawn } from 'child_process'
import { app, autoUpdater, dialog, Tray, Menu, BrowserWindow, nativeTheme } from 'electron'
import Store from 'electron-store'
import winston from 'winston'
import 'winston-daily-rotate-file'
import * as path from 'path'
import * as fs from 'fs'
import { analytics, id } from './telemetry'
import { installed } from './install'
require('@electron/remote/main').initialize()
const store = new Store()
let tray: Tray | null = null
let welcomeWindow: BrowserWindow | null = null
declare const MAIN_WINDOW_WEBPACK_ENTRY: string
const logger = winston.createLogger({
transports: [
new winston.transports.Console(),
new winston.transports.File({
filename: path.join(app.getPath('home'), '.ollama', 'logs', 'server.log'),
maxsize: 1024 * 1024 * 20,
maxFiles: 5,
}),
],
format: winston.format.printf(info => info.message),
})
const SingleInstanceLock = app.requestSingleInstanceLock()
if (!SingleInstanceLock) {
app.quit()
}
const createSystemtray = () => {
let iconPath = path.join(__dirname, '..', '..', 'assets', 'ollama_icon_16x16Template.png')
function firstRunWindow() {
// Create the browser window.
welcomeWindow = new BrowserWindow({
width: 400,
height: 500,
frame: false,
fullscreenable: false,
resizable: false,
movable: true,
show: false,
webPreferences: {
nodeIntegration: true,
contextIsolation: false,
},
alwaysOnTop: true,
})
require('@electron/remote/main').enable(welcomeWindow.webContents)
// and load the index.html of the app.
welcomeWindow.loadURL(MAIN_WINDOW_WEBPACK_ENTRY)
welcomeWindow.on('ready-to-show', () => welcomeWindow.show())
// for debugging
// welcomeWindow.webContents.openDevTools()
if (process.platform === 'darwin') {
app.dock.hide()
}
}
function createSystemtray() {
let iconPath = nativeTheme.shouldUseDarkColors
? path.join(__dirname, '..', '..', 'assets', 'ollama_icon_16x16Template.png')
: path.join(__dirname, '..', '..', 'assets', 'ollama_outline_icon_16x16Template.png')
if (app.isPackaged) {
iconPath = path.join(process.resourcesPath, 'ollama_icon_16x16Template.png')
iconPath = nativeTheme.shouldUseDarkColors
? path.join(process.resourcesPath, 'ollama_icon_16x16Template.png')
: path.join(process.resourcesPath, 'ollama_outline_icon_16x16Template.png')
}
tray = new Tray(iconPath)
nativeTheme.on('updated', function theThemeHasChanged () {
if (nativeTheme.shouldUseDarkColors) {
app.isPackaged
? tray.setImage(path.join(process.resourcesPath, 'ollama_icon_16x16Template.png'))
: tray.setImage(path.join(__dirname, '..', '..', 'assets', 'ollama_icon_16x16Template.png'))
} else {
app.isPackaged
? tray.setImage(path.join(process.resourcesPath, 'ollama_outline_icon_16x16Template.png'))
: tray.setImage(path.join(__dirname, '..', '..', 'assets', 'ollama_outline_icon_16x16Template.png'))
}
})
const contextMenu = Menu.buildFromTemplate([{ role: 'quit', label: 'Quit Ollama', accelerator: 'Command+Q' }])
tray.setContextMenu(contextMenu)
tray.setToolTip('Ollama')
}
// Handle creating/removing shortcuts on Windows when installing/uninstalling.
if (require('electron-squirrel-startup')) {
app.quit()
}
const ollama = path.join(process.resourcesPath, 'ollama')
// if the app is packaged then run the server
if (app.isPackaged) {
// Start the executable
console.log(`Starting server`)
const proc = spawn(ollama, ['serve'])
proc.stdout.on('data', data => {
console.log(`server: ${data}`)
})
proc.stderr.on('data', data => {
console.error(`server: ${data}`)
})
process.on('exit', () => {
proc.kill()
})
}
function server() {
const binary = app.isPackaged
? path.join(process.resourcesPath, 'ollama')
: path.resolve(__dirname, '..', '..', 'ollama')
: path.resolve(process.cwd(), '..', 'ollama')
console.log(`Starting server`)
const proc = spawn(binary, ['serve'])
proc.stdout.on('data', data => {
console.log(`server: ${data}`)
})
proc.stderr.on('data', data => {
console.error(`server: ${data}`)
logger.info(data.toString().trim())
})
process.on('exit', () => {
proc.stderr.on('data', data => {
logger.error(data.toString().trim())
})
function restart() {
logger.info('Restarting the server...')
server()
}
proc.on('exit', restart)
app.on('before-quit', () => {
proc.off('exit', restart)
proc.kill()
})
}
function installCLI() {
const symlinkPath = '/usr/local/bin/ollama'
if (fs.existsSync(symlinkPath) && fs.readlinkSync(symlinkPath) === ollama) {
return
}
dialog
.showMessageBox({
type: 'info',
title: 'Ollama CLI installation',
message: 'To make the Ollama command work in your terminal, it needs administrator privileges.',
buttons: ['OK'],
})
.then(result => {
if (result.response === 0) {
const command = `
do shell script "ln -F -s ${ollama} /usr/local/bin/ollama" with administrator privileges
`
exec(`osascript -e '${command}'`, (error: Error | null, stdout: string, stderr: string) => {
if (error) {
console.error(`exec error: ${error}`)
return
}
console.log(`stdout: ${stdout}`)
console.error(`stderr: ${stderr}`)
})
}
})
if (process.platform === 'darwin') {
app.dock.hide()
}
app.on('ready', () => {
if (process.platform === 'darwin') {
app.dock.hide()
if (app.isPackaged) {
if (!app.isInApplicationsFolder()) {
const chosen = dialog.showMessageBoxSync({
type: 'question',
buttons: ['Move to Applications', 'Do Not Move'],
message: 'Ollama works best when run from the Applications directory.',
defaultId: 0,
cancelId: 1,
})
if (!store.has('first-time-run')) {
// This is the first run
app.setLoginItemSettings({ openAtLogin: true })
store.set('first-time-run', false)
} else {
// The app has been run before
app.setLoginItemSettings({ openAtLogin: app.getLoginItemSettings().openAtLogin })
}
if (!app.isInApplicationsFolder()) {
const chosen = dialog.showMessageBoxSync({
type: 'question',
buttons: ['Move to Applications', 'Do Not Move'],
message: 'Ollama works best when run from the Applications directory.',
defaultId: 0,
cancelId: 1,
})
if (chosen === 0) {
try {
app.moveToApplicationsFolder({
conflictHandler: conflictType => {
if (conflictType === 'existsAndRunning') {
dialog.showMessageBoxSync({
type: 'info',
message: 'Cannot move to Applications directory',
detail:
'Another version of Ollama is currently running from your Applications directory. Close it first and try again.',
})
}
return true
},
})
return
} catch (e) {
console.error('Failed to move to applications folder')
console.error(e)
if (chosen === 0) {
try {
app.moveToApplicationsFolder({
conflictHandler: conflictType => {
if (conflictType === 'existsAndRunning') {
dialog.showMessageBoxSync({
type: 'info',
message: 'Cannot move to Applications directory',
detail:
'Another version of Ollama is currently running from your Applications directory. Close it first and try again.',
})
}
return true
},
})
return
} catch (e) {
logger.error(`[Move to Applications] Failed to move to applications folder - ${e.message}}`)
}
}
}
}
}
createSystemtray()
server()
if (app.isPackaged) {
installCLI()
if (store.get('first-time-run') && installed()) {
app.setLoginItemSettings({ openAtLogin: app.getLoginItemSettings().openAtLogin })
return
}
// This is the first run or the CLI is no longer installed
app.setLoginItemSettings({ openAtLogin: true })
firstRunWindow()
})
// Quit when all windows are closed, except on macOS. There, it's common
@@ -183,8 +206,6 @@ async function heartbeat() {
})
}
heartbeat()
if (app.isPackaged) {
heartbeat()
autoUpdater.checkForUpdates()
@@ -195,7 +216,7 @@ if (app.isPackaged) {
}
autoUpdater.on('error', e => {
console.error('update check failed', e)
logger.error(`update check failed - ${e.message}`)
})
autoUpdater.on('update-downloaded', (event, releaseNotes, releaseName) => {

26
app/src/install.ts Normal file

@@ -0,0 +1,26 @@
import * as fs from 'fs'
import { exec as cbExec } from 'child_process'
import * as path from 'path'
import { promisify } from 'util'
const app = process && process.type === 'renderer' ? require('@electron/remote').app : require('electron').app
const ollama = app.isPackaged ? path.join(process.resourcesPath, 'ollama') : path.resolve(process.cwd(), '..', 'ollama')
const exec = promisify(cbExec)
const symlinkPath = '/usr/local/bin/ollama'
export function installed() {
return fs.existsSync(symlinkPath) && fs.readlinkSync(symlinkPath) === ollama
}
export async function install() {
const command = `do shell script "mkdir -p ${path.dirname(
symlinkPath
)} && ln -F -s ${ollama} ${symlinkPath}" with administrator privileges`
try {
await exec(`osascript -e '${command}'`)
} catch (error) {
console.error(`cli: failed to install cli: ${error.message}`)
return
}
}

9
app/src/ollama.svg Normal file

File diff suppressed because one or more lines are too long (size after: 17 KiB).


@@ -4,8 +4,6 @@ import Store from 'electron-store'
const store = new Store()
console.log(process.env)
export const analytics = new Analytics({ writeKey: process.env.TELEMETRY_WRITE_KEY || '<empty>' })
export function id(): string {


@@ -28,4 +28,8 @@ export const rules: Required<ModuleOptions>['rules'] = [
},
},
},
{
test: /\.svg$/,
use: ['@svgr/webpack'],
},
]


@@ -5,36 +5,79 @@ import (
"context"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
"github.com/schollz/progressbar/v3"
"github.com/chzyer/readline"
"github.com/dustin/go-humanize"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
"golang.org/x/term"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/format"
"github.com/jmorganca/ollama/progressbar"
"github.com/jmorganca/ollama/server"
)
func cacheDir() string {
home, err := os.UserHomeDir()
func create(cmd *cobra.Command, args []string) error {
filename, _ := cmd.Flags().GetString("file")
filename, err := filepath.Abs(filename)
if err != nil {
panic(err)
return err
}
return path.Join(home, ".ollama")
client := api.NewClient()
var spinner *Spinner
request := api.CreateRequest{Name: args[0], Path: filename}
fn := func(resp api.CreateProgress) error {
if spinner != nil {
spinner.Stop()
}
spinner = NewSpinner(resp.Status)
go spinner.Spin(100 * time.Millisecond)
return nil
}
if err := client.Create(context.Background(), &request, fn); err != nil {
return err
}
if spinner != nil {
spinner.Stop()
}
return nil
}
func RunRun(cmd *cobra.Command, args []string) error {
_, err := os.Stat(args[0])
mp := server.ParseModelPath(args[0])
fp, err := mp.GetManifestPath(false)
if err != nil {
return err
}
_, err = os.Stat(fp)
switch {
case errors.Is(err, os.ErrNotExist):
if err := pull(args[0]); err != nil {
return err
var apiStatusError api.StatusError
if !errors.As(err, &apiStatusError) {
return err
}
if apiStatusError.StatusCode != http.StatusBadGateway {
return err
}
}
case err != nil:
return err
@@ -43,108 +86,259 @@ func RunRun(cmd *cobra.Command, args []string) error {
return RunGenerate(cmd, args)
}
func push(cmd *cobra.Command, args []string) error {
client := api.NewClient()
request := api.PushRequest{Name: args[0]}
fn := func(resp api.ProgressResponse) error {
fmt.Println(resp.Status)
return nil
}
if err := client.Push(context.Background(), &request, fn); err != nil {
return err
}
return nil
}
func list(cmd *cobra.Command, args []string) error {
client := api.NewClient()
models, err := client.List(context.Background())
if err != nil {
return err
}
var data [][]string
for _, m := range models.Models {
if len(args) == 0 || strings.HasPrefix(m.Name, args[0]) {
data = append(data, []string{m.Name, humanize.Bytes(uint64(m.Size)), format.HumanTime(m.ModifiedAt, "Never")})
}
}
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"NAME", "SIZE", "MODIFIED"})
table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
table.SetAlignment(tablewriter.ALIGN_LEFT)
table.SetHeaderLine(false)
table.SetBorder(false)
table.SetNoWhiteSpace(true)
table.SetTablePadding("\t")
table.AppendBulk(data)
table.Render()
return nil
}
func RunPull(cmd *cobra.Command, args []string) error {
return pull(args[0])
}
func pull(model string) error {
client := api.NewClient()
var currentDigest string
var bar *progressbar.ProgressBar
return client.Pull(
context.Background(),
&api.PullRequest{Model: model},
func(progress api.PullProgress) error {
if bar == nil && progress.Percent == 100 {
// already downloaded
return nil
}
if bar == nil {
bar = progressbar.DefaultBytes(progress.Total)
}
return bar.Set64(progress.Completed)
},
)
request := api.PullRequest{Name: model}
fn := func(resp api.ProgressResponse) error {
if resp.Digest != currentDigest && resp.Digest != "" {
currentDigest = resp.Digest
bar = progressbar.DefaultBytes(
int64(resp.Total),
fmt.Sprintf("pulling %s...", resp.Digest[7:19]),
)
bar.Set(resp.Completed)
} else if resp.Digest == currentDigest && resp.Digest != "" {
bar.Set(resp.Completed)
} else {
currentDigest = ""
fmt.Println(resp.Status)
}
return nil
}
if err := client.Pull(context.Background(), &request, fn); err != nil {
return err
}
return nil
}
func RunGenerate(_ *cobra.Command, args []string) error {
func RunGenerate(cmd *cobra.Command, args []string) error {
if len(args) > 1 {
return generateOneshot(args[0], args[1:]...)
// join all args into a single prompt
return generate(cmd, args[0], strings.Join(args[1:], " "))
}
if term.IsTerminal(int(os.Stdin.Fd())) {
return generateInteractive(args[0])
if readline.IsTerminal(int(os.Stdin.Fd())) {
return generateInteractive(cmd, args[0])
}
return generateBatch(args[0])
return generateBatch(cmd, args[0])
}
func generate(model, prompt string) error {
var generateContextKey struct{}
func generate(cmd *cobra.Command, model, prompt string) error {
if len(strings.TrimSpace(prompt)) > 0 {
client := api.NewClient()
spinner := progressbar.NewOptions(-1,
progressbar.OptionSetWriter(os.Stderr),
progressbar.OptionThrottle(60*time.Millisecond),
progressbar.OptionSpinnerType(14),
progressbar.OptionSetRenderBlankState(true),
progressbar.OptionSetElapsedTime(false),
progressbar.OptionClearOnFinish(),
)
spinner := NewSpinner("")
go spinner.Spin(60 * time.Millisecond)
go func() {
for range time.Tick(60 * time.Millisecond) {
if spinner.IsFinished() {
break
}
var latest api.GenerateResponse
spinner.Add(1)
}
}()
generateContext, ok := cmd.Context().Value(generateContextKey).([]int)
if !ok {
generateContext = []int{}
}
client.Generate(context.Background(), &api.GenerateRequest{Model: model, Prompt: prompt}, func(resp api.GenerateResponse) error {
request := api.GenerateRequest{Model: model, Prompt: prompt, Context: generateContext}
fn := func(resp api.GenerateResponse) error {
if !spinner.IsFinished() {
spinner.Finish()
}
latest = resp
fmt.Print(resp.Response)
cmd.SetContext(context.WithValue(cmd.Context(), generateContextKey, resp.Context))
return nil
})
}
fmt.Println()
fmt.Println()
}
return nil
}
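Outside the CLI, the same streaming client shown in this diff can be driven directly. The following is a minimal sketch, assuming the api package behaves as the code above suggests (NewClient takes no arguments, GenerateRequest carries Model, Prompt and Context, and GenerateResponse streams Response text along with an updated Context):
```go
package main

import (
	"context"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	client := api.NewClient()

	// convo holds the token context returned by the previous call so a
	// follow-up prompt can continue the same conversation.
	var convo []int

	req := api.GenerateRequest{Model: "llama2", Prompt: "Why is the sky blue?", Context: convo}
	fn := func(resp api.GenerateResponse) error {
		fmt.Print(resp.Response) // tokens are printed as they stream in
		convo = resp.Context     // keep the updated context for the next request
		return nil
	}

	if err := client.Generate(context.Background(), &req, fn); err != nil {
		panic(err)
	}
	fmt.Println()
}
```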
func generateOneshot(model string, prompts ...string) error {
for _, prompt := range prompts {
fmt.Printf(">>> %s\n", prompt)
if err := generate(model, prompt); err != nil {
if err := client.Generate(context.Background(), &request, fn); err != nil {
return err
}
fmt.Println()
fmt.Println()
verbose, err := cmd.Flags().GetBool("verbose")
if err != nil {
return err
}
if verbose {
latest.Summary()
}
}
return nil
}
func generateInteractive(model string) error {
fmt.Print(">>> ")
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
if err := generate(model, scanner.Text()); err != nil {
func generateInteractive(cmd *cobra.Command, model string) error {
home, err := os.UserHomeDir()
if err != nil {
return err
}
completer := readline.NewPrefixCompleter(
readline.PcItem("/help"),
readline.PcItem("/list"),
readline.PcItem("/set",
readline.PcItem("history"),
readline.PcItem("nohistory"),
readline.PcItem("verbose"),
readline.PcItem("quiet"),
readline.PcItem("mode",
readline.PcItem("vim"),
readline.PcItem("emacs"),
readline.PcItem("default"),
),
),
readline.PcItem("/exit"),
readline.PcItem("/bye"),
)
usage := func() {
fmt.Fprintln(os.Stderr, "commands:")
fmt.Fprintln(os.Stderr, completer.Tree(" "))
}
config := readline.Config{
Prompt: ">>> ",
HistoryFile: filepath.Join(home, ".ollama", "history"),
AutoComplete: completer,
}
scanner, err := readline.NewEx(&config)
if err != nil {
return err
}
defer scanner.Close()
for {
line, err := scanner.Readline()
switch {
case errors.Is(err, io.EOF):
return nil
case errors.Is(err, readline.ErrInterrupt):
continue
case err != nil:
return err
}
fmt.Print(">>> ")
}
line = strings.TrimSpace(line)
return nil
switch {
case strings.HasPrefix(line, "/list"):
args := strings.Fields(line)
if err := list(cmd, args[1:]); err != nil {
return err
}
continue
case strings.HasPrefix(line, "/set"):
args := strings.Fields(line)
if len(args) > 1 {
switch args[1] {
case "history":
scanner.HistoryEnable()
continue
case "nohistory":
scanner.HistoryDisable()
continue
case "verbose":
cmd.Flags().Set("verbose", "true")
continue
case "quiet":
cmd.Flags().Set("verbose", "false")
continue
case "mode":
if len(args) > 2 {
switch args[2] {
case "vim":
scanner.SetVimMode(true)
continue
case "emacs", "default":
scanner.SetVimMode(false)
continue
}
}
}
}
case line == "/help", line == "/?":
usage()
continue
case line == "/exit", line == "/bye":
return nil
}
if err := generate(cmd, model, line); err != nil {
return err
}
}
}
func generateBatch(model string) error {
func generateBatch(cmd *cobra.Command, model string) error {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
prompt := scanner.Text()
fmt.Printf(">>> %s\n", prompt)
if err := generate(model, prompt); err != nil {
if err := generate(cmd, model, prompt); err != nil {
return err
}
}
@@ -181,14 +375,19 @@ func NewCLI() *cobra.Command {
CompletionOptions: cobra.CompletionOptions{
DisableDefaultCmd: true,
},
PersistentPreRunE: func(_ *cobra.Command, args []string) error {
// create the models directory and its parents
return os.MkdirAll(path.Join(cacheDir(), "models"), 0o700)
},
}
cobra.EnableCommandSorting = false
createCmd := &cobra.Command{
Use: "create MODEL",
Short: "Create a model from a Modelfile",
Args: cobra.MinimumNArgs(1),
RunE: create,
}
createCmd.Flags().StringP("file", "f", "Modelfile", "Name of the Modelfile (default \"Modelfile\")")
runCmd := &cobra.Command{
Use: "run MODEL [PROMPT]",
Short: "Run a model",
@@ -196,6 +395,8 @@ func NewCLI() *cobra.Command {
RunE: RunRun,
}
runCmd.Flags().Bool("verbose", false, "Show timings for response")
serveCmd := &cobra.Command{
Use: "serve",
Aliases: []string{"start"},
@@ -203,9 +404,33 @@ func NewCLI() *cobra.Command {
RunE: RunServer,
}
pullCmd := &cobra.Command{
Use: "pull MODEL",
Short: "Pull a model from a registry",
Args: cobra.MinimumNArgs(1),
RunE: RunPull,
}
pushCmd := &cobra.Command{
Use: "push MODEL",
Short: "Push a model to a registry",
Args: cobra.MinimumNArgs(1),
RunE: push,
}
listCmd := &cobra.Command{
Use: "list",
Short: "List models",
RunE: list,
}
rootCmd.AddCommand(
serveCmd,
createCmd,
runCmd,
pullCmd,
pushCmd,
listCmd,
)
return rootCmd

44
cmd/spinner.go Normal file

@@ -0,0 +1,44 @@
package cmd
import (
"fmt"
"os"
"time"
"github.com/jmorganca/ollama/progressbar"
)
type Spinner struct {
description string
*progressbar.ProgressBar
}
func NewSpinner(description string) *Spinner {
return &Spinner{
description: description,
ProgressBar: progressbar.NewOptions(-1,
progressbar.OptionSetWriter(os.Stderr),
progressbar.OptionThrottle(60*time.Millisecond),
progressbar.OptionSpinnerType(14),
progressbar.OptionSetRenderBlankState(true),
progressbar.OptionSetElapsedTime(false),
progressbar.OptionClearOnFinish(),
progressbar.OptionSetDescription(description),
),
}
}
func (s *Spinner) Spin(tick time.Duration) {
for range time.Tick(tick) {
if s.IsFinished() {
break
}
s.Add(1)
}
}
func (s *Spinner) Stop() {
s.Finish()
fmt.Println(s.description)
}
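A usage sketch (not part of the diff) of the spinner pattern used by generate() in cmd.go; waitForFirstToken is a hypothetical stand-in for the request callback:
```go
package cmd

import "time"

// spinnerExample spins on a 60ms tick until the first streamed token arrives,
// then clears the spinner so the response can be printed in its place.
func spinnerExample(waitForFirstToken func()) {
	spinner := NewSpinner("")
	go spinner.Spin(60 * time.Millisecond)

	waitForFirstToken() // block until the first api.GenerateResponse arrives

	if !spinner.IsFinished() {
		spinner.Finish() // clears the spinner line
	}
}
```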


@@ -3,13 +3,13 @@
Install required tools:
```
brew install cmake go node
brew install go
```
Then run `make`:
Then build ollama:
```
make
go build .
```
Now you can run `ollama`:

111
docs/modelfile.md Normal file

@@ -0,0 +1,111 @@
# Ollama Model File
A model file is the blueprint to create and share models with Ollama.
## Format
The format of the Modelfile:
```modelfile
# comment
INSTRUCTION arguments
```
| Instruction | Description |
|------------------------- |--------------------------------------------------------- |
| FROM<br>(required) | Defines the base model to be used when creating a model |
| PARAMETER<br>(optional) | Sets the parameters for how the model will be run |
| PROMPT <br>(optional) | Sets the prompt to use when the model will be run |
| LICENSE<br>(optional) | Specifies the license of the model. Licenses are additive |
## Examples
An example of a Modelfile creating a Mario blueprint:
```
FROM llama2
PARAMETER temperature 1
PROMPT """
System: You are Mario from super mario bros, acting as an assistant.
User: {{ .Prompt }}
Assistant:
"""
```
To use this:
1. Save it as a file (e.g. modelfile)
2. `ollama create NAME -f <location of the file e.g. ./modelfile>`
3. `ollama run NAME`
4. Start using the model!
## FROM (Required)
The FROM instruction defines the base model to be used when creating a model.
```
FROM <model name>:<tag>
```
### Build from llama2
```
FROM llama2:latest
```
A list of available base models:
<https://github.com/jmorganca/ollama#model-library>
### Build from a bin file
```
FROM ./ollama-model.bin
```
## PARAMETER (Optional)
The PARAMETER instruction defines a parameter that can be set when the model is run.
```
PARAMETER <parameter> <parametervalue>
```
### Valid Parameters and Values
| Parameter | Description | Value Type | Example Usage |
|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|-------------------|
| NumCtx | Sets the size of the prompt context window. (Default: 2048) | int | NumCtx 4096 |
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | Temperature 0.7 |
| TopK | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | TopK 40 |
| TopP | Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9) | float | TopP 0.9 |
| NumGPU | The number of GPUs to use. On macOS it defaults to 1 to enable metal support, 0 to disable. | int | numGPU 1 |
| RepeatLastN | Sets how far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = ctx-size) | int | RepeatLastN 64 |
| RepeatPenalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | RepeatPenalty 1.1 |
| TFSZ | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | TFSZ 1 |
| Mirostat | Enable Mirostat sampling for controlling perplexity. (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0) | int | Mirostat 0 |
| MirostatTau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | MirostatTau 5.0 |
| MirostatEta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | MirostatEta 0.1 |
| NumThread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | NumThread 8 |
## PROMPT
A prompt is a set of instructions to an LLM that causes the model to return the desired response(s). Typically a prompt has 3-4 components: system, context, user, and response.
```modelfile
PROMPT """
{{- if not .Context }}
### System:
You are a content marketer who needs to come up with a short but succinct tweet. Make sure to include the appropriate hashtags and links. Sometimes when appropriate, describe a meme that can be included as well. All answers should be in the form of a tweet which has a max size of 280 characters. Every instruction will be the topic to create a tweet about.
{{- end }}
### Instruction:
{{ .Prompt }}
### Response:
"""
```
## Notes
- The **Modelfile is not case sensitive**. In the examples, we use uppercase instructions to make them easier to distinguish from arguments.
- Instructions can be in any order. In the examples, we start with the FROM instruction to keep it easily readable.


@@ -1,64 +0,0 @@
# Python SDK
## Install
```
pip install ollama
```
## Example
```python
import ollama
ollama.generate("orca-mini-3b", "hi")
```
## Reference
### `ollama.generate(model, message)`
Generate a completion
```python
ollama.generate("./llama-7b-ggml.bin", "hi")
```
### `ollama.models()`
List available local models
```python
models = ollama.models()
```
### `ollama.load(model)`
Manually load a model for generation
```python
ollama.load("model")
```
### `ollama.unload(model)`
Unload a model
```python
ollama.unload("model")
```
### `ollama.pull(model)`
Download a model
```python
ollama.pull("huggingface.co/thebloke/llama-7b-ggml")
```
### `ollama.search(query)`
Search for compatible models that Ollama can run
```python
ollama.search("llama-7b")
```

15
examples/README.md Normal file

@@ -0,0 +1,15 @@
# Examples
This directory contains examples that can be created and run with `ollama`.
To create a model:
```
ollama create example -f <example file>
```
To run a model:
```
ollama run example
```

11
examples/mario/Modelfile Normal file

@@ -0,0 +1,11 @@
FROM llama2
PARAMETER temperature 1
PROMPT """
{{- if not .Context }}
<<SYS>>
You are Mario from super mario bros, acting as an assistant.
<</SYS>>
{{- end }}
[INST] {{ .Prompt }} [/INST]
"""

BIN
examples/mario/logo.png Normal file

Binary file not shown.


Width:  |  Height:  |  Size: 446 KiB

49
examples/mario/readme.md Normal file

@@ -0,0 +1,49 @@
<img src="logo.png" alt="image of Italian plumber" height="200"/>
# Example character: Mario
This example shows how to create a basic character using Llama2 as the base model.
To run this example:
1. Download the Modelfile
2. `ollama pull llama2` to get the base model used in the model file.
3. `ollama create NAME -f ./Modelfile`
4. `ollama run NAME`
Ask it some questions like "Who are you?" or "Is Peach in trouble again?"
## Editing this file
What the model file looks like:
```
FROM llama2
PARAMETER temperature 1
PROMPT """
{{- if not .Context }}
<<SYS>>
You are Mario from super mario bros, acting as an assistant.
<</SYS>>
{{- end }}
[INST] {{ .Prompt }} [/INST]
"""
```
What if you want to change its behaviour?
- Try changing the prompt
- Try changing the parameters [Docs](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md)
- Try changing the model (e.g. use `FROM wizard-vicuna` for the uncensored Wizard Vicuna model)
Once the changes are made:
1. `ollama create NAME -f ./Modelfile`
2. `ollama run NAME`
3. Iterate until you are happy with the results.
Notes:
- This example is for research purposes only. There is no affiliation with any entity.
- When using an uncensored model, please be aware that it may generate offensive content.


@@ -0,0 +1,15 @@
# Modelfile for creating Midjourney prompts from a topic
# This prompt was adapted from the original at https://www.greataiprompts.com/guide/midjourney/best-chatgpt-prompt-for-midjourney/
# Run `ollama create mj -f pathtofile` and then `ollama run mj` and enter a topic
FROM nous-hermes
PROMPT """
{{- if not .Context }}
### System:
Embrace your role as an AI-powered creative assistant, employing Midjourney to manifest compelling AI-generated art. I will outline a specific image concept, and in response, you must produce an exhaustive, multifaceted prompt for Midjourney, ensuring every detail of the original concept is represented in your instructions. Midjourney doesn't do well with text, so after the prompt, give me instructions that I can use to create the titles in an image editor.
{{- end }}
### Instruction:
{{ .Prompt }}
### Response:
"""


@@ -0,0 +1,13 @@
# Modelfile for creating a recipe from a list of ingredients
# Run `ollama create recipemaker -f pathtofile` and then `ollama run recipemaker` and feed it lists of ingredients to create recipes around.
FROM nous-hermes
PROMPT """
{{- if not .Context }}
### System:
The instruction will be a list of ingredients. You should generate a recipe that can be made in less than an hour. You can also include ingredients that most people will find in their pantry every day. The recipe should serve 4 people, and you should include a description of what the meal will taste like.
{{- end }}
### Instruction:
{{ .Prompt }}
### Response:
"""


@@ -0,0 +1,14 @@
# Modelfile for creating a tweet from a topic
# Run `ollama create tweetwriter -f pathtofile` and then `ollama run tweetwriter` and enter a topic
FROM nous-hermes
PROMPT """
{{- if not .Context }}
### System:
You are a content marketer who needs to come up with a short but succinct tweet. Make sure to include the appropriate hashtags and links. Sometimes when appropriate, describe a meme that can be included as well. All answers should be in the form of a tweet which has a max size of 280 characters. Every instruction will be the topic to create a tweet about.
{{- end }}
### Instruction:
{{ .Prompt }}
### Response:
"""

141
format/time.go Normal file

@@ -0,0 +1,141 @@
package format
import (
"fmt"
"math"
"strings"
"time"
)
// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.).
// Modified version of github.com/docker/go-units.HumanDuration
func HumanDuration(d time.Duration) string {
return HumanDurationWithCase(d, true)
}
// HumanDurationWithCase returns a human-readable approximation of a
// duration (eg. "About a minute", "4 hours", etc.), but allows
// you to specify whether the first word should be capitalized
// (eg. "About" vs. "about")
func HumanDurationWithCase(d time.Duration, useCaps bool) string {
seconds := int(d.Seconds())
switch {
case seconds < 1:
if useCaps {
return "Less than a second"
}
return "less than a second"
case seconds == 1:
return "1 second"
case seconds < 60:
return fmt.Sprintf("%d seconds", seconds)
}
minutes := int(d.Minutes())
switch {
case minutes == 1:
if useCaps {
return "About a minute"
}
return "about a minute"
case minutes < 60:
return fmt.Sprintf("%d minutes", minutes)
}
hours := int(math.Round(d.Hours()))
switch {
case hours == 1:
if useCaps {
return "About an hour"
}
return "about an hour"
case hours < 48:
return fmt.Sprintf("%d hours", hours)
case hours < 24*7*2:
return fmt.Sprintf("%d days", hours/24)
case hours < 24*30*2:
return fmt.Sprintf("%d weeks", hours/24/7)
case hours < 24*365*2:
return fmt.Sprintf("%d months", hours/24/30)
}
return fmt.Sprintf("%d years", int(d.Hours())/24/365)
}
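// HumanTime returns a human-readable approximation of how far t is from now
// (eg. "2 days ago", "About a minute from now"), or zeroValue if t is the zero time.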
func HumanTime(t time.Time, zeroValue string) string {
return humanTimeWithCase(t, zeroValue, true)
}
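// HumanTimeLower is HumanTime with the leading word lowercased (eg. "about a minute ago").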
func HumanTimeLower(t time.Time, zeroValue string) string {
return humanTimeWithCase(t, zeroValue, false)
}
func humanTimeWithCase(t time.Time, zeroValue string, useCaps bool) string {
if t.IsZero() {
return zeroValue
}
delta := time.Since(t)
if delta < 0 {
return HumanDurationWithCase(-delta, useCaps) + " from now"
}
return HumanDurationWithCase(delta, useCaps) + " ago"
}
// ExactDuration returns a human-readable hours/minutes/seconds or milliseconds representation of a duration
// the most precise unit reported is milliseconds
func ExactDuration(d time.Duration) string {
if d.Seconds() < 1 {
if d.Milliseconds() == 1 {
return fmt.Sprintf("%d millisecond", d.Milliseconds())
}
return fmt.Sprintf("%d milliseconds", d.Milliseconds())
}
var readableDur strings.Builder
dur := d.String()
// split the default duration string format of 0h0m0s into something nicer to read
h := strings.Split(dur, "h")
if len(h) > 1 {
hours := h[0]
if hours == "1" {
readableDur.WriteString(fmt.Sprintf("%s hour ", hours))
} else {
readableDur.WriteString(fmt.Sprintf("%s hours ", hours))
}
dur = h[1]
}
m := strings.Split(dur, "m")
if len(m) > 1 {
mins := m[0]
switch mins {
case "0":
// skip
case "1":
readableDur.WriteString(fmt.Sprintf("%s minute ", mins))
default:
readableDur.WriteString(fmt.Sprintf("%s minutes ", mins))
}
dur = m[1]
}
s := strings.Split(dur, "s")
if len(s) > 0 {
sec := s[0]
switch sec {
case "0":
// skip
case "1":
readableDur.WriteString(fmt.Sprintf("%s second ", sec))
default:
readableDur.WriteString(fmt.Sprintf("%s seconds ", sec))
}
}
return strings.TrimSpace(readableDur.String())
}
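A small usage sketch (not part of the diff) showing how these helpers behave; the expectations match the tests below:
```go
package main

import (
	"fmt"
	"time"

	"github.com/jmorganca/ollama/format"
)

func main() {
	// The list command's MODIFIED column uses HumanTime with a fallback for the zero time.
	fmt.Println(format.HumanTime(time.Time{}, "Never"))             // Never
	fmt.Println(format.HumanTime(time.Now().Add(-2*time.Hour), "")) // 2 hours ago
	fmt.Println(format.ExactDuration(72 * time.Second))             // 1 minute 12 seconds
}
```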

102
format/time_test.go Normal file

@@ -0,0 +1,102 @@
package format
import (
"testing"
"time"
)
func assertEqual(t *testing.T, a interface{}, b interface{}) {
if a != b {
t.Errorf("Assert failed, expected %v, got %v", b, a)
}
}
func TestHumanDuration(t *testing.T) {
day := 24 * time.Hour
week := 7 * day
month := 30 * day
year := 365 * day
assertEqual(t, "Less than a second", HumanDuration(450*time.Millisecond))
assertEqual(t, "Less than a second", HumanDurationWithCase(450*time.Millisecond, true))
assertEqual(t, "less than a second", HumanDurationWithCase(450*time.Millisecond, false))
assertEqual(t, "1 second", HumanDuration(1*time.Second))
assertEqual(t, "45 seconds", HumanDuration(45*time.Second))
assertEqual(t, "46 seconds", HumanDuration(46*time.Second))
assertEqual(t, "59 seconds", HumanDuration(59*time.Second))
assertEqual(t, "About a minute", HumanDuration(60*time.Second))
assertEqual(t, "About a minute", HumanDurationWithCase(1*time.Minute, true))
assertEqual(t, "about a minute", HumanDurationWithCase(1*time.Minute, false))
assertEqual(t, "3 minutes", HumanDuration(3*time.Minute))
assertEqual(t, "35 minutes", HumanDuration(35*time.Minute))
assertEqual(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second))
assertEqual(t, "45 minutes", HumanDuration(45*time.Minute))
assertEqual(t, "45 minutes", HumanDuration(45*time.Minute+40*time.Second))
assertEqual(t, "46 minutes", HumanDuration(46*time.Minute))
assertEqual(t, "59 minutes", HumanDuration(59*time.Minute))
assertEqual(t, "About an hour", HumanDuration(1*time.Hour))
assertEqual(t, "About an hour", HumanDurationWithCase(1*time.Hour+29*time.Minute, true))
assertEqual(t, "about an hour", HumanDurationWithCase(1*time.Hour+29*time.Minute, false))
assertEqual(t, "2 hours", HumanDuration(1*time.Hour+31*time.Minute))
assertEqual(t, "2 hours", HumanDuration(1*time.Hour+59*time.Minute))
assertEqual(t, "3 hours", HumanDuration(3*time.Hour))
assertEqual(t, "3 hours", HumanDuration(3*time.Hour+29*time.Minute))
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+31*time.Minute))
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+59*time.Minute))
assertEqual(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute))
assertEqual(t, "24 hours", HumanDuration(24*time.Hour))
assertEqual(t, "36 hours", HumanDuration(1*day+12*time.Hour))
assertEqual(t, "2 days", HumanDuration(2*day))
assertEqual(t, "7 days", HumanDuration(7*day))
assertEqual(t, "13 days", HumanDuration(13*day+5*time.Hour))
assertEqual(t, "2 weeks", HumanDuration(2*week))
assertEqual(t, "2 weeks", HumanDuration(2*week+4*day))
assertEqual(t, "3 weeks", HumanDuration(3*week))
assertEqual(t, "4 weeks", HumanDuration(4*week))
assertEqual(t, "4 weeks", HumanDuration(4*week+3*day))
assertEqual(t, "4 weeks", HumanDuration(1*month))
assertEqual(t, "6 weeks", HumanDuration(1*month+2*week))
assertEqual(t, "2 months", HumanDuration(2*month))
assertEqual(t, "2 months", HumanDuration(2*month+2*week))
assertEqual(t, "3 months", HumanDuration(3*month))
assertEqual(t, "3 months", HumanDuration(3*month+1*week))
assertEqual(t, "5 months", HumanDuration(5*month+2*week))
assertEqual(t, "13 months", HumanDuration(13*month))
assertEqual(t, "23 months", HumanDuration(23*month))
assertEqual(t, "24 months", HumanDuration(24*month))
assertEqual(t, "2 years", HumanDuration(24*month+2*week))
assertEqual(t, "3 years", HumanDuration(3*year+2*month))
}
func TestHumanTime(t *testing.T) {
now := time.Now()
t.Run("zero value", func(t *testing.T) {
assertEqual(t, HumanTime(time.Time{}, "never"), "never")
})
t.Run("time in the future", func(t *testing.T) {
v := now.Add(48 * time.Hour)
assertEqual(t, HumanTime(v, ""), "2 days from now")
})
t.Run("time in the past", func(t *testing.T) {
v := now.Add(-48 * time.Hour)
assertEqual(t, HumanTime(v, ""), "2 days ago")
})
}
func TestExactDuration(t *testing.T) {
assertEqual(t, "1 millisecond", ExactDuration(1*time.Millisecond))
assertEqual(t, "10 milliseconds", ExactDuration(10*time.Millisecond))
assertEqual(t, "1 second", ExactDuration(1*time.Second))
assertEqual(t, "10 seconds", ExactDuration(10*time.Second))
assertEqual(t, "1 minute", ExactDuration(1*time.Minute))
assertEqual(t, "10 minutes", ExactDuration(10*time.Minute))
assertEqual(t, "1 hour", ExactDuration(1*time.Hour))
assertEqual(t, "10 hours", ExactDuration(10*time.Hour))
assertEqual(t, "1 hour 1 second", ExactDuration(1*time.Hour+1*time.Second))
assertEqual(t, "1 hour 10 seconds", ExactDuration(1*time.Hour+10*time.Second))
assertEqual(t, "1 hour 1 minute", ExactDuration(1*time.Hour+1*time.Minute))
assertEqual(t, "1 hour 10 minutes", ExactDuration(1*time.Hour+10*time.Minute))
assertEqual(t, "1 hour 1 minute 1 second", ExactDuration(1*time.Hour+1*time.Minute+1*time.Second))
assertEqual(t, "10 hours 10 minutes 10 seconds", ExactDuration(10*time.Hour+10*time.Minute+10*time.Second))
}

1
ggml-metal.metal Symbolic link

@@ -0,0 +1 @@
llama/ggml-metal.metal

14
go.mod

@@ -3,19 +3,21 @@ module github.com/jmorganca/ollama
go 1.20
require (
github.com/dustin/go-humanize v1.0.1
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-runewidth v0.0.14
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
github.com/olekukonko/tablewriter v0.0.5
github.com/spf13/cobra v1.7.0
)
require (
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/rivo/uniseg v0.2.0 // indirect
)
require github.com/rivo/uniseg v0.2.0 // indirect
require (
dario.cat/mergo v1.0.0
github.com/bytedance/sonic v1.9.1 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/chzyer/readline v1.5.1
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
@@ -27,12 +29,10 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/lithammer/fuzzysearch v1.1.8
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/schollz/progressbar/v3 v3.13.1
github.com/spf13/pflag v1.0.5 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect

53
go.sum

@@ -1,13 +1,23 @@
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
@@ -32,17 +42,14 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4=
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
@@ -52,6 +59,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -59,8 +68,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/schollz/progressbar/v3 v3.13.1 h1:o8rySDYiQ59Mwzy2FELeHY5ZARXZTVJC7iHD6PEFUiE=
github.com/schollz/progressbar/v3 v3.13.1/go.mod h1:xvrbki8kfT1fzWzBT/UZd9L6GA+jdL7HAgq2RFnO6fQ=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -80,54 +87,22 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=

1
llama/.gitignore vendored

@@ -1 +0,0 @@
build


@@ -1,23 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(binding)
include(FetchContent)
FetchContent_Declare(
llama_cpp
GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
GIT_TAG 55dbb91
)
FetchContent_MakeAvailable(llama_cpp)
add_library(binding ${CMAKE_CURRENT_SOURCE_DIR}/binding/binding.cpp ${llama_cpp_SOURCE_DIR}/examples/common.cpp)
target_include_directories(binding PRIVATE ${llama_cpp_SOURCE_DIR}/examples)
target_link_libraries(binding llama ggml_static)
if (LLAMA_METAL)
configure_file(${llama_cpp_SOURCE_DIR}/ggml-metal.metal ${CMAKE_CURRENT_BINARY_DIR}/../../ggml-metal.metal COPYONLY)
endif()
add_custom_target(copy_libllama ALL COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:llama> ${CMAKE_CURRENT_BINARY_DIR})
add_custom_target(copy_libggml_static ALL COMMAND ${CMAKE_COMMAND} -E copy_if_different $<TARGET_FILE:ggml_static> ${CMAKE_CURRENT_BINARY_DIR})


@@ -1,691 +0,0 @@
#include "common.h"
#include "llama.h"
#include "binding.h"
#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <regex>
#include <sstream>
#include <string>
#include <vector>
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
#include <signal.h>
#include <windows.h>
#endif
#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || \
defined(_WIN32)
void sigint_handler(int signo) {
if (signo == SIGINT) {
_exit(130);
}
}
#endif
int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
gpt_params params = *params_p;
if (params.seed <= 0) {
params.seed = time(NULL);
}
std::mt19937 rng(params.seed);
llama_init_backend(params.numa);
int n_past = 0;
// Add a space in front of the first character to match OG llama tokenizer
// behavior
params.prompt.insert(0, 1, ' ');
// tokenize the prompt
auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);
// determine newline token
auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);
if (embd_inp.size() > 0) {
if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past,
params.n_threads)) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return 1;
}
}
const int n_embd = llama_n_embd(ctx);
const auto embeddings = llama_get_embeddings(ctx);
for (int i = 0; i < n_embd; i++) {
res_embeddings[i] = embeddings[i];
}
return 0;
}
int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
int tokenSize, float *res_embeddings) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
gpt_params params = *params_p;
for (int i = 0; i < tokenSize; i++) {
auto token_str = llama_token_to_str(ctx, tokens[i]);
if (token_str == nullptr) {
continue;
}
std::vector<std::string> my_vector;
std::string str_token(token_str); // create a new std::string from the char*
params_p->prompt += str_token;
}
return get_embeddings(params_ptr, state_pr, res_embeddings);
}
int eval(void *params_ptr, void *state_pr, char *text) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
auto n_past = 0;
auto last_n_tokens_data =
std::vector<llama_token>(params_p->repeat_last_n, 0);
auto tokens = std::vector<llama_token>(params_p->n_ctx);
auto n_prompt_tokens =
llama_tokenize(ctx, text, tokens.data(), tokens.size(), true);
if (n_prompt_tokens < 1) {
fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
return 1;
}
// evaluate prompt
return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past,
params_p->n_threads);
}
int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug) {
gpt_params *params_p = (gpt_params *)params_ptr;
llama_context *ctx = (llama_context *)state_pr;
gpt_params params = *params_p;
const int n_ctx = llama_n_ctx(ctx);
if (params.seed <= 0) {
params.seed = time(NULL);
}
std::mt19937 rng(params.seed);
std::string path_session = params.path_prompt_cache;
std::vector<llama_token> session_tokens;
if (!path_session.empty()) {
if (debug) {
fprintf(stderr, "%s: attempting to load saved session from '%s'\n",
__func__, path_session.c_str());
}
// fopen to check for existing session
FILE *fp = std::fopen(path_session.c_str(), "rb");
if (fp != NULL) {
std::fclose(fp);
session_tokens.resize(n_ctx);
size_t n_token_count_out = 0;
if (!llama_load_session_file(
ctx, path_session.c_str(), session_tokens.data(),
session_tokens.capacity(), &n_token_count_out)) {
fprintf(stderr, "%s: error: failed to load session file '%s'\n",
__func__, path_session.c_str());
return 1;
}
session_tokens.resize(n_token_count_out);
llama_set_rng_seed(ctx, params.seed);
if (debug) {
fprintf(stderr, "%s: loaded a session with prompt size of %d tokens\n",
__func__, (int)session_tokens.size());
}
} else {
if (debug) {
fprintf(stderr, "%s: session file does not exist, will create\n",
__func__);
}
}
}
std::vector<llama_token> embd_inp;
if (!params.prompt.empty() || session_tokens.empty()) {
// Add a space in front of the first character to match OG llama tokenizer
// behavior
params.prompt.insert(0, 1, ' ');
embd_inp = ::llama_tokenize(ctx, params.prompt, true);
} else {
embd_inp = session_tokens;
}
// debug message about similarity of saved session, if applicable
size_t n_matching_session_tokens = 0;
if (session_tokens.size()) {
for (llama_token id : session_tokens) {
if (n_matching_session_tokens >= embd_inp.size() ||
id != embd_inp[n_matching_session_tokens]) {
break;
}
n_matching_session_tokens++;
}
if (debug) {
if (params.prompt.empty() &&
n_matching_session_tokens == embd_inp.size()) {
fprintf(stderr, "%s: using full prompt from session file\n", __func__);
} else if (n_matching_session_tokens >= embd_inp.size()) {
fprintf(stderr, "%s: session file has exact match for prompt!\n",
__func__);
} else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
fprintf(stderr,
"%s: warning: session file has low similarity to prompt (%zu / "
"%zu tokens); will mostly be reevaluated\n",
__func__, n_matching_session_tokens, embd_inp.size());
} else {
fprintf(stderr, "%s: session file matches %zu / %zu tokens of prompt\n",
__func__, n_matching_session_tokens, embd_inp.size());
}
}
}
// if we will use the cache for the full prompt without reaching the end of
// the cache, force reevaluation of the last token to recalculate the
// cached logits
if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() &&
session_tokens.size() > embd_inp.size()) {
session_tokens.resize(embd_inp.size() - 1);
}
// number of tokens to keep when resetting context
if (params.n_keep < 0 || params.n_keep > (int)embd_inp.size()) {
params.n_keep = (int)embd_inp.size();
}
// determine newline token
auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);
// TODO: replace with ring-buffer
std::vector<llama_token> last_n_tokens(n_ctx);
std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
bool need_to_save_session =
!path_session.empty() && n_matching_session_tokens < embd_inp.size();
int n_past = 0;
int n_remain = params.n_predict;
int n_consumed = 0;
int n_session_consumed = 0;
std::vector<llama_token> embd;
std::string res = "";
// do one empty run to warm up the model
{
const std::vector<llama_token> tmp = {
llama_token_bos(),
};
llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
llama_reset_timings(ctx);
}
while (n_remain != 0) {
// predict
if (embd.size() > 0) {
// infinite text generation via context swapping
// if we run out of context:
// - take the n_keep first tokens from the original prompt (via n_past)
// - take half of the last (n_ctx - n_keep) tokens and recompute the
// logits in batches
if (n_past + (int)embd.size() > n_ctx) {
const int n_left = n_past - params.n_keep;
// always keep the first token - BOS
n_past = std::max(1, params.n_keep);
// insert n_left/2 tokens at the start of embd from last_n_tokens
embd.insert(embd.begin(),
last_n_tokens.begin() + n_ctx - n_left / 2 - embd.size(),
last_n_tokens.end() - embd.size());
// stop saving session if we run out of context
path_session.clear();
// printf("\n---\n");
// printf("resetting: '");
// for (int i = 0; i < (int) embd.size(); i++) {
// printf("%s", llama_token_to_str(ctx, embd[i]));
// }
// printf("'\n");
// printf("\n---\n");
}
// try to reuse a matching prefix from the loaded session instead of
// re-eval (via n_past)
if (n_session_consumed < (int)session_tokens.size()) {
size_t i = 0;
for (; i < embd.size(); i++) {
if (embd[i] != session_tokens[n_session_consumed]) {
session_tokens.resize(n_session_consumed);
break;
}
n_past++;
n_session_consumed++;
if (n_session_consumed >= (int)session_tokens.size()) {
++i;
break;
}
}
if (i > 0) {
embd.erase(embd.begin(), embd.begin() + i);
}
}
// evaluate tokens in batches
// embd is typically prepared beforehand to fit within a batch, but not
// always
for (int i = 0; i < (int)embd.size(); i += params.n_batch) {
int n_eval = (int)embd.size() - i;
if (n_eval > params.n_batch) {
n_eval = params.n_batch;
}
if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return 1;
}
n_past += n_eval;
}
if (embd.size() > 0 && !path_session.empty()) {
session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
n_session_consumed = session_tokens.size();
}
}
embd.clear();
if ((int)embd_inp.size() <= n_consumed) {
// out of user input, sample next token
const float temp = params.temp;
const int32_t top_k =
params.top_k <= 0 ? llama_n_vocab(ctx) : params.top_k;
const float top_p = params.top_p;
const float tfs_z = params.tfs_z;
const float typical_p = params.typical_p;
const int32_t repeat_last_n =
params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
const float repeat_penalty = params.repeat_penalty;
const float alpha_presence = params.presence_penalty;
const float alpha_frequency = params.frequency_penalty;
const int mirostat = params.mirostat;
const float mirostat_tau = params.mirostat_tau;
const float mirostat_eta = params.mirostat_eta;
const bool penalize_nl = params.penalize_nl;
// optionally save the session on first sample (for faster prompt loading
// next time)
if (!path_session.empty() && need_to_save_session &&
!params.prompt_cache_ro) {
need_to_save_session = false;
llama_save_session_file(ctx, path_session.c_str(),
session_tokens.data(), session_tokens.size());
}
llama_token id = 0;
{
auto logits = llama_get_logits(ctx);
auto n_vocab = llama_n_vocab(ctx);
// Apply params.logit_bias map
for (auto it = params.logit_bias.begin(); it != params.logit_bias.end();
it++) {
logits[it->first] += it->second;
}
std::vector<llama_token_data> candidates;
candidates.reserve(n_vocab);
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
candidates.emplace_back(
llama_token_data{token_id, logits[token_id], 0.0f});
}
llama_token_data_array candidates_p = {candidates.data(),
candidates.size(), false};
// Apply penalties
float nl_logit = logits[llama_token_nl()];
auto last_n_repeat =
std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
llama_sample_repetition_penalty(
ctx, &candidates_p,
last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
last_n_repeat, repeat_penalty);
llama_sample_frequency_and_presence_penalties(
ctx, &candidates_p,
last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
last_n_repeat, alpha_frequency, alpha_presence);
if (!penalize_nl) {
logits[llama_token_nl()] = nl_logit;
}
if (temp <= 0) {
// Greedy sampling
id = llama_sample_token_greedy(ctx, &candidates_p);
} else {
if (mirostat == 1) {
static float mirostat_mu = 2.0f * mirostat_tau;
const int mirostat_m = 100;
llama_sample_temperature(ctx, &candidates_p, temp);
id = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau,
mirostat_eta, mirostat_m,
&mirostat_mu);
} else if (mirostat == 2) {
static float mirostat_mu = 2.0f * mirostat_tau;
llama_sample_temperature(ctx, &candidates_p, temp);
id = llama_sample_token_mirostat_v2(
ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
} else {
// Temperature sampling
llama_sample_top_k(ctx, &candidates_p, top_k, 1);
llama_sample_tail_free(ctx, &candidates_p, tfs_z, 1);
llama_sample_typical(ctx, &candidates_p, typical_p, 1);
llama_sample_top_p(ctx, &candidates_p, top_p, 1);
llama_sample_temperature(ctx, &candidates_p, temp);
id = llama_sample_token(ctx, &candidates_p);
}
}
// printf("`%d`", candidates_p.size);
last_n_tokens.erase(last_n_tokens.begin());
last_n_tokens.push_back(id);
}
// add it to the context
embd.push_back(id);
// decrement remaining sampling budget
--n_remain;
// call the token callback, no need to check if one is actually
// registered, that will be handled on the Go side.
auto token_str = llama_token_to_str(ctx, id);
if (!tokenCallback(state_pr, (char *)token_str)) {
break;
}
} else {
// some user input remains from prompt or interaction, forward it to
// processing
while ((int)embd_inp.size() > n_consumed) {
embd.push_back(embd_inp[n_consumed]);
last_n_tokens.erase(last_n_tokens.begin());
last_n_tokens.push_back(embd_inp[n_consumed]);
++n_consumed;
if ((int)embd.size() >= params.n_batch) {
break;
}
}
}
for (auto id : embd) {
res += llama_token_to_str(ctx, id);
}
// check for stop prompt
if (params.antiprompt.size()) {
std::string last_output;
for (auto id : last_n_tokens) {
last_output += llama_token_to_str(ctx, id);
}
// Check if each of the reverse prompts appears at the end of the output.
for (std::string &antiprompt : params.antiprompt) {
// size_t extra_padding = params.interactive ? 0 : 2;
size_t extra_padding = 2;
size_t search_start_pos =
last_output.length() >
static_cast<size_t>(antiprompt.length() + extra_padding)
? last_output.length() -
static_cast<size_t>(antiprompt.length() + extra_padding)
: 0;
if (last_output.find(antiprompt.c_str(), search_start_pos) !=
std::string::npos) {
goto end;
}
}
}
// end of text token
if (!embd.empty() && embd.back() == llama_token_eos()) {
break;
}
}
if (!path_session.empty() && params.prompt_cache_all &&
!params.prompt_cache_ro) {
if (debug) {
fprintf(stderr, "\n%s: saving final output to session file '%s'\n",
__func__, path_session.c_str());
}
llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(),
session_tokens.size());
}
end:
#if defined(_WIN32)
signal(SIGINT, SIG_DFL);
#endif
if (debug) {
llama_print_timings(ctx);
llama_reset_timings(ctx);
}
strcpy(result, res.c_str());
return 0;
}
void llama_binding_free_model(void *state_ptr) {
llama_context *ctx = (llama_context *)state_ptr;
llama_free(ctx);
}
void llama_free_params(void *params_ptr) {
gpt_params *params = (gpt_params *)params_ptr;
delete params;
}
std::vector<std::string> create_vector(const char **strings, int count) {
std::vector<std::string> *vec = new std::vector<std::string>;
for (int i = 0; i < count; i++) {
vec->push_back(std::string(strings[i]));
}
return *vec;
}
void delete_vector(std::vector<std::string> *vec) { delete vec; }
int load_state(void *ctx, char *statefile, char *modes) {
llama_context *state = (llama_context *)ctx;
const llama_context *constState = static_cast<const llama_context *>(state);
const size_t state_size = llama_get_state_size(state);
uint8_t *state_mem = new uint8_t[state_size];
{
FILE *fp_read = fopen(statefile, modes);
if (state_size != llama_get_state_size(constState)) {
fprintf(stderr, "\n%s : failed to validate state size\n", __func__);
return 1;
}
const size_t ret = fread(state_mem, 1, state_size, fp_read);
if (ret != state_size) {
fprintf(stderr, "\n%s : failed to read state\n", __func__);
return 1;
}
llama_set_state_data(
state, state_mem); // could also read directly from memory mapped file
fclose(fp_read);
}
return 0;
}
void save_state(void *ctx, char *dst, char *modes) {
llama_context *state = (llama_context *)ctx;
const size_t state_size = llama_get_state_size(state);
uint8_t *state_mem = new uint8_t[state_size];
// Save state (rng, logits, embedding and kv_cache) to file
{
FILE *fp_write = fopen(dst, modes);
llama_copy_state_data(
state, state_mem); // could also copy directly to memory mapped file
fwrite(state_mem, 1, state_size, fp_write);
fclose(fp_write);
}
}
void *llama_allocate_params(
const char *prompt, int seed, int threads, int tokens, int top_k,
float top_p, float temp, float repeat_penalty, int repeat_last_n,
bool ignore_eos, bool memory_f16, int n_batch, int n_keep,
const char **antiprompt, int antiprompt_count, float tfs_z, float typical_p,
float frequency_penalty, float presence_penalty, int mirostat,
float mirostat_eta, float mirostat_tau, bool penalize_nl,
const char *logit_bias, bool mlock, bool mmap, const char *maingpu,
const char *tensorsplit) {
gpt_params *params = new gpt_params;
params->seed = seed;
params->n_threads = threads;
params->n_predict = tokens;
params->repeat_last_n = repeat_last_n;
params->top_k = top_k;
params->top_p = top_p;
params->memory_f16 = memory_f16;
params->temp = temp;
params->use_mmap = mmap;
params->use_mlock = mlock;
params->repeat_penalty = repeat_penalty;
params->n_batch = n_batch;
params->n_keep = n_keep;
if (maingpu[0] != '\0') {
params->main_gpu = std::stoi(maingpu);
}
if (tensorsplit[0] != '\0') {
std::string arg_next = tensorsplit;
// split string by , and /
const std::regex regex{R"([,/]+)"};
std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
std::vector<std::string> split_arg{it, {}};
GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);
for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
if (i < split_arg.size()) {
params->tensor_split[i] = std::stof(split_arg[i]);
} else {
params->tensor_split[i] = 0.0f;
}
}
}
if (ignore_eos) {
params->logit_bias[llama_token_eos()] = -INFINITY;
}
if (antiprompt_count > 0) {
params->antiprompt = create_vector(antiprompt, antiprompt_count);
}
params->tfs_z = tfs_z;
params->typical_p = typical_p;
params->presence_penalty = presence_penalty;
params->mirostat = mirostat;
params->mirostat_eta = mirostat_eta;
params->mirostat_tau = mirostat_tau;
params->penalize_nl = penalize_nl;
std::stringstream ss(logit_bias);
llama_token key;
char sign;
std::string value_str;
if (ss >> key && ss >> sign && std::getline(ss, value_str) &&
(sign == '+' || sign == '-')) {
params->logit_bias[key] =
std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
}
params->frequency_penalty = frequency_penalty;
params->prompt = prompt;
return params;
}
void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
bool mlock, bool embeddings, bool mmap, bool low_vram,
bool vocab_only, int n_gpu_layers, int n_batch,
const char *maingpu, const char *tensorsplit, bool numa) {
// load the model
auto lparams = llama_context_default_params();
lparams.n_ctx = n_ctx;
lparams.seed = n_seed;
lparams.f16_kv = memory_f16;
lparams.embedding = embeddings;
lparams.use_mlock = mlock;
lparams.n_gpu_layers = n_gpu_layers;
lparams.use_mmap = mmap;
lparams.low_vram = low_vram;
lparams.vocab_only = vocab_only;
if (maingpu[0] != '\0') {
lparams.main_gpu = std::stoi(maingpu);
}
if (tensorsplit[0] != '\0') {
std::string arg_next = tensorsplit;
// split string by , and /
const std::regex regex{R"([,/]+)"};
std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
std::vector<std::string> split_arg{it, {}};
GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);
for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
if (i < split_arg.size()) {
lparams.tensor_split[i] = std::stof(split_arg[i]);
} else {
lparams.tensor_split[i] = 0.0f;
}
}
}
lparams.n_batch = n_batch;
llama_init_backend(numa);
void *res = nullptr;
try {
res = llama_init_from_file(fname, lparams);
} catch (std::runtime_error &e) {
fprintf(stderr, "failed %s", e.what());
return res;
}
return res;
}


@@ -1,48 +0,0 @@
#ifdef __cplusplus
#include <string>
#include <vector>
extern "C" {
#endif
#include <stdbool.h>
extern unsigned char tokenCallback(void *, char *);
int load_state(void *ctx, char *statefile, char *modes);
int eval(void *params_ptr, void *ctx, char *text);
void save_state(void *ctx, char *dst, char *modes);
void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16,
bool mlock, bool embeddings, bool mmap, bool low_vram,
bool vocab_only, int n_gpu, int n_batch, const char *maingpu,
const char *tensorsplit, bool numa);
int get_embeddings(void *params_ptr, void *state_pr, float *res_embeddings);
int get_token_embeddings(void *params_ptr, void *state_pr, int *tokens,
int tokenSize, float *res_embeddings);
void *llama_allocate_params(
const char *prompt, int seed, int threads, int tokens, int top_k,
float top_p, float temp, float repeat_penalty, int repeat_last_n,
bool ignore_eos, bool memory_f16, int n_batch, int n_keep,
const char **antiprompt, int antiprompt_count, float tfs_z, float typical_p,
float frequency_penalty, float presence_penalty, int mirostat,
float mirostat_eta, float mirostat_tau, bool penalize_nl,
const char *logit_bias, bool mlock, bool mmap, const char *maingpu,
const char *tensorsplit);
void llama_free_params(void *params_ptr);
void llama_binding_free_model(void *state);
int llama_predict(void *params_ptr, void *state_pr, char *result, bool debug);
#ifdef __cplusplus
}
std::vector<std::string> create_vector(const char **strings, int count);
void delete_vector(std::vector<std::string> *vec);
#endif

llama/ggml-cuda.cu (3414 lines, Normal file)

File diff suppressed because it is too large.

llama/ggml-cuda.h (62 lines, Normal file)

@@ -0,0 +1,62 @@
/**
* llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C" {
#endif
#define GGML_CUDA_MAX_DEVICES 16
void ggml_init_cublas(void);
void ggml_cuda_set_tensor_split(const float * tensor_split);
void ggml_cuda_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
size_t ggml_cuda_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
void ggml_cuda_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
// TODO: export these with GGML_API
void * ggml_cuda_host_malloc(size_t size);
void ggml_cuda_host_free(void * ptr);
void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);
void ggml_cuda_free_data(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
void ggml_cuda_set_main_device(int main_device);
void ggml_cuda_set_scratch_size(size_t scratch_size);
void ggml_cuda_free_scratch(void);
bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
#ifdef __cplusplus
}
#endif

llama/ggml-metal.h (97 lines, Normal file)

@@ -0,0 +1,97 @@
/**
* llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// An interface for computing a ggml_cgraph with Metal
//
// This is a fully functional interface that extends ggml with GPU support for Apple devices.
// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
//
// How does it work?
//
// As long as your program can create and evaluate a ggml_cgraph on the CPU, you can use this
// interface to evaluate the same graph on the GPU. Instead of using ggml_graph_compute(), you
// use ggml_metal_graph_compute() (or ggml_vulkan_graph_compute(), etc.)
//
// You only need to make sure that all memory buffers that you used during the graph creation
// are mapped to the device memory with the ggml_metal_add_buffer() function. This mapping is
// used during the graph evaluation to determine the arguments of the compute kernels.
//
// Synchronization between device and host memory (for example for input and output tensors)
// is done with the ggml_metal_set_tensor() and ggml_metal_get_tensor() functions.
//
#pragma once
#include <stddef.h>
#include <stdbool.h>
// max memory buffers that can be mapped to the device
#define GGML_METAL_MAX_BUFFERS 16
struct ggml_tensor;
struct ggml_cgraph;
#ifdef __cplusplus
extern "C" {
#endif
struct ggml_metal_context;
// number of command buffers to use
struct ggml_metal_context * ggml_metal_init(int n_cb);
void ggml_metal_free(struct ggml_metal_context * ctx);
// set the number of command buffers to use
void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb);
// creates a mapping between a host memory buffer and a device memory buffer
// - make sure to map all buffers used in the graph before calling ggml_metal_graph_compute
// - the mapping is used during computation to determine the arguments of the compute kernels
// - you don't need to keep the host memory buffer allocated as it is never accessed by Metal
// - max_size specifies the maximum size of a tensor and is used to create shared views such
// that it is guaranteed that the tensor will fit in at least one of the views
//
bool ggml_metal_add_buffer(
struct ggml_metal_context * ctx,
const char * name,
void * data,
size_t size,
size_t max_size);
// set data from host memory into the device
void ggml_metal_set_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
// get data from the device into host memory
void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor * t);
// same as ggml_graph_compute but uses Metal
// creates gf->n_threads command buffers in parallel
void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
#ifdef __cplusplus
}
#endif
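The usage notes at the top of ggml-metal.h describe a fixed call sequence: build the graph on the CPU, map every backing buffer with ggml_metal_add_buffer, then evaluate with ggml_metal_graph_compute and copy results back. A rough sketch of that sequence, assuming the caller already built the graph and owns the host buffers (the helper function and buffer names are illustrative, not part of the API):

#include <stddef.h>
#include "ggml.h"
#include "ggml-metal.h"

// Hypothetical helper: `gf` was built on the CPU; `weights` and `scratch` are the
// host buffers backing its tensors, as required by ggml_metal_add_buffer.
void run_graph_on_metal(struct ggml_cgraph * gf,
                        void * weights, size_t weights_size,
                        void * scratch, size_t scratch_size,
                        size_t max_tensor_size,
                        struct ggml_tensor * input,
                        struct ggml_tensor * output) {
    struct ggml_metal_context * mctx = ggml_metal_init(1 /* n_cb */);

    // map every host buffer used during graph creation into device memory
    ggml_metal_add_buffer(mctx, "weights", weights, weights_size, max_tensor_size);
    ggml_metal_add_buffer(mctx, "scratch", scratch, scratch_size, max_tensor_size);

    // push the input to the device, evaluate the graph, read the result back
    ggml_metal_set_tensor(mctx, input);
    ggml_metal_graph_compute(mctx, gf);
    ggml_metal_get_tensor(mctx, output);

    ggml_metal_free(mctx);
}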

llama/ggml-metal.m (1016 lines, Normal file)

File diff suppressed because it is too large.

llama/ggml-metal.metal (1855 lines, Normal file)

File diff suppressed because it is too large.

llama/ggml.c (18380 lines, Normal file)

File diff suppressed because it is too large.

llama/ggml.h (1575 lines, Normal file)

File diff suppressed because it is too large.

llama/k_quants.c (3926 lines, Normal file)

File diff suppressed because it is too large.

llama/k_quants.h (183 lines, Normal file)

@@ -0,0 +1,183 @@
/**
* llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#include "ggml.h"
#include <stdint.h>
#include <assert.h>
#include <stddef.h>
// Super-block size
#ifdef GGML_QKK_64
#define QK_K 64
#define K_SCALE_SIZE 4
#else
#define QK_K 256
#define K_SCALE_SIZE 12
#endif
//
// Super-block quantization structures
//
// 2-bit quantization
// weight is represented as x = a * q + b
// 16 blocks of 16 elements each
// Effectively 2.5625 bits per weight
typedef struct {
uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
uint8_t qs[QK_K/4]; // quants
ggml_fp16_t d; // super-block scale for quantized scales
ggml_fp16_t dmin; // super-block scale for quantized mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
// 3-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elements each
// Effectively 3.4375 bits per weight
#ifdef GGML_QKK_64
typedef struct {
uint8_t hmask[QK_K/8]; // quants - high bit
uint8_t qs[QK_K/4]; // quants - low 2 bits
uint8_t scales[2];
ggml_fp16_t d; // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
#else
typedef struct {
uint8_t hmask[QK_K/8]; // quants - high bit
uint8_t qs[QK_K/4]; // quants - low 2 bits
uint8_t scales[12]; // scales, quantized with 6 bits
ggml_fp16_t d; // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
#endif
// 4-bit quantization
// 8 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 4.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
ggml_fp16_t d[2]; // super-block scales/mins
uint8_t scales[2]; // 4-bit block scales/mins
uint8_t qs[QK_K/2]; // 4-bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
#else
typedef struct {
ggml_fp16_t d; // super-block scale for quantized scales
ggml_fp16_t dmin; // super-block scale for quantized mins
uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
uint8_t qs[QK_K/2]; // 4-bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
#endif
// 5-bit quantization
// 8 blocks of 32 elements each
// weight is represented as x = a * q + b
// Effectively 5.5 bits per weight
#ifdef GGML_QKK_64
typedef struct {
ggml_fp16_t d; // super-block scale
int8_t scales[QK_K/16]; // 8-bit block scales
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
#else
typedef struct {
ggml_fp16_t d; // super-block scale for quantized scales
ggml_fp16_t dmin; // super-block scale for quantized mins
uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
uint8_t qh[QK_K/8]; // quants, high bit
uint8_t qs[QK_K/2]; // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
#endif
// 6-bit quantization
// weight is represented as x = a * q
// 16 blocks of 16 elements each
// Effectively 6.5625 bits per weight
typedef struct {
uint8_t ql[QK_K/2]; // quants, lower 4 bits
uint8_t qh[QK_K/4]; // quants, upper 2 bits
int8_t scales[QK_K/16]; // scales, quantized with 8 bits
ggml_fp16_t d; // super-block scale
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");
// This is only used for intermediate quantization and dot products
typedef struct {
float d; // delta
int8_t qs[QK_K]; // quants
int16_t bsums[QK_K/16]; // sum of quants in groups of 16
} block_q8_K;
static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
// Quantization
void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k);
void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);
void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);
void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);
void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);
// Dequantization
void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k);
void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);
void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);
void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);
// Dot product
void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
// Quantization with histogram collection
size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);
size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);
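The "effectively N bits per weight" figures in the comments above follow directly from the struct layouts. For example, for the QK_K == 256 variant of block_q4_K (matching its static_assert):

    sizeof(block_q4_K) = 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2
                       = 2*2 + 12 + 128
                       = 144 bytes per super-block of 256 weights

    144 bytes * 8 bits/byte / 256 weights = 4.5 bits per weight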

llama/llama-util.h (530 lines, Normal file)

@@ -0,0 +1,530 @@
/**
* llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// Internal header to be included only by llama.cpp.
// Contains wrappers around OS interfaces.
#ifndef LLAMA_UTIL_H
#define LLAMA_UTIL_H
#include <cstdio>
#include <cstdint>
#include <cerrno>
#include <cstring>
#include <cstdarg>
#include <cstdlib>
#include <climits>
#include <string>
#include <vector>
#include <stdexcept>
#ifdef __has_include
#if __has_include(<unistd.h>)
#include <unistd.h>
#if defined(_POSIX_MAPPED_FILES)
#include <sys/mman.h>
#endif
#if defined(_POSIX_MEMLOCK_RANGE)
#include <sys/resource.h>
#endif
#endif
#endif
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <io.h>
#include <stdio.h> // for _fseeki64
#endif
#define LLAMA_ASSERT(x) \
do { \
if (!(x)) { \
fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
abort(); \
} \
} while (0)
#ifdef __GNUC__
#ifdef __MINGW32__
__attribute__((format(gnu_printf, 1, 2)))
#else
__attribute__((format(printf, 1, 2)))
#endif
#endif
static std::string format(const char * fmt, ...) {
va_list ap, ap2;
va_start(ap, fmt);
va_copy(ap2, ap);
int size = vsnprintf(NULL, 0, fmt, ap);
LLAMA_ASSERT(size >= 0 && size < INT_MAX);
std::vector<char> buf(size + 1);
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
LLAMA_ASSERT(size2 == size);
va_end(ap2);
va_end(ap);
return std::string(buf.data(), size);
}
struct llama_file {
// use FILE * so we don't have to re-open the file to mmap
FILE * fp;
size_t size;
llama_file(const char * fname, const char * mode) {
fp = std::fopen(fname, mode);
if (fp == NULL) {
throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
}
seek(0, SEEK_END);
size = tell();
seek(0, SEEK_SET);
}
size_t tell() const {
#ifdef _WIN32
__int64 ret = _ftelli64(fp);
#else
long ret = std::ftell(fp);
#endif
LLAMA_ASSERT(ret != -1); // this really shouldn't fail
return (size_t) ret;
}
void seek(size_t offset, int whence) {
#ifdef _WIN32
int ret = _fseeki64(fp, (__int64) offset, whence);
#else
int ret = std::fseek(fp, (long) offset, whence);
#endif
LLAMA_ASSERT(ret == 0); // same
}
void read_raw(void * ptr, size_t len) const {
if (len == 0) {
return;
}
errno = 0;
std::size_t ret = std::fread(ptr, len, 1, fp);
if (ferror(fp)) {
throw std::runtime_error(format("read error: %s", strerror(errno)));
}
if (ret != 1) {
throw std::runtime_error(std::string("unexpectedly reached end of file"));
}
}
std::uint32_t read_u32() {
std::uint32_t ret;
read_raw(&ret, sizeof(ret));
return ret;
}
std::string read_string(std::uint32_t len) {
std::vector<char> chars(len);
read_raw(chars.data(), len);
return std::string(chars.data(), len);
}
void write_raw(const void * ptr, size_t len) const {
if (len == 0) {
return;
}
errno = 0;
size_t ret = std::fwrite(ptr, len, 1, fp);
if (ret != 1) {
throw std::runtime_error(format("write error: %s", strerror(errno)));
}
}
void write_u32(std::uint32_t val) {
write_raw(&val, sizeof(val));
}
~llama_file() {
if (fp) {
std::fclose(fp);
}
}
};
#if defined(_WIN32)
static std::string llama_format_win_err(DWORD err) {
LPSTR buf;
size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
if (!size) {
return "FormatMessageA failed";
}
std::string ret(buf, size);
LocalFree(buf);
return ret;
}
#endif
struct llama_mmap {
void * addr;
size_t size;
llama_mmap(const llama_mmap &) = delete;
#ifdef _POSIX_MAPPED_FILES
static constexpr bool SUPPORTED = true;
llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
size = file->size;
int fd = fileno(file->fp);
int flags = MAP_PRIVATE;
// prefetch/readahead impairs performance on NUMA systems
if (numa) { prefetch = 0; }
#ifdef __linux__
if (prefetch) { flags |= MAP_POPULATE; }
#endif
addr = mmap(NULL, file->size, PROT_READ | PROT_WRITE, flags, fd, 0);
if (addr == MAP_FAILED) {
throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
}
if (prefetch > 0) {
// Advise the kernel to preload the mapped memory
if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) {
fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
strerror(errno));
}
}
if (numa) {
// advise the kernel not to use readahead
// (because the next page might not belong on the same node)
if (madvise(addr, file->size, MADV_RANDOM)) {
fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n",
strerror(errno));
}
}
}
~llama_mmap() {
munmap(addr, size);
}
#elif defined(_WIN32)
static constexpr bool SUPPORTED = true;
llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
(void) numa;
size = file->size;
HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
DWORD error = GetLastError();
if (hMapping == NULL) {
throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
}
addr = MapViewOfFile(hMapping, FILE_MAP_COPY, 0, 0, 0);
error = GetLastError();
CloseHandle(hMapping);
if (addr == NULL) {
throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
}
#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
if (prefetch) {
// Advise the kernel to preload the mapped memory
WIN32_MEMORY_RANGE_ENTRY range;
range.VirtualAddress = addr;
range.NumberOfBytes = (SIZE_T)size;
if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
llama_format_win_err(GetLastError()).c_str());
}
}
#else
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
}
~llama_mmap() {
if (!UnmapViewOfFile(addr)) {
fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
llama_format_win_err(GetLastError()).c_str());
}
}
#else
static constexpr bool SUPPORTED = false;
llama_mmap(struct llama_file *, bool prefetch = true, bool numa = false) {
(void) prefetch;
(void) numa;
throw std::runtime_error(std::string("mmap not supported"));
}
#endif
};
// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
void * addr = NULL;
size_t size = 0;
bool failed_already = false;
llama_mlock() {}
llama_mlock(const llama_mlock &) = delete;
~llama_mlock() {
if (size) {
raw_unlock(addr, size);
}
}
void init(void * ptr) {
LLAMA_ASSERT(addr == NULL && size == 0);
addr = ptr;
}
void grow_to(size_t target_size) {
LLAMA_ASSERT(addr);
if (failed_already) {
return;
}
size_t granularity = lock_granularity();
target_size = (target_size + granularity - 1) & ~(granularity - 1);
if (target_size > size) {
if (raw_lock((uint8_t *) addr + size, target_size - size)) {
size = target_size;
} else {
failed_already = true;
}
}
}
#ifdef _POSIX_MEMLOCK_RANGE
static constexpr bool SUPPORTED = true;
size_t lock_granularity() {
return (size_t) sysconf(_SC_PAGESIZE);
}
#ifdef __APPLE__
#define MLOCK_SUGGESTION \
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif
bool raw_lock(const void * addr, size_t size) {
if (!mlock(addr, size)) {
return true;
} else {
char* errmsg = std::strerror(errno);
bool suggest = (errno == ENOMEM);
// Check if the resource limit is fine after all
struct rlimit lock_limit;
if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
suggest = false;
if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
suggest = false;
fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
return false;
}
}
#undef MLOCK_SUGGESTION
void raw_unlock(void * addr, size_t size) {
if (munlock(addr, size)) {
fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
}
}
#elif defined(_WIN32)
static constexpr bool SUPPORTED = true;
size_t lock_granularity() {
SYSTEM_INFO si;
GetSystemInfo(&si);
return (size_t) si.dwPageSize;
}
bool raw_lock(void * ptr, size_t len) {
for (int tries = 1; ; tries++) {
if (VirtualLock(ptr, len)) {
return true;
}
if (tries == 2) {
fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
len, size, llama_format_win_err(GetLastError()).c_str());
return false;
}
// It failed but this was only the first try; increase the working
// set size and try again.
SIZE_T min_ws_size, max_ws_size;
if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
llama_format_win_err(GetLastError()).c_str());
return false;
}
// Per MSDN: "The maximum number of pages that a process can lock
// is equal to the number of pages in its minimum working set minus
// a small overhead."
// Hopefully a megabyte is enough overhead:
size_t increment = len + 1048576;
// The minimum must be <= the maximum, so we need to increase both:
min_ws_size += increment;
max_ws_size += increment;
if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
llama_format_win_err(GetLastError()).c_str());
return false;
}
}
}
void raw_unlock(void * ptr, size_t len) {
if (!VirtualUnlock(ptr, len)) {
fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
llama_format_win_err(GetLastError()).c_str());
}
}
#else
static constexpr bool SUPPORTED = false;
size_t lock_granularity() {
return (size_t) 65536;
}
bool raw_lock(const void * addr, size_t len) {
fprintf(stderr, "warning: mlock not supported on this system\n");
return false;
}
void raw_unlock(const void * addr, size_t len) {}
#endif
};
// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
struct llama_buffer {
uint8_t * addr = NULL;
size_t size = 0;
llama_buffer() = default;
void resize(size_t len) {
#ifdef GGML_USE_METAL
free(addr);
int result = posix_memalign((void **) &addr, getpagesize(), len);
if (result == 0) {
memset(addr, 0, len);
}
else {
addr = NULL;
}
#else
delete[] addr;
addr = new uint8_t[len];
#endif
size = len;
}
~llama_buffer() {
#ifdef GGML_USE_METAL
free(addr);
#else
delete[] addr;
#endif
addr = NULL;
}
// disable copy and move
llama_buffer(const llama_buffer&) = delete;
llama_buffer(llama_buffer&&) = delete;
llama_buffer& operator=(const llama_buffer&) = delete;
llama_buffer& operator=(llama_buffer&&) = delete;
};
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
struct llama_ctx_buffer {
uint8_t * addr = NULL;
bool is_cuda;
size_t size = 0;
llama_ctx_buffer() = default;
void resize(size_t size) {
free();
addr = (uint8_t *) ggml_cuda_host_malloc(size);
if (addr) {
is_cuda = true;
}
else {
// fall back to pageable memory
addr = new uint8_t[size];
is_cuda = false;
}
this->size = size;
}
void free() {
if (addr) {
if (is_cuda) {
ggml_cuda_host_free(addr);
}
else {
delete[] addr;
}
}
addr = NULL;
}
~llama_ctx_buffer() {
free();
}
// disable copy and move
llama_ctx_buffer(const llama_ctx_buffer&) = delete;
llama_ctx_buffer(llama_ctx_buffer&&) = delete;
llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete;
llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete;
};
#else
typedef llama_buffer llama_ctx_buffer;
#endif
#endif
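llama-util.h wraps the OS-level file, mmap, and mlock calls in small RAII types that llama.cpp uses while loading models. A brief sketch of how llama_file and llama_mmap compose (illustrative only; assumes a POSIX or Windows build where llama_mmap::SUPPORTED is true, and that this code is compiled inside llama.cpp so the header is available):

#include <cstdint>
#include <cstdio>

void inspect_model(const char * fname) {
    llama_file file(fname, "rb");        // opens the file and records its size
    uint32_t magic = file.read_u32();    // e.g. LLAMA_FILE_MAGIC_GGJT
    std::printf("%s: %zu bytes, magic 0x%08x\n", fname, file.size, magic);

    llama_mmap mapping(&file);           // private, copy-on-write mapping of the whole file
    // mapping.addr / mapping.size are valid here; the mapping is released on scope exit
    (void) mapping;
}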

llama/llama.cpp (3700 lines, Normal file)

File diff suppressed because it is too large.


@@ -1,217 +1,282 @@
// MIT License
// Copyright (c) 2023 go-skynet authors
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package llama
// #cgo LDFLAGS: -Lbuild -lbinding -lllama -lm -lggml_static -lstdc++
// #cgo CXXFLAGS: -std=c++11
// #cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
// #include "binding/binding.h"
// #include <stdlib.h>
import "C"
/*
#cgo CPPFLAGS: -O3 -DNDEBUG=1
#cgo CXXFLAGS: -std=c++11
#cgo darwin CPPFLAGS: -DGGML_USE_METAL=1 -DGGML_METAL_NDEBUG=1
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
#include <stdlib.h>
#include "llama.h"
struct llama_sample_options
{
float repeat_penalty;
float frequency_penalty;
float presence_penalty;
float temperature;
int32_t top_k;
float top_p;
float tfs_z;
float typical_p;
int mirostat;
float mirostat_tau;
float mirostat_eta;
};
llama_token llama_sample(
struct llama_context *ctx,
struct llama_token_data *candidates,
size_t n_candidates,
const llama_token *last_tokens,
size_t n_last_tokens,
struct llama_sample_options *opts)
{
llama_token_data_array candidates_p = {
candidates,
n_candidates,
false,
};
llama_sample_repetition_penalty(
ctx, &candidates_p,
last_tokens, n_last_tokens,
opts->repeat_penalty);
llama_sample_frequency_and_presence_penalties(
ctx, &candidates_p,
last_tokens, n_last_tokens,
opts->frequency_penalty, opts->presence_penalty);
if (opts->temperature <= 0) {
return llama_sample_token_greedy(ctx, &candidates_p);
}
if (opts->mirostat == 1) {
int mirostat_m = 100;
float mirostat_mu = 2.0f * opts->mirostat_tau;
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token_mirostat(
ctx, &candidates_p,
opts->mirostat_tau, opts->mirostat_eta,
mirostat_m, &mirostat_mu);
} else if (opts->mirostat == 2) {
float mirostat_mu = 2.0f * opts->mirostat_tau;
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token_mirostat_v2(
ctx, &candidates_p,
opts->mirostat_tau, opts->mirostat_eta,
&mirostat_mu);
} else {
llama_sample_top_k(ctx, &candidates_p, opts->top_k, 1);
llama_sample_tail_free(ctx, &candidates_p, opts->tfs_z, 1);
llama_sample_typical(ctx, &candidates_p, opts->typical_p, 1);
llama_sample_top_p(ctx, &candidates_p, opts->top_p, 1);
llama_sample_temperature(ctx, &candidates_p, opts->temperature);
return llama_sample_token(ctx, &candidates_p);
}
}
*/
import "C"
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
"unicode/utf8"
"unsafe"
"github.com/jmorganca/ollama/api"
)
type LLama struct {
ctx unsafe.Pointer
embeddings bool
contextSize int
type llama struct {
params *C.struct_llama_context_params
model *C.struct_llama_model
ctx *C.struct_llama_context
api.Options
}
func New(model string, mo ModelOptions) (*LLama, error) {
modelPath := C.CString(model)
defer C.free(unsafe.Pointer(modelPath))
ctx := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), C.CString(mo.MainGPU), C.CString(mo.TensorSplit), C.bool(mo.NUMA))
if ctx == nil {
return nil, fmt.Errorf("failed loading model")
func New(model string, opts api.Options) (*llama, error) {
if _, err := os.Stat(model); err != nil {
return nil, err
}
ll := &LLama{ctx: ctx, contextSize: mo.ContextSize, embeddings: mo.Embeddings}
llm := llama{Options: opts}
return ll, nil
C.llama_backend_init(C.bool(llm.UseNUMA))
params := C.llama_context_default_params()
params.seed = C.uint(llm.Seed)
params.n_ctx = C.int(llm.NumCtx)
params.n_batch = C.int(llm.NumBatch)
params.n_gpu_layers = C.int(llm.NumGPU)
params.main_gpu = C.int(llm.MainGPU)
params.low_vram = C.bool(llm.LowVRAM)
params.f16_kv = C.bool(llm.F16KV)
params.logits_all = C.bool(llm.LogitsAll)
params.vocab_only = C.bool(llm.VocabOnly)
params.use_mmap = C.bool(llm.UseMMap)
params.use_mlock = C.bool(llm.UseMLock)
params.embedding = C.bool(llm.EmbeddingOnly)
llm.params = &params
cModel := C.CString(model)
defer C.free(unsafe.Pointer(cModel))
llm.model = C.llama_load_model_from_file(cModel, params)
if llm.model == nil {
return nil, errors.New("failed to load model")
}
llm.ctx = C.llama_new_context_with_model(llm.model, params)
if llm.ctx == nil {
return nil, errors.New("failed to create context")
}
// warm up the model
bos := []C.llama_token{C.llama_token_bos()}
C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
C.llama_reset_timings(llm.ctx)
return &llm, nil
}
func (l *LLama) Free() {
C.llama_binding_free_model(l.ctx)
func (llm *llama) Close() {
defer C.llama_free_model(llm.model)
defer C.llama_free(llm.ctx)
C.llama_print_timings(llm.ctx)
}
func (l *LLama) Eval(text string, opts ...PredictOption) error {
po := NewPredictOptions(opts...)
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
if input := llm.tokenize(prompt); input != nil {
embd := make([]C.llama_token, len(ctx))
for i := range ctx {
embd[i] = C.llama_token(ctx[i])
}
input := C.CString(text)
if po.Tokens == 0 {
po.Tokens = 99999999
}
defer C.free(unsafe.Pointer(input))
reverseCount := len(po.StopPrompts)
reversePrompt := make([]*C.char, reverseCount)
var pass **C.char
for i, s := range po.StopPrompts {
cs := C.CString(s)
reversePrompt[i] = cs
pass = &reversePrompt[0]
defer C.free(unsafe.Pointer(cs))
return llm.generate(append(embd, input...), fn)
}
cLogitBias := C.CString(po.LogitBias)
defer C.free(unsafe.Pointer(cLogitBias))
return errors.New("llama: tokenize")
}
cMainGPU := C.CString(po.MainGPU)
defer C.free(unsafe.Pointer(cMainGPU))
func (llm *llama) tokenize(prompt string) []C.llama_token {
cPrompt := C.CString(prompt)
defer C.free(unsafe.Pointer(cPrompt))
cTensorSplit := C.CString(po.TensorSplit)
defer C.free(unsafe.Pointer(cTensorSplit))
params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
C.bool(po.IgnoreEOS), C.bool(po.F16KV),
C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias,
C.bool(po.MLock), C.bool(po.MMap), cMainGPU, cTensorSplit,
)
defer C.llama_free_params(params)
ret := C.eval(params, l.ctx, input)
if ret != 0 {
return fmt.Errorf("inference failed")
tokens := make([]C.llama_token, llm.NumCtx)
if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(tokens), C.int(len(tokens)), true); n > 0 {
return tokens[:n]
}
return nil
}
func (l *LLama) Predict(text string, po PredictOptions) (string, error) {
if po.TokenCallback != nil {
setCallback(l.ctx, po.TokenCallback)
func (llm *llama) detokenize(tokens ...C.llama_token) string {
var sb strings.Builder
for _, token := range tokens {
sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, token)))
}
input := C.CString(text)
if po.Tokens == 0 {
po.Tokens = 99999999
}
defer C.free(unsafe.Pointer(input))
out := make([]byte, po.Tokens)
reverseCount := len(po.StopPrompts)
reversePrompt := make([]*C.char, reverseCount)
var pass **C.char
for i, s := range po.StopPrompts {
cs := C.CString(s)
reversePrompt[i] = cs
pass = &reversePrompt[0]
defer C.free(unsafe.Pointer(cs))
}
cLogitBias := C.CString(po.LogitBias)
defer C.free(unsafe.Pointer(cLogitBias))
cMainGPU := C.CString(po.MainGPU)
defer C.free(unsafe.Pointer(cMainGPU))
cTensorSplit := C.CString(po.TensorSplit)
defer C.free(unsafe.Pointer(cTensorSplit))
params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
C.bool(po.IgnoreEOS), C.bool(po.F16KV),
C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias,
C.bool(po.MLock), C.bool(po.MMap), cMainGPU, cTensorSplit,
)
defer C.llama_free_params(params)
ret := C.llama_predict(params, l.ctx, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
if ret != 0 {
return "", fmt.Errorf("inference failed")
}
res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
res = strings.TrimPrefix(res, " ")
res = strings.TrimPrefix(res, text)
res = strings.TrimPrefix(res, "\n")
for _, s := range po.StopPrompts {
res = strings.TrimRight(res, s)
}
if po.TokenCallback != nil {
setCallback(l.ctx, nil)
}
return res, nil
return sb.String()
}
// CGo only allows us to use static calls from C to Go, we can't just dynamically pass in func's.
// This is the next best thing, we register the callbacks in this map and call tokenCallback from
// the C code. We also attach a finalizer to LLama, so it will unregister the callback when the
// garbage collection frees it.
func (llm *llama) generate(input []C.llama_token, fn func(api.GenerateResponse)) error {
var opts C.struct_llama_sample_options
opts.repeat_penalty = C.float(llm.RepeatPenalty)
opts.frequency_penalty = C.float(llm.FrequencyPenalty)
opts.presence_penalty = C.float(llm.PresencePenalty)
opts.temperature = C.float(llm.Temperature)
opts.top_k = C.int(llm.TopK)
opts.top_p = C.float(llm.TopP)
opts.tfs_z = C.float(llm.TFSZ)
opts.typical_p = C.float(llm.TypicalP)
opts.mirostat = C.int(llm.Mirostat)
opts.mirostat_tau = C.float(llm.MirostatTau)
opts.mirostat_eta = C.float(llm.MirostatEta)
// SetTokenCallback registers a callback for the individual tokens created when running Predict. It
// will be called once for each token. The callback shall return true as long as the model should
// continue predicting the next token. When the callback returns false the predictor will return.
// The tokens are just converted into Go strings, they are not trimmed or otherwise changed. Also
// the tokens may not be valid UTF-8.
// Pass in nil to remove a callback.
//
// It is safe to call this method while a prediction is running.
func (l *LLama) SetTokenCallback(callback func(token string) bool) {
setCallback(l.ctx, callback)
}
output := deque[C.llama_token]{capacity: llm.NumCtx}
var (
m sync.Mutex
callbacks = map[uintptr]func(string) bool{}
)
//export tokenCallback
func tokenCallback(statePtr unsafe.Pointer, token *C.char) bool {
m.Lock()
defer m.Unlock()
if callback, ok := callbacks[uintptr(statePtr)]; ok {
return callback(C.GoString(token))
context := deque[int]{capacity: llm.NumCtx / 2}
for _, in := range input {
context.PushLeft(int(in))
}
return true
}
var b bytes.Buffer
for C.llama_get_kv_cache_token_count(llm.ctx) < C.int(llm.NumCtx) {
if retval := C.llama_eval(llm.ctx, unsafe.SliceData(input), C.int(len(input)), C.llama_get_kv_cache_token_count(llm.ctx), C.int(llm.NumThread)); retval != 0 {
return errors.New("llama: eval")
}
// setCallback can be used to register a token callback for LLama. Pass in a nil callback to
// remove the callback.
func setCallback(statePtr unsafe.Pointer, callback func(string) bool) {
m.Lock()
defer m.Unlock()
token, err := llm.sample(output, &opts)
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
if callback == nil {
delete(callbacks, uintptr(statePtr))
} else {
callbacks[uintptr(statePtr)] = callback
b.WriteString(llm.detokenize(token))
if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
// call the callback
fn(api.GenerateResponse{
Response: b.String(),
})
output.PushLeft(token)
context.PushLeft(int(token))
b.Reset()
}
input = []C.llama_token{token}
}
dur := func(ms float64) time.Duration {
d, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
if err != nil {
panic(err)
}
return d
}
timings := C.llama_get_timings(llm.ctx)
fn(api.GenerateResponse{
Done: true,
Context: context.Data(),
PromptEvalCount: int(timings.n_p_eval),
PromptEvalDuration: dur(float64(timings.t_p_eval_ms)),
EvalCount: int(timings.n_eval),
EvalDuration: dur(float64(timings.t_eval_ms)),
})
return nil
}
func (llm *llama) sample(output deque[C.llama_token], opts *C.struct_llama_sample_options) (C.llama_token, error) {
numVocab := int(C.llama_n_vocab(llm.ctx))
logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)
candidates := deque[C.struct_llama_token_data]{capacity: numVocab}
for i := 0; i < candidates.Cap(); i++ {
candidates.PushLeft(C.struct_llama_token_data{
id: C.int(i),
logit: logits[i],
p: 0,
})
}
token := C.llama_sample(
llm.ctx,
unsafe.SliceData(candidates.Data()), C.size_t(candidates.Len()),
unsafe.SliceData(output.Data()), C.size_t(output.Len()),
opts)
if token != C.llama_token_eos() {
return token, nil
}
return 0, io.EOF
}
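Taken together, the rewritten binding exposes a small surface to the rest of the server: New, Predict, and Close. A hypothetical caller might look roughly like the following (the import path for the llama package and all option values are assumptions; the field names are the ones referenced by New and generate above):

package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llama"
)

func main() {
	// option fields mirror those read by New/generate above; values are illustrative
	opts := api.Options{
		NumCtx:      2048,
		NumBatch:    512,
		NumThread:   4,
		Temperature: 0.8,
		TopK:        40,
		TopP:        0.9,
	}

	llm, err := llama.New("/path/to/model.bin", opts) // hypothetical model path
	if err != nil {
		log.Fatal(err)
	}
	defer llm.Close()

	// tokens are streamed through the callback; the final call carries Done, Context, and timings
	if err := llm.Predict(nil, "Why is the sky blue?", func(r api.GenerateResponse) {
		fmt.Print(r.Response)
	}); err != nil {
		log.Fatal(err)
	}
}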

llama/llama.h (410 lines, Normal file)

@@ -0,0 +1,410 @@
/**
* llama.cpp - git 5bf2a2771886ee86137e01dbc7492f78fb392066
*
* MIT License
*
* Copyright (c) 2023 Georgi Gerganov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef LLAMA_H
#define LLAMA_H
#include "ggml.h"
#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
#define LLAMA_MAX_DEVICES GGML_CUDA_MAX_DEVICES
#else
#define LLAMA_MAX_DEVICES 1
#endif // GGML_USE_CUBLAS
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#ifdef LLAMA_SHARED
# if defined(_WIN32) && !defined(__MINGW32__)
# ifdef LLAMA_BUILD
# define LLAMA_API __declspec(dllexport)
# else
# define LLAMA_API __declspec(dllimport)
# endif
# else
# define LLAMA_API __attribute__ ((visibility ("default")))
# endif
#else
# define LLAMA_API
#endif
#ifdef __GNUC__
# define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
# define DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
# define DEPRECATED(func, hint) func
#endif
#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
#define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf'
#define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml'
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
#define LLAMA_FILE_VERSION 3
#define LLAMA_FILE_MAGIC LLAMA_FILE_MAGIC_GGJT
#define LLAMA_FILE_MAGIC_UNVERSIONED LLAMA_FILE_MAGIC_GGML
#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
#define LLAMA_SESSION_VERSION 1
#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
#define LLAMA_SUPPORTS_GPU_OFFLOAD
#endif
#ifdef __cplusplus
extern "C" {
#endif
//
// C interface
//
// TODO: show sample usage
//
struct llama_model;
struct llama_context;
typedef int llama_token;
typedef struct llama_token_data {
llama_token id; // token id
float logit; // log-odds of the token
float p; // probability of the token
} llama_token_data;
typedef struct llama_token_data_array {
llama_token_data * data;
size_t size;
bool sorted;
} llama_token_data_array;
typedef void (*llama_progress_callback)(float progress, void *ctx);
struct llama_context_params {
uint32_t seed; // RNG seed, -1 for random
int32_t n_ctx; // text context
int32_t n_batch; // prompt processing batch size
int32_t n_gpu_layers; // number of layers to store in VRAM
int32_t main_gpu; // the GPU that is used for scratch and small tensors
float tensor_split[LLAMA_MAX_DEVICES]; // how to split layers across multiple GPUs
// called with a progress value between 0 and 1, pass NULL to disable
llama_progress_callback progress_callback;
// context pointer passed to the progress callback
void * progress_callback_user_data;
// Keep the booleans together to avoid misalignment during copy-by-value.
bool low_vram; // if true, reduce VRAM usage at the cost of performance
bool f16_kv; // use fp16 for KV cache
bool logits_all; // the llama_eval() call computes all logits, not just the last one
bool vocab_only; // only load the vocabulary, no weights
bool use_mmap; // use mmap if possible
bool use_mlock; // force system to keep model in RAM
bool embedding; // embedding mode only
};
// model file types
enum llama_ftype {
LLAMA_FTYPE_ALL_F32 = 0,
LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
// LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
// LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors
LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors
};
// model quantization parameters
typedef struct llama_model_quantize_params {
int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
enum llama_ftype ftype; // quantize to this llama_ftype
bool allow_requantize; // allow quantizing non-f32/f16 tensors
bool quantize_output_tensor; // quantize output.weight
} llama_model_quantize_params;
// performance timing information
struct llama_timings {
double t_start_ms;
double t_end_ms;
double t_load_ms;
double t_sample_ms;
double t_p_eval_ms;
double t_eval_ms;
int32_t n_sample;
int32_t n_p_eval;
int32_t n_eval;
};
LLAMA_API struct llama_context_params llama_context_default_params();
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();
LLAMA_API bool llama_mmap_supported();
LLAMA_API bool llama_mlock_supported();
// TODO: not great API - very likely to change
// Initialize the llama + ggml backend
// If numa is true, use NUMA optimizations
// Call once at the start of the program
LLAMA_API void llama_backend_init(bool numa);
// Call once at the end of the program - currently only used for MPI
LLAMA_API void llama_backend_free();
LLAMA_API int64_t llama_time_us();
LLAMA_API struct llama_model * llama_load_model_from_file(
const char * path_model,
struct llama_context_params params);
LLAMA_API void llama_free_model(struct llama_model * model);
LLAMA_API struct llama_context * llama_new_context_with_model(
struct llama_model * model,
struct llama_context_params params);
// Various functions for loading a ggml llama model.
// Allocate (almost) all memory needed for the model.
// Return NULL on failure
LLAMA_API DEPRECATED(struct llama_context * llama_init_from_file(
const char * path_model,
struct llama_context_params params),
"please use llama_load_model_from_file combined with llama_new_context_with_model instead");
// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);
// Returns 0 on success
LLAMA_API int llama_model_quantize(
const char * fname_inp,
const char * fname_out,
const llama_model_quantize_params * params);
// Apply a LoRA adapter to a loaded model
// path_base_model is the path to a higher quality model to use as a base for
// the layers modified by the adapter. Can be NULL to use the current loaded model.
// The model needs to be reloaded before applying a new adapter, otherwise the adapter
// will be applied on top of the previous one
// Returns 0 on success
LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
struct llama_context * ctx,
const char * path_lora,
const char * path_base_model,
int n_threads),
"please use llama_model_apply_lora_from_file instead");
LLAMA_API int llama_model_apply_lora_from_file(
const struct llama_model * model,
const char * path_lora,
const char * path_base_model,
int n_threads);
// Returns the number of tokens in the KV cache
LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);
// Sets the current rng seed.
LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
// Returns the maximum size in bytes of the state (rng, logits, embedding
// and kv_cache) - will often be smaller after compacting tokens
LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);
// Copies the state to the specified destination address.
// Destination needs to have allocated enough memory.
// Returns the number of bytes copied
LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst);
// Set the state reading from the specified address
// Returns the number of bytes read
LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src);
// Save/load session file
LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
LLAMA_API bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count);
// Run the llama inference to obtain the logits and probabilities for the next token.
// tokens + n_tokens is the provided batch of new tokens to process
// n_past is the number of tokens to use from previous eval calls
// Returns 0 on success
LLAMA_API int llama_eval(
struct llama_context * ctx,
const llama_token * tokens,
int n_tokens,
int n_past,
int n_threads);
// Same as llama_eval, but use float matrix input directly.
LLAMA_API int llama_eval_embd(
struct llama_context * ctx,
const float * embd,
int n_tokens,
int n_past,
int n_threads);
// Export a static computation graph for context of 511 and batch size of 1
// NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
// parameters here to keep things simple
// IMPORTANT: do not use for anything else other than debugging and testing!
LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);
// Convert the provided text into tokens.
// The tokens pointer must be large enough to hold the resulting tokens.
// Returns the number of tokens on success, no more than n_max_tokens
// Returns a negative number on failure - the number of tokens that would have been returned
// TODO: not sure if correct
LLAMA_API int llama_tokenize(
struct llama_context * ctx,
const char * text,
llama_token * tokens,
int n_max_tokens,
bool add_bos);
LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
LLAMA_API int llama_n_embd (const struct llama_context * ctx);
// Get the vocabulary as output parameters.
// Returns number of results.
LLAMA_API int llama_get_vocab(
const struct llama_context * ctx,
const char * * strings,
float * scores,
int capacity);
// Token logits obtained from the last call to llama_eval()
// The logits for the last token are stored in the last row
// Can be mutated in order to change the probabilities of the next token
// Rows: n_tokens
// Cols: n_vocab
LLAMA_API float * llama_get_logits(struct llama_context * ctx);
// Get the embeddings for the input
// shape: [n_embd] (1-dimensional)
LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
// Token Id -> String. Uses the vocabulary in the provided context
LLAMA_API const char * llama_token_to_str(const struct llama_context * ctx, llama_token token);
// Special tokens
LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence
LLAMA_API llama_token llama_token_eos(); // end-of-sentence
LLAMA_API llama_token llama_token_nl(); // next-line
// Sampling functions
/// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);
/// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);
/// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted.
/// @params guidance_ctx A separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
/// @params scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
/// @params smooth_factor Smooth factor between guidance logits and original logits. 1.0f means only use guidance logits. 0.0f means only original logits.
LLAMA_API void llama_sample_classifier_free_guidance(
struct llama_context * ctx,
llama_token_data_array * candidates,
struct llama_context * guidance_ctx,
float scale,
float smooth_factor);
/// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);
/// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);
/// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
/// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);
/// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);
/// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);
/// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);
/// @details Selects the token with the highest probability.
LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);
/// @details Randomly selects a token from the candidates based on their probabilities.
LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);
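To make the Mirostat parameters documented above concrete, here is a minimal Go sketch of the Mirostat 2.0 step (an illustration of the update rule from the paper, not the llama.cpp implementation behind these declarations): candidates whose surprise exceeds `mu` are dropped, a token is sampled from the rest, and `mu` is moved toward the target `tau` at learning rate `eta`.

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
)

// mirostatV2Step sketches one step of the Mirostat 2.0 rule described above:
// drop candidates whose surprise (-log2 p) exceeds mu, sample from the rest,
// then move mu toward the target cross-entropy tau at learning rate eta.
func mirostatV2Step(probs []float64, tau, eta float64, mu *float64) int {
	var kept []int
	var total float64
	for i, p := range probs {
		if -math.Log2(p) <= *mu {
			kept = append(kept, i)
			total += p
		}
	}
	if len(kept) == 0 { // nothing qualifies: fall back to the most probable token
		best := 0
		for i, p := range probs {
			if p > probs[best] {
				best = i
			}
		}
		kept, total = []int{best}, probs[best]
	}

	// Sample from the renormalized survivors.
	r := rand.Float64() * total
	idx := kept[len(kept)-1]
	for _, i := range kept {
		if r -= probs[i]; r <= 0 {
			idx = i
			break
		}
	}

	// Observed surprise minus the target drives the mu update.
	*mu -= eta * (-math.Log2(probs[idx]) - tau)
	return idx
}

func main() {
	tau := 5.0
	mu := 2 * tau // mu starts at twice the target, as noted above
	probs := []float64{0.5, 0.3, 0.15, 0.05}
	fmt.Println("picked", mirostatV2Step(probs, tau, 0.1, &mu), "new mu", mu)
}
```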
// Performance information
LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
LLAMA_API void llama_print_timings(struct llama_context * ctx);
LLAMA_API void llama_reset_timings(struct llama_context * ctx);
// Print system information
LLAMA_API const char * llama_print_system_info(void);
#ifdef __cplusplus
}
#endif
// Internal API to be implemented by llama.cpp and used by tests/benchmarks only
#ifdef LLAMA_API_INTERNAL
#include <vector>
#include <string>
struct ggml_tensor;
const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
#endif
#endif // LLAMA_H

View File

@@ -1,9 +0,0 @@
//go:build cublas
// +build cublas
package llama
/*
#cgo LDFLAGS: -lcublas -lcudart -L/usr/local/cuda/lib64/
*/
import "C"

View File

@@ -1,2 +0,0 @@
//go:build metal
package llama

View File

@@ -1,9 +0,0 @@
//go:build openblas
// +build openblas
package llama
/*
#cgo LDFLAGS: -lopenblas
*/
import "C"

View File

@@ -1,375 +0,0 @@
// MIT License
// Copyright (c) 2023 go-skynet authors
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package llama
type ModelOptions struct {
ContextSize int
Seed int
NBatch int
F16Memory bool
MLock bool
MMap bool
VocabOnly bool
LowVRAM bool
Embeddings bool
NUMA bool
NGPULayers int
MainGPU string
TensorSplit string
}
type PredictOptions struct {
Seed, Threads, Tokens, TopK, Repeat, Batch, NKeep int
TopP, Temperature, Penalty float64
F16KV bool
DebugMode bool
StopPrompts []string
IgnoreEOS bool
TailFreeSamplingZ float64
TypicalP float64
FrequencyPenalty float64
PresencePenalty float64
Mirostat int
MirostatETA float64
MirostatTAU float64
PenalizeNL bool
LogitBias string
TokenCallback func(string) bool
MLock, MMap bool
MainGPU string
TensorSplit string
}
type PredictOption func(p *PredictOptions)
type ModelOption func(p *ModelOptions)
var DefaultModelOptions ModelOptions = ModelOptions{
ContextSize: 512,
Seed: 0,
F16Memory: false,
MLock: false,
Embeddings: false,
MMap: true,
LowVRAM: false,
}
var DefaultOptions PredictOptions = PredictOptions{
Seed: -1,
Threads: 4,
Tokens: 128,
Penalty: 1.1,
Repeat: 64,
Batch: 512,
NKeep: 64,
TopK: 40,
TopP: 0.95,
TailFreeSamplingZ: 1.0,
TypicalP: 1.0,
Temperature: 0.8,
FrequencyPenalty: 0.0,
PresencePenalty: 0.0,
Mirostat: 0,
MirostatTAU: 5.0,
MirostatETA: 0.1,
MMap: true,
}
// SetContext sets the context size.
func SetContext(c int) ModelOption {
return func(p *ModelOptions) {
p.ContextSize = c
}
}
func SetModelSeed(c int) ModelOption {
return func(p *ModelOptions) {
p.Seed = c
}
}
// SetMMap sets whether to use memory mapping (mmap) when loading the model.
func SetMMap(b bool) ModelOption {
return func(p *ModelOptions) {
p.MMap = b
}
}
// SetNBatch sets the batch size (n_batch).
func SetNBatch(n_batch int) ModelOption {
return func(p *ModelOptions) {
p.NBatch = n_batch
}
}
// SetTensorSplit sets the tensor split for the GPU.
func SetTensorSplit(maingpu string) ModelOption {
return func(p *ModelOptions) {
p.TensorSplit = maingpu
}
}
// SetMainGPU sets the main_gpu
func SetMainGPU(maingpu string) ModelOption {
return func(p *ModelOptions) {
p.MainGPU = maingpu
}
}
// SetPredictionTensorSplit sets the tensor split for the GPU
func SetPredictionTensorSplit(maingpu string) PredictOption {
return func(p *PredictOptions) {
p.TensorSplit = maingpu
}
}
// SetPredictionMainGPU sets the main_gpu
func SetPredictionMainGPU(maingpu string) PredictOption {
return func(p *PredictOptions) {
p.MainGPU = maingpu
}
}
var VocabOnly ModelOption = func(p *ModelOptions) {
p.VocabOnly = true
}
var EnabelLowVRAM ModelOption = func(p *ModelOptions) {
p.LowVRAM = true
}
var EnableNUMA ModelOption = func(p *ModelOptions) {
p.NUMA = true
}
var EnableEmbeddings ModelOption = func(p *ModelOptions) {
p.Embeddings = true
}
var EnableF16Memory ModelOption = func(p *ModelOptions) {
p.F16Memory = true
}
var EnableF16KV PredictOption = func(p *PredictOptions) {
p.F16KV = true
}
var Debug PredictOption = func(p *PredictOptions) {
p.DebugMode = true
}
var EnableMLock ModelOption = func(p *ModelOptions) {
p.MLock = true
}
// NewModelOptions creates a new ModelOptions object with the given options.
func NewModelOptions(opts ...ModelOption) ModelOptions {
p := DefaultModelOptions
for _, opt := range opts {
opt(&p)
}
return p
}
var IgnoreEOS PredictOption = func(p *PredictOptions) {
p.IgnoreEOS = true
}
// SetMlock sets the memory lock.
func SetMlock(b bool) PredictOption {
return func(p *PredictOptions) {
p.MLock = b
}
}
// SetMemoryMap sets memory mapping.
func SetMemoryMap(b bool) PredictOption {
return func(p *PredictOptions) {
p.MMap = b
}
}
// SetGPULayers sets the number of GPU layers to use to offload computation
func SetGPULayers(n int) ModelOption {
return func(p *ModelOptions) {
p.NGPULayers = n
}
}
// SetTokenCallback sets the callback that is invoked for each generated token.
func SetTokenCallback(fn func(string) bool) PredictOption {
return func(p *PredictOptions) {
p.TokenCallback = fn
}
}
// SetStopWords sets the prompts that will stop predictions.
func SetStopWords(stop ...string) PredictOption {
return func(p *PredictOptions) {
p.StopPrompts = stop
}
}
// SetSeed sets the random seed for sampling text generation.
func SetSeed(seed int) PredictOption {
return func(p *PredictOptions) {
p.Seed = seed
}
}
// SetThreads sets the number of threads to use for text generation.
func SetThreads(threads int) PredictOption {
return func(p *PredictOptions) {
p.Threads = threads
}
}
// SetTokens sets the number of tokens to generate.
func SetTokens(tokens int) PredictOption {
return func(p *PredictOptions) {
p.Tokens = tokens
}
}
// SetTopK sets the value for top-K sampling.
func SetTopK(topk int) PredictOption {
return func(p *PredictOptions) {
p.TopK = topk
}
}
// SetTopP sets the value for nucleus sampling.
func SetTopP(topp float64) PredictOption {
return func(p *PredictOptions) {
p.TopP = topp
}
}
// SetTemperature sets the temperature value for text generation.
func SetTemperature(temp float64) PredictOption {
return func(p *PredictOptions) {
p.Temperature = temp
}
}
// SetPenalty sets the repetition penalty for text generation.
func SetPenalty(penalty float64) PredictOption {
return func(p *PredictOptions) {
p.Penalty = penalty
}
}
// SetRepeat sets the number of times to repeat text generation.
func SetRepeat(repeat int) PredictOption {
return func(p *PredictOptions) {
p.Repeat = repeat
}
}
// SetBatch sets the batch size.
func SetBatch(size int) PredictOption {
return func(p *PredictOptions) {
p.Batch = size
}
}
// SetNKeep sets the number of tokens from the initial prompt to keep.
func SetNKeep(n int) PredictOption {
return func(p *PredictOptions) {
p.NKeep = n
}
}
// NewPredictOptions creates a new PredictOptions object with the given options.
func NewPredictOptions(opts ...PredictOption) PredictOptions {
p := DefaultOptions
for _, opt := range opts {
opt(&p)
}
return p
}
// SetTailFreeSamplingZ sets the tail-free sampling parameter z.
func SetTailFreeSamplingZ(tfz float64) PredictOption {
return func(p *PredictOptions) {
p.TailFreeSamplingZ = tfz
}
}
// SetTypicalP sets the typicality parameter, p_typical.
func SetTypicalP(tp float64) PredictOption {
return func(p *PredictOptions) {
p.TypicalP = tp
}
}
// SetFrequencyPenalty sets the frequency penalty parameter, freq_penalty.
func SetFrequencyPenalty(fp float64) PredictOption {
return func(p *PredictOptions) {
p.FrequencyPenalty = fp
}
}
// SetPresencePenalty sets the presence penalty parameter, presence_penalty.
func SetPresencePenalty(pp float64) PredictOption {
return func(p *PredictOptions) {
p.PresencePenalty = pp
}
}
// SetMirostat sets the mirostat parameter.
func SetMirostat(m int) PredictOption {
return func(p *PredictOptions) {
p.Mirostat = m
}
}
// SetMirostatETA sets the mirostat ETA parameter.
func SetMirostatETA(me float64) PredictOption {
return func(p *PredictOptions) {
p.MirostatETA = me
}
}
// SetMirostatTAU sets the mirostat TAU parameter.
func SetMirostatTAU(mt float64) PredictOption {
return func(p *PredictOptions) {
p.MirostatTAU = mt
}
}
// SetPenalizeNL sets whether to penalize newlines or not.
func SetPenalizeNL(pnl bool) PredictOption {
return func(p *PredictOptions) {
p.PenalizeNL = pnl
}
}
// SetLogitBias sets the logit bias parameter.
func SetLogitBias(lb string) PredictOption {
return func(p *PredictOptions) {
p.LogitBias = lb
}
}
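Taken together, these setters form a functional-options API over the two defaults structs above. A minimal usage sketch (a fragment that would live inside the `llama` package or import it; the values are illustrative):

```go
// Layer option setters over the package defaults.
modelOpts := NewModelOptions(
	SetContext(2048),
	SetGPULayers(32),
	SetMMap(true),
)

predictOpts := NewPredictOptions(
	SetSeed(42),
	SetThreads(8),
	SetTokens(256),
	SetTemperature(0.7),
	SetTopK(40),
	SetTopP(0.9),
	SetStopWords("</s>"),
)
```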

104
llama/utils.go Normal file
View File

@@ -0,0 +1,104 @@
package llama
type node[T any] struct {
t T
next *node[T]
prev *node[T]
}
type deque[T any] struct {
head *node[T]
tail *node[T]
size int
capacity int
}
func (d *deque[T]) Empty() bool {
return d.size == 0
}
func (d *deque[T]) Len() int {
return d.size
}
func (d *deque[T]) Cap() int {
return d.capacity
}
func (d *deque[T]) Push(t T) {
if d.capacity > 0 && d.size >= d.capacity {
d.PopLeft()
}
n := node[T]{t: t}
if d.head != nil {
n.next = d.head
d.head.prev = &n
d.head = &n
} else {
d.head = &n
d.tail = &n
}
d.size++
}
func (d *deque[T]) PushLeft(t T) {
if d.capacity > 0 && d.size >= d.capacity {
d.Pop()
}
n := node[T]{t: t}
if d.tail != nil {
n.prev = d.tail
d.tail.next = &n
d.tail = &n
} else {
d.head = &n
d.tail = &n
}
d.size++
}
func (d *deque[T]) Pop() *T {
if d.Empty() {
return nil
}
head := d.head
d.head = head.next
if d.head != nil {
d.head.prev = nil
} else {
d.tail = nil
}
d.size--
return &head.t
}
func (d *deque[T]) PopLeft() *T {
if d.Empty() {
return nil
}
tail := d.tail
d.tail = tail.prev
if d.tail != nil {
d.tail.next = nil
} else {
d.head = nil
}
d.size--
return &tail.t
}
func (d *deque[T]) Data() (data []T) {
for n := d.head; n != nil; n = n.next {
data = append(data, n.t)
}
return data
}
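A short sketch of the bounded behaviour (this has to live inside the `llama` package, since `deque` is unexported): when `capacity` is set, pushing past it evicts from the opposite end, so the structure acts like a fixed-size history window.

```go
d := deque[int]{capacity: 3}
d.Push(1)
d.Push(2)
d.Push(3)
d.Push(4) // capacity reached: PopLeft evicts the oldest element (1)

fmt.Println(d.Len())  // 3
fmt.Println(d.Data()) // [4 3 2]: Push adds at the head, so Data walks newest-first
```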

View File

@@ -1,9 +1,11 @@
package main
import (
"context"
"github.com/jmorganca/ollama/cmd"
)
func main() {
cmd.NewCLI().Execute()
cmd.NewCLI().ExecuteContext(context.Background())
}

View File

@@ -25,14 +25,14 @@
},
{
"name": "vicuna",
"display_name": "Wizard Vicuna Uncensored",
"parameters": "13B",
"url": "https://huggingface.co/TheBloke/Wizard-Vicuna-13B-Uncensored-GGML/resolve/main/Wizard-Vicuna-13B-Uncensored.ggmlv3.q2_K.bin",
"short_description": "An uncensored model with no guardrails.",
"description": "This model is trained with a subset of the dataset - responses that contained alignment / moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built-in, so that alignment (of any sort) can be added separately with for example with a RLHF LoRA.",
"display_name": "Vicuna",
"parameters": "7B",
"url": "https://huggingface.co/TheBloke/vicuna-7B-v1.3-GGML/resolve/main/vicuna-7b-v1.3.ggmlv3.q4_0.bin",
"short_description": "Vicuna is a chat assistant trained by fine-tuning LLaMA on user-shared conversations collected from ShareGPT.",
"description": "The primary use of Vicuna is research on large language models and chatbots. The primary intended users of the model are researchers and hobbyists in natural language processing, machine learning, and artificial intelligence.",
"published_by": "TheBloke",
"original_author": "ehartford",
"original_url": "https://huggingface.co/ehartford/Wizard-Vicuna-13B-Uncensored",
"license:": "GPL"
"original_author": "LMSYS",
"original_url": "https://huggingface.co/lmsys/vicuna-7b-v1.3",
"license:": "Non-commercial"
}
]

77
parser/parser.go Normal file
View File

@@ -0,0 +1,77 @@
package parser
import (
"bufio"
"fmt"
"io"
"strings"
)
type Command struct {
Name string
Arg string
}
func Parse(reader io.Reader) ([]Command, error) {
var commands []Command
var foundModel bool
scanner := bufio.NewScanner(reader)
multiline := false
var multilineCommand *Command
for scanner.Scan() {
line := scanner.Text()
if multiline {
// If we're in a multiline string and the line is """, end the multiline string.
if strings.TrimSpace(line) == `"""` {
multiline = false
commands = append(commands, *multilineCommand)
} else {
// Otherwise, append the line to the multiline string.
multilineCommand.Arg += "\n" + line
}
continue
}
fields := strings.Fields(line)
if len(fields) == 0 {
continue
}
command := Command{}
switch strings.ToUpper(fields[0]) {
case "FROM":
command.Name = "model"
command.Arg = fields[1]
if command.Arg == "" {
return nil, fmt.Errorf("no model specified in FROM line")
}
foundModel = true
case "PROMPT", "LICENSE":
command.Name = strings.ToLower(fields[0])
if fields[1] == `"""` {
multiline = true
multilineCommand = &command
multilineCommand.Arg = ""
} else {
command.Arg = strings.Join(fields[1:], " ")
}
case "PARAMETER":
command.Name = fields[1]
command.Arg = strings.Join(fields[2:], " ")
default:
continue
}
if !multiline {
commands = append(commands, command)
}
}
if !foundModel {
return nil, fmt.Errorf("no FROM line for the model was specified")
}
if multiline {
return nil, fmt.Errorf("unclosed multiline string")
}
return commands, scanner.Err()
}
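A sketch of how the parser is driven (the Modelfile text is illustrative; `Parse` lower-cases the instruction names and collapses triple-quoted blocks into a single argument):

```go
modelfile := `FROM llama2
PROMPT """
{{- .Prompt }}
"""
PARAMETER temperature 0.7
LICENSE MIT`

commands, err := parser.Parse(strings.NewReader(modelfile))
if err != nil {
	log.Fatal(err)
}
for _, c := range commands {
	fmt.Printf("%s => %q\n", c.Name, c.Arg)
}
// model => "llama2"
// prompt => "\n{{- .Prompt }}"
// temperature => "0.7"
// license => "MIT"
```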

21
progressbar/LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Zack
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

121
progressbar/README.md Normal file
View File

@@ -0,0 +1,121 @@
# progressbar
[![CI](https://github.com/schollz/progressbar/actions/workflows/ci.yml/badge.svg?branch=main&event=push)](https://github.com/schollz/progressbar/actions/workflows/ci.yml)
[![go report card](https://goreportcard.com/badge/github.com/schollz/progressbar)](https://goreportcard.com/report/github.com/schollz/progressbar)
[![coverage](https://img.shields.io/badge/coverage-84%25-brightgreen.svg)](https://gocover.io/github.com/schollz/progressbar)
[![godocs](https://godoc.org/github.com/schollz/progressbar?status.svg)](https://godoc.org/github.com/schollz/progressbar/v3)
A very simple thread-safe progress bar which should work on every OS without problems. I needed a progressbar for [croc](https://github.com/schollz/croc) and everything I tried had problems, so I made another one. In order to be OS agnostic I do not plan to support [multi-line outputs](https://github.com/schollz/progressbar/issues/6).
## Install
```
go get -u github.com/schollz/progressbar/v3
```
## Usage
### Basic usage
```golang
bar := progressbar.Default(100)
for i := 0; i < 100; i++ {
bar.Add(1)
time.Sleep(40 * time.Millisecond)
}
```
which looks like:
![Example of basic bar](examples/basic/basic.gif)
### I/O operations
The `progressbar` implements `io.Writer`, so it can automatically detect the number of bytes written to a stream; this also lets you use it as a progressbar for an `io.Reader`.
```golang
req, _ := http.NewRequest("GET", "https://dl.google.com/go/go1.14.2.src.tar.gz", nil)
resp, _ := http.DefaultClient.Do(req)
defer resp.Body.Close()
f, _ := os.OpenFile("go1.14.2.src.tar.gz", os.O_CREATE|os.O_WRONLY, 0644)
defer f.Close()
bar := progressbar.DefaultBytes(
resp.ContentLength,
"downloading",
)
io.Copy(io.MultiWriter(f, bar), resp.Body)
```
which looks like:
![Example of download bar](examples/download/download.gif)
### Progress bar with unknown length
A progressbar with unknown length is a spinner. Any bar created with a length of -1 is automatically rendered as a spinner with a customizable spinner type. For example, the code above can be run with `resp.ContentLength` set to `-1`.
which looks like:
![Example of download bar with unknown length](examples/download-unknown/download-unknown.gif)
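For example, constructing the default bar with `-1` directly yields a spinner (a small sketch):

```golang
bar := progressbar.Default(-1, "downloading")
for i := 0; i < 100; i++ {
	bar.Add(1)
	time.Sleep(20 * time.Millisecond)
}
```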
### Customization
There is a lot of customization that you can do - change the writer, the color, the width, description, theme, etc. See [all the options](https://pkg.go.dev/github.com/schollz/progressbar/v3?tab=doc#Option).
```golang
bar := progressbar.NewOptions(1000,
progressbar.OptionSetWriter(ansi.NewAnsiStdout()),
progressbar.OptionEnableColorCodes(true),
progressbar.OptionShowBytes(true),
progressbar.OptionSetWidth(15),
progressbar.OptionSetDescription("[cyan][1/3][reset] Writing moshable file..."),
progressbar.OptionSetTheme(progressbar.Theme{
Saucer: "[green]=[reset]",
SaucerHead: "[green]>[reset]",
SaucerPadding: " ",
BarStart: "[",
BarEnd: "]",
}))
for i := 0; i < 1000; i++ {
bar.Add(1)
time.Sleep(5 * time.Millisecond)
}
```
which looks like:
![Example of customized bar](examples/customization/customization.gif)
## Contributing
Pull requests are welcome. Feel free to...
- Revise documentation
- Add new features
- Fix bugs
- Suggest improvements
## Thanks
Thanks [@Dynom](https://github.com/dynom) for massive improvements in version 2.0!
Thanks [@CrushedPixel](https://github.com/CrushedPixel) for adding descriptions and color code support!
Thanks [@MrMe42](https://github.com/MrMe42) for adding some minor features!
Thanks [@tehstun](https://github.com/tehstun) for some great PRs!
Thanks [@Benzammour](https://github.com/Benzammour) and [@haseth](https://github.com/haseth) for helping create v3!
Thanks [@briandowns](https://github.com/briandowns) for compiling the list of spinners.
## License
MIT

1098
progressbar/progressbar.go Normal file

File diff suppressed because it is too large

80
progressbar/spinners.go Normal file
View File

@@ -0,0 +1,80 @@
package progressbar
var spinners = map[int][]string{
0: {"←", "↖", "↑", "↗", "→", "↘", "↓", "↙"},
1: {"▁", "▃", "▄", "▅", "▆", "▇", "█", "▇", "▆", "▅", "▄", "▃", "▁"},
2: {"▖", "▘", "▝", "▗"},
3: {"┤", "┘", "┴", "└", "├", "┌", "┬", "┐"},
4: {"◢", "◣", "◤", "◥"},
5: {"◰", "◳", "◲", "◱"},
6: {"◴", "◷", "◶", "◵"},
7: {"◐", "◓", "◑", "◒"},
8: {".", "o", "O", "@", "*"},
9: {"|", "/", "-", "\\"},
10: {"◡◡", "⊙⊙", "◠◠"},
11: {"⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"},
12: {">))'>", " >))'>", " >))'>", " >))'>", " >))'>", " <'((<", " <'((<", " <'((<"},
13: {"⠁", "⠂", "⠄", "⡀", "⢀", "⠠", "⠐", "⠈"},
14: {"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"},
15: {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"},
16: {"▉", "▊", "▋", "▌", "▍", "▎", "▏", "▎", "▍", "▌", "▋", "▊", "▉"},
17: {"■", "□", "▪", "▫"},
18: {"←", "↑", "→", "↓"},
19: {"╫", "╪"},
20: {"⇐", "⇖", "⇑", "⇗", "⇒", "⇘", "⇓", "⇙"},
21: {"⠁", "⠁", "⠉", "⠙", "⠚", "⠒", "⠂", "⠂", "⠒", "⠲", "⠴", "⠤", "⠄", "⠄", "⠤", "⠠", "⠠", "⠤", "⠦", "⠖", "⠒", "⠐", "⠐", "⠒", "⠓", "⠋", "⠉", "⠈", "⠈"},
22: {"⠈", "⠉", "⠋", "⠓", "⠒", "⠐", "⠐", "⠒", "⠖", "⠦", "⠤", "⠠", "⠠", "⠤", "⠦", "⠖", "⠒", "⠐", "⠐", "⠒", "⠓", "⠋", "⠉", "⠈"},
23: {"⠁", "⠉", "⠙", "⠚", "⠒", "⠂", "⠂", "⠒", "⠲", "⠴", "⠤", "⠄", "⠄", "⠤", "⠴", "⠲", "⠒", "⠂", "⠂", "⠒", "⠚", "⠙", "⠉", "⠁"},
24: {"⠋", "⠙", "⠚", "⠒", "⠂", "⠂", "⠒", "⠲", "⠴", "⠦", "⠖", "⠒", "⠐", "⠐", "⠒", "⠓", "⠋"},
25: {"ヲ", "ァ", "ィ", "ゥ", "ェ", "ォ", "ャ", "ュ", "ョ", "ッ", "ア", "イ", "ウ", "エ", "オ", "カ", "キ", "ク", "ケ", "コ", "サ", "シ", "ス", "セ", "ソ", "タ", "チ", "ツ", "テ", "ト", "ナ", "ニ", "ヌ", "ネ", "ノ", "ハ", "ヒ", "フ", "ヘ", "ホ", "マ", "ミ", "ム", "メ", "モ", "ヤ", "ユ", "ヨ", "ラ", "リ", "ル", "レ", "ロ", "ワ", "ン"},
26: {".", "..", "..."},
27: {"▁", "▂", "▃", "▄", "▅", "▆", "▇", "█", "▉", "▊", "▋", "▌", "▍", "▎", "▏", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█", "▇", "▆", "▅", "▄", "▃", "▂", "▁"},
28: {".", "o", "O", "°", "O", "o", "."},
29: {"+", "x"},
30: {"v", "<", "^", ">"},
31: {">>--->", " >>--->", " >>--->", " >>--->", " >>--->", " <---<<", " <---<<", " <---<<", " <---<<", "<---<<"},
32: {"|", "||", "|||", "||||", "|||||", "|||||||", "||||||||", "|||||||", "||||||", "|||||", "||||", "|||", "||", "|"},
33: {"[ ]", "[= ]", "[== ]", "[=== ]", "[==== ]", "[===== ]", "[====== ]", "[======= ]", "[======== ]", "[========= ]", "[==========]"},
34: {"(*---------)", "(-*--------)", "(--*-------)", "(---*------)", "(----*-----)", "(-----*----)", "(------*---)", "(-------*--)", "(--------*-)", "(---------*)"},
35: {"█▒▒▒▒▒▒▒▒▒", "███▒▒▒▒▒▒▒", "█████▒▒▒▒▒", "███████▒▒▒", "██████████"},
36: {"[ ]", "[=> ]", "[===> ]", "[=====> ]", "[======> ]", "[========> ]", "[==========> ]", "[============> ]", "[==============> ]", "[================> ]", "[==================> ]", "[===================>]"},
37: {"", ""},
38: {"▌", "▀", "▐▄"},
39: {"🌍", "🌎", "🌏"},
40: {"◜", "◝", "◞", "◟"},
41: {"⬒", "⬔", "⬓", "⬕"},
42: {"⬖", "⬘", "⬗", "⬙"},
43: {"[>>> >]", "[]>>>> []", "[] >>>> []", "[] >>>> []", "[] >>>> []", "[] >>>>[]", "[>> >>]"},
44: {"♠", "♣", "♥", "♦"},
45: {"➞", "➟", "➠", "➡", "➠", "➟"},
46: {" | ", ` \ `, "_ ", ` \ `, " | ", " / ", " _", " / "},
47: {" . . . .", ". . . .", ". . . .", ". . . .", ". . . . ", ". . . . ."},
48: {" | ", " / ", " _ ", ` \ `, " | ", ` \ `, " _ ", " / "},
49: {"⎺", "⎻", "⎼", "⎽", "⎼", "⎻"},
50: {"▹▹▹▹▹", "▸▹▹▹▹", "▹▸▹▹▹", "▹▹▸▹▹", "▹▹▹▸▹", "▹▹▹▹▸"},
51: {"[ ]", "[ =]", "[ ==]", "[ ===]", "[====]", "[=== ]", "[== ]", "[= ]"},
52: {"( ● )", "( ● )", "( ● )", "( ● )", "( ●)", "( ● )", "( ● )", "( ● )", "( ● )"},
53: {"✶", "✸", "✹", "✺", "✹", "✷"},
54: {"▐|\\____________▌", "▐_|\\___________▌", "▐__|\\__________▌", "▐___|\\_________▌", "▐____|\\________▌", "▐_____|\\_______▌", "▐______|\\______▌", "▐_______|\\_____▌", "▐________|\\____▌", "▐_________|\\___▌", "▐__________|\\__▌", "▐___________|\\_▌", "▐____________|\\▌", "▐____________/|▌", "▐___________/|_▌", "▐__________/|__▌", "▐_________/|___▌", "▐________/|____▌", "▐_______/|_____▌", "▐______/|______▌", "▐_____/|_______▌", "▐____/|________▌", "▐___/|_________▌", "▐__/|__________▌", "▐_/|___________▌", "▐/|____________▌"},
55: {"▐⠂ ▌", "▐⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂▌", "▐ ⠠▌", "▐ ⡀▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐ ⠠ ▌", "▐ ⠂ ▌", "▐ ⠈ ▌", "▐ ⠂ ▌", "▐ ⠠ ▌", "▐ ⡀ ▌", "▐⠠ ▌"},
56: {"¿", "?"},
57: {"⢹", "⢺", "⢼", "⣸", "⣇", "⡧", "⡗", "⡏"},
58: {"⢄", "⢂", "⢁", "⡁", "⡈", "⡐", "⡠"},
59: {". ", ".. ", "...", " ..", " .", " "},
60: {".", "o", "O", "°", "O", "o", "."},
61: {"▓", "▒", "░"},
62: {"▌", "▀", "▐", "▄"},
63: {"⊶", "⊷"},
64: {"▪", "▫"},
65: {"□", "■"},
66: {"▮", "▯"},
67: {"-", "=", "≡"},
68: {"d", "q", "p", "b"},
69: {"∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"},
70: {"🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "},
71: {"☗", "☖"},
72: {"⧇", "⧆"},
73: {"◉", "◎"},
74: {"㊂", "㊀", "㊁"},
75: {"⦾", "⦿"},
}

View File

@@ -10,7 +10,9 @@ fi
OS=$(go env GOOS)
ARCH=$(go env GOARCH)
make app
go build .
npm --prefix app run make:sign
# Create a new tag if it doesn't exist.
if ! git rev-parse v$VERSION >/dev/null 2>&1; then
@@ -18,7 +20,7 @@ if ! git rev-parse v$VERSION >/dev/null 2>&1; then
git push origin v$VERSION
fi
mkdir dist
mkdir -p dist
cp app/out/make/zip/${OS}/${ARCH}/Ollama-${OS}-${ARCH}-${VERSION}.zip dist/Ollama-${OS}-${ARCH}.zip
cp ./ollama dist/ollama-${OS}-${ARCH}

881
server/images.go Normal file
View File

@@ -0,0 +1,881 @@
package server
import (
"bytes"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/parser"
)
type Model struct {
Name string `json:"name"`
ModelPath string
Prompt string
Options api.Options
}
type ManifestV2 struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Config Layer `json:"config"`
Layers []*Layer `json:"layers"`
}
type Layer struct {
MediaType string `json:"mediaType"`
Digest string `json:"digest"`
Size int `json:"size"`
}
type LayerReader struct {
Layer
io.Reader
}
type ConfigV2 struct {
Architecture string `json:"architecture"`
OS string `json:"os"`
RootFS RootFS `json:"rootfs"`
}
type RootFS struct {
Type string `json:"type"`
DiffIDs []string `json:"diff_ids"`
}
func (m *ManifestV2) GetTotalSize() int {
var total int
for _, layer := range m.Layers {
total += layer.Size
}
total += m.Config.Size
return total
}
func GetManifest(mp ModelPath) (*ManifestV2, error) {
fp, err := mp.GetManifestPath(false)
if err != nil {
return nil, err
}
if _, err = os.Stat(fp); err != nil && !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("couldn't find model '%s'", mp.GetShortTagname())
}
var manifest *ManifestV2
f, err := os.Open(fp)
if err != nil {
return nil, fmt.Errorf("couldn't open file '%s'", fp)
}
decoder := json.NewDecoder(f)
err = decoder.Decode(&manifest)
if err != nil {
return nil, err
}
return manifest, nil
}
func GetModel(name string) (*Model, error) {
mp := ParseModelPath(name)
manifest, err := GetManifest(mp)
if err != nil {
return nil, err
}
model := &Model{
Name: mp.GetFullTagname(),
}
for _, layer := range manifest.Layers {
filename, err := GetBlobsPath(layer.Digest)
if err != nil {
return nil, err
}
switch layer.MediaType {
case "application/vnd.ollama.image.model":
model.ModelPath = filename
case "application/vnd.ollama.image.prompt":
data, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
model.Prompt = string(data)
case "application/vnd.ollama.image.params":
params, err := os.Open(filename)
if err != nil {
return nil, err
}
defer params.Close()
var opts api.Options
if err = json.NewDecoder(params).Decode(&opts); err != nil {
return nil, err
}
model.Options = opts
}
}
return model, nil
}
func getAbsPath(fp string) (string, error) {
if strings.HasPrefix(fp, "~/") {
parts := strings.Split(fp, "/")
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
fp = filepath.Join(home, filepath.Join(parts[1:]...))
}
return os.ExpandEnv(fp), nil
}
func CreateModel(name string, mf io.Reader, fn func(status string)) error {
fn("parsing modelfile")
commands, err := parser.Parse(mf)
if err != nil {
fn(fmt.Sprintf("error: %v", err))
return err
}
var layers []*LayerReader
params := make(map[string]string)
for _, c := range commands {
log.Printf("[%s] - %s\n", c.Name, c.Arg)
switch c.Name {
case "model":
fn("looking for model")
mf, err := GetManifest(ParseModelPath(c.Arg))
if err != nil {
// if we couldn't read the manifest, try getting the bin file
fp, err := getAbsPath(c.Arg)
if err != nil {
fn("error determing path. exiting.")
return err
}
fn("creating model layer")
file, err := os.Open(fp)
if err != nil {
fn(fmt.Sprintf("couldn't find model '%s'", c.Arg))
return fmt.Errorf("failed to open file: %v", err)
}
defer file.Close()
l, err := CreateLayer(file)
if err != nil {
fn(fmt.Sprintf("couldn't create model layer: %v", err))
return fmt.Errorf("failed to create layer: %v", err)
}
l.MediaType = "application/vnd.ollama.image.model"
layers = append(layers, l)
} else {
log.Printf("manifest = %#v", mf)
for _, l := range mf.Layers {
newLayer, err := GetLayerWithBufferFromLayer(l)
if err != nil {
fn(fmt.Sprintf("couldn't read layer: %v", err))
return err
}
layers = append(layers, newLayer)
}
}
case "prompt":
fn("creating prompt layer")
// remove the prompt layer if one exists
layers = removeLayerFromLayers(layers, "application/vnd.ollama.image.prompt")
prompt := strings.NewReader(c.Arg)
l, err := CreateLayer(prompt)
if err != nil {
fn(fmt.Sprintf("couldn't create prompt layer: %v", err))
return fmt.Errorf("failed to create layer: %v", err)
}
l.MediaType = "application/vnd.ollama.image.prompt"
layers = append(layers, l)
case "license":
fn("creating license layer")
license := strings.NewReader(c.Arg)
l, err := CreateLayer(license)
if err != nil {
fn(fmt.Sprintf("couldn't create license layer: %v", err))
return fmt.Errorf("failed to create layer: %v", err)
}
l.MediaType = "application/vnd.ollama.image.license"
layers = append(layers, l)
default:
params[c.Name] = c.Arg
}
}
// Create a single layer for the parameters
if len(params) > 0 {
fn("creating parameter layer")
layers = removeLayerFromLayers(layers, "application/vnd.ollama.image.params")
paramData, err := paramsToReader(params)
if err != nil {
return fmt.Errorf("couldn't create params json: %v", err)
}
l, err := CreateLayer(paramData)
if err != nil {
return fmt.Errorf("failed to create layer: %v", err)
}
l.MediaType = "application/vnd.ollama.image.params"
layers = append(layers, l)
}
digests, err := getLayerDigests(layers)
if err != nil {
return err
}
var manifestLayers []*Layer
for _, l := range layers {
manifestLayers = append(manifestLayers, &l.Layer)
}
// Create a layer for the config object
fn("creating config layer")
cfg, err := createConfigLayer(digests)
if err != nil {
return err
}
layers = append(layers, cfg)
err = SaveLayers(layers, fn, false)
if err != nil {
fn(fmt.Sprintf("error saving layers: %v", err))
return err
}
// Create the manifest
fn("writing manifest")
err = CreateManifest(name, cfg, manifestLayers)
if err != nil {
fn(fmt.Sprintf("error creating manifest: %v", err))
return err
}
fn("success")
return nil
}
func removeLayerFromLayers(layers []*LayerReader, mediaType string) []*LayerReader {
j := 0
for _, l := range layers {
if l.MediaType != mediaType {
layers[j] = l
j++
}
}
return layers[:j]
}
func SaveLayers(layers []*LayerReader, fn func(status string), force bool) error {
// Write each of the layers to disk
for _, layer := range layers {
fp, err := GetBlobsPath(layer.Digest)
if err != nil {
return err
}
_, err = os.Stat(fp)
if os.IsNotExist(err) || force {
fn(fmt.Sprintf("writing layer %s", layer.Digest))
out, err := os.Create(fp)
if err != nil {
log.Printf("couldn't create %s", fp)
return err
}
defer out.Close()
if _, err = io.Copy(out, layer.Reader); err != nil {
return err
}
} else {
fn(fmt.Sprintf("using already created layer %s", layer.Digest))
}
}
return nil
}
func CreateManifest(name string, cfg *LayerReader, layers []*Layer) error {
mp := ParseModelPath(name)
manifest := ManifestV2{
SchemaVersion: 2,
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
Config: Layer{
MediaType: cfg.MediaType,
Size: cfg.Size,
Digest: cfg.Digest,
},
Layers: layers,
}
manifestJSON, err := json.Marshal(manifest)
if err != nil {
return err
}
fp, err := mp.GetManifestPath(true)
if err != nil {
return err
}
return os.WriteFile(fp, manifestJSON, 0o644)
}
func GetLayerWithBufferFromLayer(layer *Layer) (*LayerReader, error) {
fp, err := GetBlobsPath(layer.Digest)
if err != nil {
return nil, err
}
file, err := os.Open(fp)
if err != nil {
return nil, fmt.Errorf("could not open blob: %w", err)
}
defer file.Close()
newLayer, err := CreateLayer(file)
if err != nil {
return nil, err
}
newLayer.MediaType = layer.MediaType
return newLayer, nil
}
func paramsToReader(params map[string]string) (io.ReadSeeker, error) {
opts := api.DefaultOptions()
typeOpts := reflect.TypeOf(opts)
// build map of json struct tags
jsonOpts := make(map[string]reflect.StructField)
for _, field := range reflect.VisibleFields(typeOpts) {
jsonTag := strings.Split(field.Tag.Get("json"), ",")[0]
if jsonTag != "" {
jsonOpts[jsonTag] = field
}
}
valueOpts := reflect.ValueOf(&opts).Elem()
// iterate params and set values based on json struct tags
for key, val := range params {
if opt, ok := jsonOpts[key]; ok {
field := valueOpts.FieldByName(opt.Name)
if field.IsValid() && field.CanSet() {
switch field.Kind() {
case reflect.Float32:
floatVal, err := strconv.ParseFloat(val, 32)
if err != nil {
return nil, fmt.Errorf("invalid float value %s", val)
}
field.SetFloat(floatVal)
case reflect.Int:
intVal, err := strconv.ParseInt(val, 10, 0)
if err != nil {
return nil, fmt.Errorf("invalid int value %s", val)
}
field.SetInt(intVal)
case reflect.Bool:
boolVal, err := strconv.ParseBool(val)
if err != nil {
return nil, fmt.Errorf("invalid bool value %s", val)
}
field.SetBool(boolVal)
case reflect.String:
field.SetString(val)
default:
return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
}
}
}
}
bts, err := json.Marshal(opts)
if err != nil {
return nil, err
}
return bytes.NewReader(bts), nil
}
func getLayerDigests(layers []*LayerReader) ([]string, error) {
var digests []string
for _, l := range layers {
if l.Digest == "" {
return nil, fmt.Errorf("layer is missing a digest")
}
digests = append(digests, l.Digest)
}
return digests, nil
}
// CreateLayer creates a Layer object from a given file
func CreateLayer(f io.ReadSeeker) (*LayerReader, error) {
digest, size := GetSHA256Digest(f)
f.Seek(0, 0)
layer := &LayerReader{
Layer: Layer{
MediaType: "application/vnd.docker.image.rootfs.diff.tar",
Digest: digest,
Size: size,
},
Reader: f,
}
return layer, nil
}
func PushModel(name, username, password string, fn func(api.ProgressResponse)) error {
mp := ParseModelPath(name)
fn(api.ProgressResponse{Status: "retrieving manifest"})
manifest, err := GetManifest(mp)
if err != nil {
fn(api.ProgressResponse{Status: "couldn't retrieve manifest"})
return err
}
var layers []*Layer
var total int
var completed int
for _, layer := range manifest.Layers {
layers = append(layers, layer)
total += layer.Size
}
layers = append(layers, &manifest.Config)
total += manifest.Config.Size
for _, layer := range layers {
exists, err := checkBlobExistence(mp, layer.Digest, username, password)
if err != nil {
return err
}
if exists {
completed += layer.Size
fn(api.ProgressResponse{
Status: "using existing layer",
Digest: layer.Digest,
Total: total,
Completed: completed,
})
continue
}
fn(api.ProgressResponse{
Status: "starting upload",
Digest: layer.Digest,
Total: total,
Completed: completed,
})
location, err := startUpload(mp, username, password)
if err != nil {
log.Printf("couldn't start upload: %v", err)
return err
}
err = uploadBlob(location, layer, username, password)
if err != nil {
log.Printf("error uploading blob: %v", err)
return err
}
completed += layer.Size
fn(api.ProgressResponse{
Status: "upload complete",
Digest: layer.Digest,
Total: total,
Completed: completed,
})
}
fn(api.ProgressResponse{
Status: "pushing manifest",
Total: total,
Completed: completed,
})
url := fmt.Sprintf("%s://%s/v2/%s/manifests/%s", mp.ProtocolScheme, mp.Registry, mp.GetNamespaceRepository(), mp.Tag)
headers := map[string]string{
"Content-Type": "application/vnd.docker.distribution.manifest.v2+json",
}
manifestJSON, err := json.Marshal(manifest)
if err != nil {
return err
}
resp, err := makeRequest("PUT", url, headers, bytes.NewReader(manifestJSON), username, password)
if err != nil {
return err
}
defer resp.Body.Close()
// Check for success: For a successful upload, the Docker registry will respond with a 201 Created
if resp.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
}
fn(api.ProgressResponse{
Status: "success",
Total: total,
Completed: completed,
})
return nil
}
func PullModel(name, username, password string, fn func(api.ProgressResponse)) error {
mp := ParseModelPath(name)
fn(api.ProgressResponse{Status: "pulling manifest"})
manifest, err := pullModelManifest(mp, username, password)
if err != nil {
return fmt.Errorf("pull model manifest: %q", err)
}
var layers []*Layer
var total int
var completed int
for _, layer := range manifest.Layers {
layers = append(layers, layer)
total += layer.Size
}
layers = append(layers, &manifest.Config)
total += manifest.Config.Size
for _, layer := range layers {
if err := downloadBlob(mp, layer.Digest, username, password, fn); err != nil {
fn(api.ProgressResponse{Status: fmt.Sprintf("error downloading: %v", err), Digest: layer.Digest})
return err
}
completed += layer.Size
}
fn(api.ProgressResponse{Status: "writing manifest"})
manifestJSON, err := json.Marshal(manifest)
if err != nil {
return err
}
fp, err := mp.GetManifestPath(true)
if err != nil {
return err
}
err = os.WriteFile(fp, manifestJSON, 0644)
if err != nil {
log.Printf("couldn't write to %s", fp)
return err
}
fn(api.ProgressResponse{Status: "success"})
return nil
}
func pullModelManifest(mp ModelPath, username, password string) (*ManifestV2, error) {
url := fmt.Sprintf("%s://%s/v2/%s/manifests/%s", mp.ProtocolScheme, mp.Registry, mp.GetNamespaceRepository(), mp.Tag)
headers := map[string]string{
"Accept": "application/vnd.docker.distribution.manifest.v2+json",
}
resp, err := makeRequest("GET", url, headers, nil, username, password)
if err != nil {
log.Printf("couldn't get manifest: %v", err)
return nil, err
}
defer resp.Body.Close()
// Check for success: for a successful manifest fetch, the Docker registry will respond with a 200 OK
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("registry responded with code %d: %s", resp.StatusCode, body)
}
var m *ManifestV2
if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
return nil, err
}
return m, err
}
func createConfigLayer(layers []string) (*LayerReader, error) {
// TODO change architecture and OS
config := ConfigV2{
Architecture: "arm64",
OS: "linux",
RootFS: RootFS{
Type: "layers",
DiffIDs: layers,
},
}
configJSON, err := json.Marshal(config)
if err != nil {
return nil, err
}
digest, size := GetSHA256Digest(bytes.NewBuffer(configJSON))
layer := &LayerReader{
Layer: Layer{
MediaType: "application/vnd.docker.container.image.v1+json",
Digest: digest,
Size: size,
},
Reader: bytes.NewBuffer(configJSON),
}
return layer, nil
}
// GetSHA256Digest returns the SHA256 hash of a given reader, along with the number of bytes read
func GetSHA256Digest(r io.Reader) (string, int) {
h := sha256.New()
n, err := io.Copy(h, r)
if err != nil {
log.Fatal(err)
}
return fmt.Sprintf("sha256:%x", h.Sum(nil)), int(n)
}
func startUpload(mp ModelPath, username string, password string) (string, error) {
url := fmt.Sprintf("%s://%s/v2/%s/blobs/uploads/", mp.ProtocolScheme, mp.Registry, mp.GetNamespaceRepository())
resp, err := makeRequest("POST", url, nil, nil, username, password)
if err != nil {
log.Printf("couldn't start upload: %v", err)
return "", err
}
defer resp.Body.Close()
// Check for success
if resp.StatusCode != http.StatusAccepted {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("registry responded with code %d: %s", resp.StatusCode, body)
}
// Extract UUID location from header
location := resp.Header.Get("Location")
if location == "" {
return "", fmt.Errorf("location header is missing in response")
}
return location, nil
}
// checkBlobExistence checks whether a blob already exists in the Docker registry
func checkBlobExistence(mp ModelPath, digest string, username string, password string) (bool, error) {
url := fmt.Sprintf("%s://%s/v2/%s/blobs/%s", mp.ProtocolScheme, mp.Registry, mp.GetNamespaceRepository(), digest)
resp, err := makeRequest("HEAD", url, nil, nil, username, password)
if err != nil {
log.Printf("couldn't check for blob: %v", err)
return false, err
}
defer resp.Body.Close()
// Check for success: If the blob exists, the Docker registry will respond with a 200 OK
return resp.StatusCode == http.StatusOK, nil
}
func uploadBlob(location string, layer *Layer, username string, password string) error {
// Create URL
url := fmt.Sprintf("%s&digest=%s", location, layer.Digest)
headers := make(map[string]string)
headers["Content-Length"] = fmt.Sprintf("%d", layer.Size)
headers["Content-Type"] = "application/octet-stream"
// TODO change from monolithic uploads to chunked uploads
// TODO allow resumability
// TODO allow canceling uploads via DELETE
// TODO allow cross repo blob mount
fp, err := GetBlobsPath(layer.Digest)
if err != nil {
return err
}
f, err := os.Open(fp)
if err != nil {
return err
}
resp, err := makeRequest("PUT", url, headers, f, username, password)
if err != nil {
log.Printf("couldn't upload blob: %v", err)
return err
}
defer resp.Body.Close()
// Check for success: For a successful upload, the Docker registry will respond with a 201 Created
if resp.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
}
return nil
}
func downloadBlob(mp ModelPath, digest string, username, password string, fn func(api.ProgressResponse)) error {
fp, err := GetBlobsPath(digest)
if err != nil {
return err
}
if fi, _ := os.Stat(fp); fi != nil {
// we already have the file, so return
fn(api.ProgressResponse{
Digest: digest,
Total: int(fi.Size()),
Completed: int(fi.Size()),
})
return nil
}
var size int64
fi, err := os.Stat(fp + "-partial")
switch {
case errors.Is(err, os.ErrNotExist):
// noop, file doesn't exist so create it
case err != nil:
return fmt.Errorf("stat: %w", err)
default:
size = fi.Size()
}
url := fmt.Sprintf("%s://%s/v2/%s/blobs/%s", mp.ProtocolScheme, mp.Registry, mp.GetNamespaceRepository(), digest)
headers := map[string]string{
"Range": fmt.Sprintf("bytes=%d-", size),
}
resp, err := makeRequest("GET", url, headers, nil, username, password)
if err != nil {
log.Printf("couldn't download blob: %v", err)
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
body, _ := ioutil.ReadAll(resp.Body)
return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
}
err = os.MkdirAll(path.Dir(fp), 0o700)
if err != nil {
return fmt.Errorf("make blobs directory: %w", err)
}
out, err := os.OpenFile(fp+"-partial", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
if err != nil {
panic(err)
}
defer out.Close()
remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
completed := size
total := remaining + completed
for {
fn(api.ProgressResponse{
Status: fmt.Sprintf("downloading %s", digest),
Digest: digest,
Total: int(total),
Completed: int(completed),
})
if completed >= total {
if err := os.Rename(fp+"-partial", fp); err != nil {
fn(api.ProgressResponse{
Status: fmt.Sprintf("error renaming file: %v", err),
Digest: digest,
Total: int(total),
Completed: int(completed),
})
return err
}
break
}
n, err := io.CopyN(out, resp.Body, 8192)
if err != nil && !errors.Is(err, io.EOF) {
return err
}
completed += n
}
log.Printf("success getting %s\n", digest)
return nil
}
func makeRequest(method, url string, headers map[string]string, body io.Reader, username, password string) (*http.Response, error) {
req, err := http.NewRequest(method, url, body)
if err != nil {
return nil, err
}
for k, v := range headers {
req.Header.Set(k, v)
}
// TODO: better auth
if username != "" && password != "" {
req.SetBasicAuth(username, password)
}
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return fmt.Errorf("too many redirects")
}
log.Printf("redirected to: %s\n", req.URL)
return nil
},
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
return resp, nil
}
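As a rough sketch of how the pieces above compose (a fragment inside the `server` package; the prompt text is made up): a layer is just content addressed by its SHA256 digest, and `SaveLayers` writes it under the blobs directory keyed by that digest.

```go
prompt := strings.NewReader("You are a helpful assistant.")

layer, err := CreateLayer(prompt)
if err != nil {
	log.Fatal(err)
}
layer.MediaType = "application/vnd.ollama.image.prompt"

log.Printf("digest=%s size=%d", layer.Digest, layer.Size)

// Write the blob to ~/.ollama/models/blobs/<digest> (skipped if it already exists).
if err := SaveLayers([]*LayerReader{layer}, func(status string) { log.Println(status) }, false); err != nil {
	log.Fatal(err)
}
```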

115
server/modelpath.go Normal file
View File

@@ -0,0 +1,115 @@
package server
import (
"fmt"
"os"
"path/filepath"
"strings"
)
type ModelPath struct {
ProtocolScheme string
Registry string
Namespace string
Repository string
Tag string
}
const (
DefaultRegistry = "registry.ollama.ai"
DefaultNamespace = "library"
DefaultTag = "latest"
DefaultProtocolScheme = "https"
)
func ParseModelPath(name string) ModelPath {
slashParts := strings.Split(name, "/")
var registry, namespace, repository, tag string
switch len(slashParts) {
case 3:
registry = slashParts[0]
namespace = slashParts[1]
repository = strings.Split(slashParts[2], ":")[0]
case 2:
registry = DefaultRegistry
namespace = slashParts[0]
repository = strings.Split(slashParts[1], ":")[0]
case 1:
registry = DefaultRegistry
namespace = DefaultNamespace
repository = strings.Split(slashParts[0], ":")[0]
default:
fmt.Println("Invalid image format.")
return ModelPath{}
}
colonParts := strings.Split(name, ":")
if len(colonParts) == 2 {
tag = colonParts[1]
} else {
tag = DefaultTag
}
return ModelPath{
ProtocolScheme: DefaultProtocolScheme,
Registry: registry,
Namespace: namespace,
Repository: repository,
Tag: tag,
}
}
func (mp ModelPath) GetNamespaceRepository() string {
return fmt.Sprintf("%s/%s", mp.Namespace, mp.Repository)
}
func (mp ModelPath) GetFullTagname() string {
return fmt.Sprintf("%s/%s/%s:%s", mp.Registry, mp.Namespace, mp.Repository, mp.Tag)
}
func (mp ModelPath) GetShortTagname() string {
if mp.Registry == DefaultRegistry && mp.Namespace == DefaultNamespace {
return fmt.Sprintf("%s:%s", mp.Repository, mp.Tag)
}
return fmt.Sprintf("%s/%s:%s", mp.Namespace, mp.Repository, mp.Tag)
}
func (mp ModelPath) GetManifestPath(createDir bool) (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
path := filepath.Join(home, ".ollama", "models", "manifests", mp.Registry, mp.Namespace, mp.Repository, mp.Tag)
if createDir {
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return "", err
}
}
return path, nil
}
func GetManifestPath() (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, ".ollama", "models", "manifests"), nil
}
func GetBlobsPath(digest string) (string, error) {
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
path := filepath.Join(home, ".ollama", "models", "blobs", digest)
if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
return "", err
}
return path, nil
}
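For example (illustrative name), `ParseModelPath` fills in the registry, namespace, and tag defaults when they are omitted:

```go
mp := ParseModelPath("llama2:13b")
// mp.Registry   == "registry.ollama.ai"
// mp.Namespace  == "library"
// mp.Repository == "llama2"
// mp.Tag        == "13b"

fmt.Println(mp.GetFullTagname())  // registry.ollama.ai/library/llama2:13b
fmt.Println(mp.GetShortTagname()) // llama2:13b
```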

View File

@@ -1,154 +0,0 @@
package server
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"path"
"strconv"
"github.com/jmorganca/ollama/api"
)
const directoryURL = "https://ollama.ai/api/models"
type Model struct {
Name string `json:"name"`
DisplayName string `json:"display_name"`
Parameters string `json:"parameters"`
URL string `json:"url"`
ShortDescription string `json:"short_description"`
Description string `json:"description"`
PublishedBy string `json:"published_by"`
OriginalAuthor string `json:"original_author"`
OriginalURL string `json:"original_url"`
License string `json:"license"`
}
func (m *Model) FullName() string {
home, err := os.UserHomeDir()
if err != nil {
panic(err)
}
return path.Join(home, ".ollama", "models", m.Name+".bin")
}
func pull(model string, progressCh chan<- api.PullProgress) error {
remote, err := getRemote(model)
if err != nil {
return fmt.Errorf("failed to pull model: %w", err)
}
return saveModel(remote, progressCh)
}
func getRemote(model string) (*Model, error) {
// resolve the model download from our directory
resp, err := http.Get(directoryURL)
if err != nil {
return nil, fmt.Errorf("failed to get directory: %w", err)
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read directory: %w", err)
}
var models []Model
err = json.Unmarshal(body, &models)
if err != nil {
return nil, fmt.Errorf("failed to parse directory: %w", err)
}
for _, m := range models {
if m.Name == model {
return &m, nil
}
}
return nil, fmt.Errorf("model not found in directory: %s", model)
}
func saveModel(model *Model, progressCh chan<- api.PullProgress) error {
// this model's cache directory is created by the server on startup
client := &http.Client{}
req, err := http.NewRequest("GET", model.URL, nil)
if err != nil {
return fmt.Errorf("failed to download model: %w", err)
}
// check for resume
alreadyDownloaded := int64(0)
fileInfo, err := os.Stat(model.FullName())
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("failed to check resume model file: %w", err)
}
// file doesn't exist, create it now
} else {
alreadyDownloaded = fileInfo.Size()
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", alreadyDownloaded))
}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to download model: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
// already downloaded
progressCh <- api.PullProgress{
Total: alreadyDownloaded,
Completed: alreadyDownloaded,
Percent: 100,
}
return nil
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
return fmt.Errorf("failed to download model: %s", resp.Status)
}
out, err := os.OpenFile(model.FullName(), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
if err != nil {
panic(err)
}
defer out.Close()
totalSize, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
buf := make([]byte, 1024)
totalBytes := alreadyDownloaded
totalSize += alreadyDownloaded
for {
n, err := resp.Body.Read(buf)
if err != nil && err != io.EOF {
return err
}
if n == 0 {
break
}
if _, err := out.Write(buf[:n]); err != nil {
return err
}
totalBytes += int64(n)
// send progress updates
progressCh <- api.PullProgress{
Total: totalSize,
Completed: totalBytes,
Percent: float64(totalBytes) / float64(totalSize) * 100,
}
}
progressCh <- api.PullProgress{
Total: totalSize,
Completed: totalSize,
Percent: 100,
}
return nil
}

View File

@@ -1,114 +1,251 @@
package server
import (
"embed"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"math"
"net"
"net/http"
"os"
"path"
"runtime"
"path/filepath"
"strings"
"text/template"
"time"
"dario.cat/mergo"
"github.com/gin-gonic/gin"
"github.com/lithammer/fuzzysearch/fuzzy"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/llama"
)
//go:embed templates/*
var templatesFS embed.FS
var templates = template.Must(template.ParseFS(templatesFS, "templates/*.prompt"))
func cacheDir() string {
home, err := os.UserHomeDir()
if err != nil {
panic(err)
}
return path.Join(home, ".ollama")
return filepath.Join(home, ".ollama")
}
func generate(c *gin.Context) {
start := time.Now()
var req api.GenerateRequest
req.ModelOptions = api.DefaultModelOptions
req.PredictOptions = api.DefaultPredictOptions
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
model, err := GetModel(req.Model)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
opts := api.DefaultOptions()
if err := mergo.Merge(&opts, model.Options, mergo.WithOverride); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if err := mergo.Merge(&opts, req.Options, mergo.WithOverride); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
templ, err := template.New("").Parse(model.Prompt)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
var sb strings.Builder
if err = templ.Execute(&sb, req); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
req.Prompt = sb.String()
llm, err := llama.New(model.ModelPath, opts)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
defer llm.Close()
ch := make(chan any)
go func() {
defer close(ch)
llm.Predict(req.Context, req.Prompt, func(r api.GenerateResponse) {
r.Model = req.Model
r.CreatedAt = time.Now().UTC()
if r.Done {
r.TotalDuration = time.Since(start)
}
ch <- r
})
}()
streamResponse(c, ch)
}
func pull(c *gin.Context) {
var req api.PullRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ch := make(chan any)
go func() {
defer close(ch)
fn := func(r api.ProgressResponse) {
ch <- r
}
if err := PullModel(req.Name, req.Username, req.Password, fn); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
}()
streamResponse(c, ch)
}
func push(c *gin.Context) {
var req api.PushRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ch := make(chan any)
go func() {
defer close(ch)
fn := func(r api.ProgressResponse) {
ch <- r
}
if err := PushModel(req.Name, req.Username, req.Password, fn); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
}()
streamResponse(c, ch)
}
func create(c *gin.Context) {
var req api.CreateRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
// NOTE consider passing the entire Modelfile in the json instead of the path to it
file, err := os.Open(req.Path)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
defer file.Close()
ch := make(chan any)
go func() {
defer close(ch)
fn := func(status string) {
ch <- api.CreateProgress{
Status: status,
}
}
if err := CreateModel(req.Name, file, fn); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
}()
streamResponse(c, ch)
}
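// list handles GET /api/tags. It walks the manifest directory and reports the name,
// total size, and modification time of each model found locally.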
func list(c *gin.Context) {
var models []api.ListResponseModel
fp, err := GetManifestPath()
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
fi, err := os.Stat(path)
if err != nil {
log.Printf("skipping file: %s", fp)
return nil
}
path := path[len(fp)+1:]
slashIndex := strings.LastIndex(path, "/")
if slashIndex == -1 {
return nil
}
tag := path[:slashIndex] + ":" + path[slashIndex+1:]
mp := ParseModelPath(tag)
manifest, err := GetManifest(mp)
if err != nil {
log.Printf("skipping file: %s", fp)
return nil
}
model := api.ListResponseModel{
Name: mp.GetShortTagname(),
Size: manifest.GetTotalSize(),
ModifiedAt: fi.ModTime(),
}
models = append(models, model)
}
return nil
})
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, api.ListResponse{models})
}
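// Serve registers the API routes and serves HTTP requests on the provided listener.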
func Serve(ln net.Listener) error {
r := gin.Default()
r.GET("/", func(c *gin.Context) {
c.String(http.StatusOK, "Ollama is running")
})
r.POST("/api/pull", pull)
r.POST("/api/generate", generate)
r.POST("/api/create", create)
r.POST("/api/push", push)
r.GET("/api/tags", list)
log.Printf("Listening on %s", ln.Addr())
s := &http.Server{
Handler: r,
}
return s.Serve(ln)
}
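// streamResponse writes each value received on ch to the response as a single line
// of JSON (newline-delimited JSON), stopping once the channel is closed.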
func streamResponse(c *gin.Context, ch chan any) {
c.Stream(func(w io.Writer) bool {
val, ok := <-ch
if !ok {
return false
}
bts, err := json.Marshal(val)
if err != nil {
return false
}
bts = append(bts, '\n')
if _, err := w.Write(bts); err != nil {
return false
}
return true
})
}
@@ -121,129 +258,3 @@ func generate(c *gin.Context) {
func Serve(ln net.Listener) error {
r := gin.Default()
r.GET("/", func(c *gin.Context) {
c.String(http.StatusOK, "Ollama is running")
})
r.POST("api/pull", func(c *gin.Context) {
var req api.PullRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
progressCh := make(chan api.PullProgress)
go func() {
defer close(progressCh)
if err := pull(req.Model, progressCh); err != nil {
var opError *net.OpError
if errors.As(err, &opError) {
result := api.PullProgress{
Error: api.Error{
Code: http.StatusBadGateway,
Message: "failed to get models from directory",
},
}
c.JSON(http.StatusBadGateway, result)
return
}
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
}()
c.Stream(func(w io.Writer) bool {
progress, ok := <-progressCh
if !ok {
return false
}
bts, err := json.Marshal(progress)
if err != nil {
return false
}
bts = append(bts, '\n')
if _, err := w.Write(bts); err != nil {
return false
}
return true
})
})
r.POST("/api/generate", generate)
log.Printf("Listening on %s", ln.Addr())
s := &http.Server{
Handler: r,
}
return s.Serve(ln)
}
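// matchRankOne returns the candidate in targets with the smallest Levenshtein
// distance to source, along with that distance.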
func matchRankOne(source string, targets []string) (bestMatch string, bestRank int) {
bestRank = math.MaxInt
for _, target := range targets {
if rank := fuzzy.LevenshteinDistance(source, target); bestRank > rank {
bestRank = rank
bestMatch = target
}
}
return
}
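// getModelOpts copies the model-level settings from the request into llama.ModelOptions.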
func getModelOpts(req api.GenerateRequest) llama.ModelOptions {
var opts llama.ModelOptions
opts.ContextSize = req.ModelOptions.ContextSize
opts.Seed = req.ModelOptions.Seed
opts.F16Memory = req.ModelOptions.F16Memory
opts.MLock = req.ModelOptions.MLock
opts.Embeddings = req.ModelOptions.Embeddings
opts.MMap = req.ModelOptions.MMap
opts.LowVRAM = req.ModelOptions.LowVRAM
opts.NBatch = req.ModelOptions.NBatch
opts.VocabOnly = req.ModelOptions.VocabOnly
opts.NUMA = req.ModelOptions.NUMA
opts.NGPULayers = req.ModelOptions.NGPULayers
opts.MainGPU = req.ModelOptions.MainGPU
opts.TensorSplit = req.ModelOptions.TensorSplit
return opts
}
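// getPredictOpts copies the sampling settings from the request into llama.PredictOptions,
// defaulting Threads to the number of available CPUs when it is set to -1.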
func getPredictOpts(req api.GenerateRequest) llama.PredictOptions {
var opts llama.PredictOptions
if req.PredictOptions.Threads == -1 {
opts.Threads = runtime.NumCPU()
} else {
opts.Threads = req.PredictOptions.Threads
}
opts.Seed = req.PredictOptions.Seed
opts.Tokens = req.PredictOptions.Tokens
opts.Penalty = req.PredictOptions.Penalty
opts.Repeat = req.PredictOptions.Repeat
opts.Batch = req.PredictOptions.Batch
opts.NKeep = req.PredictOptions.NKeep
opts.TopK = req.PredictOptions.TopK
opts.TopP = req.PredictOptions.TopP
opts.TailFreeSamplingZ = req.PredictOptions.TailFreeSamplingZ
opts.TypicalP = req.PredictOptions.TypicalP
opts.Temperature = req.PredictOptions.Temperature
opts.FrequencyPenalty = req.PredictOptions.FrequencyPenalty
opts.PresencePenalty = req.PredictOptions.PresencePenalty
opts.Mirostat = req.PredictOptions.Mirostat
opts.MirostatTAU = req.PredictOptions.MirostatTAU
opts.MirostatETA = req.PredictOptions.MirostatETA
opts.MMap = req.PredictOptions.MMap
return opts
}


@@ -1,4 +1,6 @@
{{- if not .Context }}
Below is an instruction that describes a task. Write a response that appropriately completes the request.
{{- end }}
### Instruction:
{{ .Prompt }}


@@ -1,3 +1,5 @@
{{- if not .Context }}
A helpful assistant who helps the user with any questions asked.
{{- end }}
User: {{ .Prompt }}
Assistant:


@@ -1,4 +1,6 @@
{{- if not .Context }}
Below is an instruction that describes a task. Write a response that appropriately completes the request. Be concise. Once the request is completed, include no other text.
{{- end }}
### Instruction:
{{ .Prompt }}
### Response:


@@ -1,5 +1,7 @@
{{- if not .Context }}
### System:
You are an AI assistant that follows instruction extremely well. Help as much as you can.
{{- end }}
### User:
{{ .Prompt }}


@@ -1,4 +1,6 @@
{{ if not .Context }}
A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
{{- end }}
USER: {{ .Prompt }}
ASSISTANT:


@@ -1,4 +1,6 @@
{{- if not .Context }}
Below is an instruction that describes a task. Write a response that appropriately completes the request
{{- end }}
### Instruction: {{ .Prompt }}


@@ -0,0 +1,17 @@
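// POST /api/signup: records the submitted email address as a Segment identify event
// keyed by a random anonymous id.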
import { Analytics } from '@segment/analytics-node'
import { v4 as uuid } from 'uuid'
const analytics = new Analytics({ writeKey: process.env.TELEMETRY_WRITE_KEY || '<empty>' })
export async function POST(req: Request) {
const { email } = await req.json()
analytics.identify({
anonymousId: uuid(),
traits: {
email,
},
})
return new Response(null, { status: 200 })
}


@@ -1,44 +1,42 @@
import { NextResponse } from 'next/server'
import semver from 'semver'
export async function GET(req: Request) {
const { searchParams } = new URL(req.url)
const os = searchParams.get('os') || 'darwin'
const version = searchParams.get('version') || '0.0.0'
const res = await fetch('https://api.github.com/repos/jmorganca/ollama/releases', { next: { revalidate: 60 } })
const data = await res.json()
if (data.length === 0) {
return new Response('not found', { status: 404 })
}
const latest = data[0]
const assets = latest.assets || []
if (assets.length === 0) {
return new Response('not found', { status: 404 })
}
// todo: get the correct asset for the current arch/os
const asset = assets.find((a: any) => a.name.toLowerCase().includes(os) && a.name.toLowerCase().includes('.zip'))
if (!asset) {
return new Response('not found', { status: 404 })
}
console.log(asset)
if (semver.lt(version, latest.tag_name)) {
return NextResponse.json({ version: latest.tag_name, url: asset.browser_download_url })
}
return new Response(null, { status: 204 })
}


@@ -0,0 +1,11 @@
'use client'
import { useEffect } from 'react'
export default function Downloader({ url }: { url: string }) {
useEffect(() => {
window.location.href = url
}, [])
return null
}


@@ -1,20 +1,47 @@
import Image from 'next/image'
import Header from '../header'
import Downloader from './downloader'
import Signup from './signup'
export default async function Download() {
const res = await fetch('https://api.github.com/repos/jmorganca/ollama/releases', { next: { revalidate: 60 } })
const data = await res.json()
if (data.length === 0) {
return null
}
const latest = data[0]
const assets = latest.assets || []
if (assets.length === 0) {
return null
}
// todo: get the correct asset for the current arch/os
const asset = assets.find(
(a: any) => a.name.toLowerCase().includes('darwin') && a.name.toLowerCase().includes('.zip')
)
if (!asset) {
return null
}
return (
<>
<Header />
<main className='flex min-h-screen max-w-6xl flex-col py-20 px-16 lg:p-32 items-center mx-auto'>
<Image src='/ollama.png' width={64} height={64} alt='ollamaIcon' />
<section className='mt-12 mb-8 text-center'>
<h2 className='my-2 max-w-md text-3xl tracking-tight'>Downloading...</h2>
<h3 className='text-base text-neutral-500 mt-12 max-w-[16rem]'>
While Ollama downloads, sign up to get notified of new updates.
</h3>
<Downloader url={asset.browser_download_url} />
</section>
<Signup />
</main>
</>
)
}


@@ -0,0 +1,51 @@
'use client'
import { useState } from 'react'
export default function Signup() {
const [email, setEmail] = useState('')
const [submitting, setSubmitting] = useState(false)
const [success, setSuccess] = useState(false)
return (
<form
onSubmit={async e => {
e.preventDefault()
setSubmitting(true)
await fetch('/api/signup', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ email }),
})
setSubmitting(false)
setSuccess(true)
setEmail('')
return false
}}
className='flex self-stretch flex-col gap-3 h-32 md:mx-40 lg:mx-72'
>
<input
required
autoFocus
value={email}
onChange={e => setEmail(e.target.value)}
type='email'
placeholder='your@email.com'
className='border border-neutral-200 rounded-lg px-4 py-2 focus:outline-none placeholder-neutral-300'
/>
<input
type='submit'
value='Get updates'
disabled={submitting}
className='bg-black text-white disabled:text-neutral-200 disabled:bg-neutral-700 rounded-full px-4 py-2 focus:outline-none cursor-pointer'
/>
{success && <p className='text-center text-sm'>You&apos;re signed up for updates</p>}
</form>
)
}

web/app/header.tsx

@@ -0,0 +1,26 @@
import Link from "next/link"
const navigation = [
{ name: 'Discord', href: 'https://discord.gg/MrfB5FbNWN' },
{ name: 'Github', href: 'https://github.com/jmorganca/ollama' },
{ name: 'Download', href: '/download' },
]
export default function Header() {
return (
<header className="absolute inset-x-0 top-0 z-50">
<nav className="mx-auto flex items-center justify-between px-10 py-4">
<Link className="flex-1 font-bold" href="/">
Ollama
</Link>
<div className="flex space-x-8">
{navigation.map((item) => (
<Link key={item.name} href={item.href} className="text-sm leading-6 text-gray-900">
{item.name}
</Link>
))}
</div>
</nav>
</header >
)
}


@@ -8,7 +8,7 @@ export const metadata = {
export default function RootLayout({ children }: { children: React.ReactNode }) {
return (
<html lang='en'>
<body className='antialiased'>{children}</body>
</html>
)
}


@@ -1,34 +1,32 @@
import Image from 'next/image'
import Link from 'next/link'
import Header from './header'
export default async function Home() {
return (
<>
<Header />
<main className='flex min-h-screen max-w-6xl flex-col py-20 px-16 md:p-32 items-center mx-auto'>
<Image src='/ollama.png' width={64} height={64} alt='ollamaIcon' />
<section className='my-12 text-center'>
<div className='flex flex-col space-y-2'>
<h2 className='md:max-w-[18rem] mx-auto my-2 text-3xl tracking-tight'>Portable large language models</h2>
<h3 className='md:max-w-xs mx-auto text-base text-neutral-500'>
Bundle a model&apos;s weights, configuration, prompts, data and more into self-contained packages that run anywhere.
</h3>
</div>
<div className='mx-auto flex flex-col space-y-4 mt-12'>
<Link href='/download' className='md:mx-10 lg:mx-14 bg-black text-white rounded-full px-4 py-2 focus:outline-none cursor-pointer'>
Download
</Link>
<p className='text-neutral-500 text-sm '>
Available for macOS with Apple Silicon <br />
Windows & Linux support coming soon.
</p>
</div>
</section>
</main>
</>
)
}

web/package-lock.json (generated)

@@ -10,6 +10,7 @@
"dependencies": {
"@octokit/rest": "^19.0.13",
"@octokit/types": "^11.0.0",
"@segment/analytics-node": "^1.0.0",
"@types/node": "20.4.0",
"@types/react": "18.2.14",
"@types/react-dom": "18.2.6",
@@ -17,17 +18,21 @@
"encoding": "^0.1.13",
"eslint": "8.44.0",
"eslint-config-next": "13.4.7",
"next": "13.4.7",
"next": "13.4.9",
"postcss": "8.4.24",
"react": "18.2.0",
"react-dom": "18.2.0",
"react-icons": "^4.10.1",
"semver": "^7.5.3",
"tailwindcss": "3.3.2",
"typescript": "5.1.6"
"typescript": "5.1.6",
"uuid": "^9.0.0"
},
"devDependencies": {
"@types/semver": "^7.5.0"
"@types/semver": "^7.5.0",
"@types/uuid": "^9.0.2",
"prettier": "^3.0.0",
"prettier-plugin-tailwindcss": "^0.4.0"
}
},
"node_modules/@aashutoshrathi/word-wrap": {
@@ -190,10 +195,29 @@
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
"integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw=="
},
"node_modules/@lukeed/csprng": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@lukeed/csprng/-/csprng-1.1.0.tgz",
"integrity": "sha512-Z7C/xXCiGWsg0KuKsHTKJxbWhpI3Vs5GwLfOean7MGyVFGqdRgBbAjOCh6u4bbjPc/8MJ2pZmK/0DLdCbivLDA==",
"engines": {
"node": ">=8"
}
},
"node_modules/@lukeed/uuid": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@lukeed/uuid/-/uuid-2.0.1.tgz",
"integrity": "sha512-qC72D4+CDdjGqJvkFMMEAtancHUQ7/d/tAiHf64z8MopFDmcrtbcJuerDtFceuAfQJ2pDSfCKCtbqoGBNnwg0w==",
"dependencies": {
"@lukeed/csprng": "^1.1.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/@next/env": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/env/-/env-13.4.7.tgz",
"integrity": "sha512-ZlbiFulnwiFsW9UV1ku1OvX/oyIPLtMk9p/nnvDSwI0s7vSoZdRtxXNsaO+ZXrLv/pMbXVGq4lL8TbY9iuGmVw=="
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/env/-/env-13.4.9.tgz",
"integrity": "sha512-vuDRK05BOKfmoBYLNi2cujG2jrYbEod/ubSSyqgmEx9n/W3eZaJQdRNhTfumO+qmq/QTzLurW487n/PM/fHOkw=="
},
"node_modules/@next/eslint-plugin-next": {
"version": "13.4.7",
@@ -204,9 +228,9 @@
}
},
"node_modules/@next/swc-darwin-arm64": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.4.7.tgz",
"integrity": "sha512-VZTxPv1b59KGiv/pZHTO5Gbsdeoxcj2rU2cqJu03btMhHpn3vwzEK0gUSVC/XW96aeGO67X+cMahhwHzef24/w==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-13.4.9.tgz",
"integrity": "sha512-TVzGHpZoVBk3iDsTOQA/R6MGmFp0+17SWXMEWd6zG30AfuELmSSMe2SdPqxwXU0gbpWkJL1KgfLzy5ReN0crqQ==",
"cpu": [
"arm64"
],
@@ -219,9 +243,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.7.tgz",
"integrity": "sha512-gO2bw+2Ymmga+QYujjvDz9955xvYGrWofmxTq7m70b9pDPvl7aDFABJOZ2a8SRCuSNB5mXU8eTOmVVwyp/nAew==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-13.4.9.tgz",
"integrity": "sha512-aSfF1fhv28N2e7vrDZ6zOQ+IIthocfaxuMWGReB5GDriF0caTqtHttAvzOMgJgXQtQx6XhyaJMozLTSEXeNN+A==",
"cpu": [
"x64"
],
@@ -234,9 +258,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.7.tgz",
"integrity": "sha512-6cqp3vf1eHxjIDhEOc7Mh/s8z1cwc/l5B6ZNkOofmZVyu1zsbEM5Hmx64s12Rd9AYgGoiCz4OJ4M/oRnkE16/Q==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-13.4.9.tgz",
"integrity": "sha512-JhKoX5ECzYoTVyIy/7KykeO4Z2lVKq7HGQqvAH+Ip9UFn1MOJkOnkPRB7v4nmzqAoY+Je05Aj5wNABR1N18DMg==",
"cpu": [
"arm64"
],
@@ -249,9 +273,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.7.tgz",
"integrity": "sha512-T1kD2FWOEy5WPidOn1si0rYmWORNch4a/NR52Ghyp4q7KyxOCuiOfZzyhVC5tsLIBDH3+cNdB5DkD9afpNDaOw==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-13.4.9.tgz",
"integrity": "sha512-OOn6zZBIVkm/4j5gkPdGn4yqQt+gmXaLaSjRSO434WplV8vo2YaBNbSHaTM9wJpZTHVDYyjzuIYVEzy9/5RVZw==",
"cpu": [
"arm64"
],
@@ -264,9 +288,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.7.tgz",
"integrity": "sha512-zaEC+iEiAHNdhl6fuwl0H0shnTzQoAoJiDYBUze8QTntE/GNPfTYpYboxF5LRYIjBwETUatvE0T64W6SKDipvg==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-13.4.9.tgz",
"integrity": "sha512-iA+fJXFPpW0SwGmx/pivVU+2t4zQHNOOAr5T378PfxPHY6JtjV6/0s1vlAJUdIHeVpX98CLp9k5VuKgxiRHUpg==",
"cpu": [
"x64"
],
@@ -279,9 +303,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.7.tgz",
"integrity": "sha512-X6r12F8d8SKAtYJqLZBBMIwEqcTRvUdVm+xIq+l6pJqlgT2tNsLLf2i5Cl88xSsIytBICGsCNNHd+siD2fbWBA==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-13.4.9.tgz",
"integrity": "sha512-rlNf2WUtMM+GAQrZ9gMNdSapkVi3koSW3a+dmBVp42lfugWVvnyzca/xJlN48/7AGx8qu62WyO0ya1ikgOxh6A==",
"cpu": [
"x64"
],
@@ -294,9 +318,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.7.tgz",
"integrity": "sha512-NPnmnV+vEIxnu6SUvjnuaWRglZzw4ox5n/MQTxeUhb5iwVWFedolPFebMNwgrWu4AELwvTdGtWjqof53AiWHcw==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-13.4.9.tgz",
"integrity": "sha512-5T9ybSugXP77nw03vlgKZxD99AFTHaX8eT1ayKYYnGO9nmYhJjRPxcjU5FyYI+TdkQgEpIcH7p/guPLPR0EbKA==",
"cpu": [
"arm64"
],
@@ -309,9 +333,9 @@
}
},
"node_modules/@next/swc-win32-ia32-msvc": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.7.tgz",
"integrity": "sha512-6Hxijm6/a8XqLQpOOf/XuwWRhcuc/g4rBB2oxjgCMuV9Xlr2bLs5+lXyh8w9YbAUMYR3iC9mgOlXbHa79elmXw==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-13.4.9.tgz",
"integrity": "sha512-ojZTCt1lP2ucgpoiFgrFj07uq4CZsq4crVXpLGgQfoFq00jPKRPgesuGPaz8lg1yLfvafkU3Jd1i8snKwYR3LA==",
"cpu": [
"ia32"
],
@@ -324,9 +348,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.7.tgz",
"integrity": "sha512-sW9Yt36Db1nXJL+mTr2Wo0y+VkPWeYhygvcHj1FF0srVtV+VoDjxleKtny21QHaG05zdeZnw2fCtf2+dEqgwqA==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-13.4.9.tgz",
"integrity": "sha512-QbT03FXRNdpuL+e9pLnu+XajZdm/TtIXVYY4lA9t+9l0fLZbHXDYEKitAqxrOj37o3Vx5ufxiRAniaIebYDCgw==",
"cpu": [
"x64"
],
@@ -599,6 +623,31 @@
"resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.3.2.tgz",
"integrity": "sha512-V+MvGwaHH03hYhY+k6Ef/xKd6RYlc4q8WBx+2ANmipHJcKuktNcI/NgEsJgdSUF6Lw32njT6OnrRsKYCdgHjYw=="
},
"node_modules/@segment/analytics-core": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/@segment/analytics-core/-/analytics-core-1.3.0.tgz",
"integrity": "sha512-ujScWZH49NK1hYlp2/EMw45nOPEh+pmTydAnR6gSkRNucZD4fuinvpPL03rmFCw8ibaMuKLAdgPJfQ0gkLKZ5A==",
"dependencies": {
"@lukeed/uuid": "^2.0.0",
"dset": "^3.1.2",
"tslib": "^2.4.1"
}
},
"node_modules/@segment/analytics-node": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/@segment/analytics-node/-/analytics-node-1.0.0.tgz",
"integrity": "sha512-UWFujSxRkRauZuMVF4MPOT5QPvX4i7kiC2QCsozHhltoTiR2SBWRI86cYO/JI/Uk7qKaOxxGFDkJarCyIP7uLA==",
"dependencies": {
"@lukeed/uuid": "^2.0.0",
"@segment/analytics-core": "1.3.0",
"buffer": "^6.0.3",
"node-fetch": "^2.6.7",
"tslib": "^2.4.1"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@swc/helpers": {
"version": "0.5.1",
"resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.1.tgz",
@@ -651,6 +700,12 @@
"integrity": "sha512-G8hZ6XJiHnuhQKR7ZmysCeJWE08o8T0AXtk5darsCaTVsYZhhgUrq53jizaR2FvsoeCwJhlmwTjkXBY5Pn/ZHw==",
"dev": true
},
"node_modules/@types/uuid": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-9.0.2.tgz",
"integrity": "sha512-kNnC1GFBLuhImSnV7w4njQkUiJi0ZXUycu1rUaouPqiKlXkh77JKgdRnTAp1x5eBwcIwbtI+3otwzuIDEuDoxQ==",
"dev": true
},
"node_modules/@typescript-eslint/parser": {
"version": "5.60.1",
"resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-5.60.1.tgz",
@@ -991,6 +1046,25 @@
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
},
"node_modules/base64-js": {
"version": "1.5.1",
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
"integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/before-after-hook": {
"version": "2.2.3",
"resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.2.3.tgz",
@@ -1074,6 +1148,29 @@
"node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
}
},
"node_modules/buffer": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/buffer/-/buffer-6.0.3.tgz",
"integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"dependencies": {
"base64-js": "^1.3.1",
"ieee754": "^1.2.1"
}
},
"node_modules/bundle-name": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-3.0.0.tgz",
@@ -1390,6 +1487,14 @@
"node": ">=6.0.0"
}
},
"node_modules/dset": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/dset/-/dset-3.1.2.tgz",
"integrity": "sha512-g/M9sqy3oHe477Ar4voQxWtaPIFw1jTdKZuomOjhCcBx9nHUNn0pu6NopuFFrTh/TRZIKEj+76vLWFu9BNKk+Q==",
"engines": {
"node": ">=4"
}
},
"node_modules/electron-to-chromium": {
"version": "1.4.447",
"resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.447.tgz",
@@ -1747,9 +1852,9 @@
}
},
"node_modules/eslint-plugin-import/node_modules/semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
@@ -1784,9 +1889,9 @@
}
},
"node_modules/eslint-plugin-jsx-a11y/node_modules/semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
@@ -1858,9 +1963,9 @@
}
},
"node_modules/eslint-plugin-react/node_modules/semver": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
"integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
@@ -2385,6 +2490,25 @@
"node": ">=0.10.0"
}
},
"node_modules/ieee754": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
"integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
]
},
"node_modules/ignore": {
"version": "5.2.4",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
@@ -2983,11 +3107,11 @@
"integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw=="
},
"node_modules/next": {
"version": "13.4.7",
"resolved": "https://registry.npmjs.org/next/-/next-13.4.7.tgz",
"integrity": "sha512-M8z3k9VmG51SRT6v5uDKdJXcAqLzP3C+vaKfLIAM0Mhx1um1G7MDnO63+m52qPdZfrTFzMZNzfsgvm3ghuVHIQ==",
"version": "13.4.9",
"resolved": "https://registry.npmjs.org/next/-/next-13.4.9.tgz",
"integrity": "sha512-vtefFm/BWIi/eWOqf1GsmKG3cjKw1k3LjuefKRcL3iiLl3zWzFdPG3as6xtxrGO6gwTzzaO1ktL4oiHt/uvTjA==",
"dependencies": {
"@next/env": "13.4.7",
"@next/env": "13.4.9",
"@swc/helpers": "0.5.1",
"busboy": "1.6.0",
"caniuse-lite": "^1.0.30001406",
@@ -3003,15 +3127,15 @@
"node": ">=16.8.0"
},
"optionalDependencies": {
"@next/swc-darwin-arm64": "13.4.7",
"@next/swc-darwin-x64": "13.4.7",
"@next/swc-linux-arm64-gnu": "13.4.7",
"@next/swc-linux-arm64-musl": "13.4.7",
"@next/swc-linux-x64-gnu": "13.4.7",
"@next/swc-linux-x64-musl": "13.4.7",
"@next/swc-win32-arm64-msvc": "13.4.7",
"@next/swc-win32-ia32-msvc": "13.4.7",
"@next/swc-win32-x64-msvc": "13.4.7"
"@next/swc-darwin-arm64": "13.4.9",
"@next/swc-darwin-x64": "13.4.9",
"@next/swc-linux-arm64-gnu": "13.4.9",
"@next/swc-linux-arm64-musl": "13.4.9",
"@next/swc-linux-x64-gnu": "13.4.9",
"@next/swc-linux-x64-musl": "13.4.9",
"@next/swc-win32-arm64-msvc": "13.4.9",
"@next/swc-win32-ia32-msvc": "13.4.9",
"@next/swc-win32-x64-msvc": "13.4.9"
},
"peerDependencies": {
"@opentelemetry/api": "^1.1.0",
@@ -3521,6 +3645,95 @@
"node": ">= 0.8.0"
}
},
"node_modules/prettier": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/prettier/-/prettier-3.0.0.tgz",
"integrity": "sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==",
"dev": true,
"bin": {
"prettier": "bin/prettier.cjs"
},
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/prettier/prettier?sponsor=1"
}
},
"node_modules/prettier-plugin-tailwindcss": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.4.0.tgz",
"integrity": "sha512-Rna0sDPETA0KNhMHlN8wxKNgfSa8mTl2hPPAGxnbv6tUcHT6J4RQmQ8TLXyhB7Dm5Von4iHloBxTyClYM6wT0A==",
"dev": true,
"engines": {
"node": ">=12.17.0"
},
"peerDependencies": {
"@ianvs/prettier-plugin-sort-imports": "*",
"@prettier/plugin-pug": "*",
"@shopify/prettier-plugin-liquid": "*",
"@shufo/prettier-plugin-blade": "*",
"@trivago/prettier-plugin-sort-imports": "*",
"prettier": "^2.2 || ^3.0",
"prettier-plugin-astro": "*",
"prettier-plugin-css-order": "*",
"prettier-plugin-import-sort": "*",
"prettier-plugin-jsdoc": "*",
"prettier-plugin-marko": "*",
"prettier-plugin-organize-attributes": "*",
"prettier-plugin-organize-imports": "*",
"prettier-plugin-style-order": "*",
"prettier-plugin-svelte": "*",
"prettier-plugin-twig-melody": "*"
},
"peerDependenciesMeta": {
"@ianvs/prettier-plugin-sort-imports": {
"optional": true
},
"@prettier/plugin-pug": {
"optional": true
},
"@shopify/prettier-plugin-liquid": {
"optional": true
},
"@shufo/prettier-plugin-blade": {
"optional": true
},
"@trivago/prettier-plugin-sort-imports": {
"optional": true
},
"prettier-plugin-astro": {
"optional": true
},
"prettier-plugin-css-order": {
"optional": true
},
"prettier-plugin-import-sort": {
"optional": true
},
"prettier-plugin-jsdoc": {
"optional": true
},
"prettier-plugin-marko": {
"optional": true
},
"prettier-plugin-organize-attributes": {
"optional": true
},
"prettier-plugin-organize-imports": {
"optional": true
},
"prettier-plugin-style-order": {
"optional": true
},
"prettier-plugin-svelte": {
"optional": true
},
"prettier-plugin-twig-melody": {
"optional": true
}
}
},
"node_modules/prop-types": {
"version": "15.8.1",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
@@ -4360,6 +4573,14 @@
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
},
"node_modules/uuid": {
"version": "9.0.0",
"resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz",
"integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==",
"bin": {
"uuid": "dist/bin/uuid"
}
},
"node_modules/watchpack": {
"version": "2.4.0",
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz",


@@ -10,6 +10,7 @@
"dependencies": {
"@octokit/rest": "^19.0.13",
"@octokit/types": "^11.0.0",
"@segment/analytics-node": "^1.0.0",
"@types/node": "20.4.0",
"@types/react": "18.2.14",
"@types/react-dom": "18.2.6",
@@ -17,16 +18,20 @@
"encoding": "^0.1.13",
"eslint": "8.44.0",
"eslint-config-next": "13.4.7",
"next": "13.4.7",
"next": "13.4.9",
"postcss": "8.4.24",
"react": "18.2.0",
"react-dom": "18.2.0",
"react-icons": "^4.10.1",
"semver": "^7.5.3",
"tailwindcss": "3.3.2",
"typescript": "5.1.6"
"typescript": "5.1.6",
"uuid": "^9.0.0"
},
"devDependencies": {
"@types/semver": "^7.5.0"
"@types/semver": "^7.5.0",
"@types/uuid": "^9.0.2",
"prettier": "^3.0.0",
"prettier-plugin-tailwindcss": "^0.4.0"
}
}

web/vercel.json

@@ -0,0 +1,5 @@
{
"github": {
"silent": true
}
}