Compare commits
170 Commits
royh-opena...v0.2.0

Commits:

53da2c6965, d8def1ff94, 571dc61955, 0e09c380fc, 0ee87615c7, f8241bfba3, 4607c70641, c12f1c5b99, a08f20d910, 6cea036027, 5796bfc401, f1a379aa56, 9ae146993e, e0348d3fe8, 2cc854f8cb, 5304b765b2, 4fd5f3526a, 842f85f758, 9d30f9f8b3, 631cfd9e62, 78fb33dd07, 8f8e736b13, d89454de80, af28b94533, e9188e971a, 78eddfc068, 02c24d3d01, 52abc8acb7, 4d71c559b2, 0d16eb310e, 8072e205ff, 955f2a4e03, 3c75113e37, ccd7785859, 3b5a4a77f3, daed0634a9, 0d4dd707bc, 0e982bc1f4, 6298f49816, ef757da2c9, e5352297d9, 65a5040e09, d626b99b54, dddb58a38b, 400056e154, d2f19024d0, 69c04eecc4, 996bb1b85e, 422dcc3856, 020bd60ab2, 8e277b72bb, 4f67b39d26, 2425281317, 0403e9860e, 33a65e3ba3, 88bcd79bb9, 7e571f95f0, da8e2a0447, a30915bde1, 58e3fff311, 3f0b309ad4, e70610ef06, dfded7e075, 173b550438, cff3f44f4a, 26e4e66faf, 97c9e11768, 3518aaef33, 1963c00201, 27402cb7a2, c1218199cf, 717f7229eb, aae56abb7c, 5f034f5b63, b910fa9010, 6d4219083c, 1ed4f521c4, de2163dafd, 9bd00041fa, 4e986a823c, 2cc7d05012, 123a722a6f, 4d311eb731, cb42e607c5, 2aa91a937b, ccef9431c8, 642cee1342, 9a9e7d83c4, 9929751cc8, 17b7186cd7, 189a43caa2, e835ef1836, 7e7749224c, c7c2f3bc22, 54a79d6a8a, 5bf5aeec01, e01e535cbb, 0195d6a2f8, 8e0641a9bf, 662568d453, 4ebb66c662, 23e899f32d, fedf71635e, 97c59be653, 9d8a4988e8, 1ae0750a21, 9d91e5e587, 96624aa412, 10f33b8537, 4a633cc295, d34d88e417, 52ce350b7a, 2abebb2cbe, 380e06e5be, badf975e45, 755b4e4fc2, 1a1c99e334, 21adf8b6d2, e873841cbb, 26d0bf9236, 359b15a597, b55958a587, 7784ca33ce, c9c8c98bf6, 171796791f, 176d0f7075, 8ed51cac37, c9e6f0542d, b0930626c5, e890be4814, b2799f111b, 152fc202f5, 4ad0d4d6d3, 163cd3e77c, 4c2c8f93dd, fd1e6e0590, 89c79bec8c, c7b77004e3, 07d143f412, a12283e2ff, 4b0050cf0e, 0577af98f4, 17ce203a26, d76555ffb5, 2786dff5d3, 225f0d1219, 532db58311, 6be309e1bd, da3bf23354, 26ab67732b, 45cacbaf05, 17df6520c8, 6f351bf586, ff4f0cbd1d, fc37c192ae, 434dfe30c5, 4e2b7e181d, 48702dd149, 68dfc6236a, 5e8ff556cb, 6fd04ca922, 206797bda4, 43ed358f9a, b32ebb4f29, fb9cdfa723, efac488675, 6b800aa7b7, dd7c9ebeaf, 4dc7fb9525, 157f09acdf
.github/workflows/release.yaml (vendored, 35 changes)

@@ -304,6 +304,11 @@ jobs:
write-host "Installing plugin"
& "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
write-host "plugin installed"
- name: remove unwanted mingw dll.a files
run: |
Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force
Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force
Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force
- uses: actions/setup-go@v5
with:
go-version-file: go.mod

@@ -437,6 +442,7 @@ jobs:
env:
OLLAMA_SKIP_IMAGE_BUILD: '1'
PUSH: '1'
GH_TOKEN: ${{ github.token }}
steps:
- uses: actions/checkout@v4
- name: Set Version

@@ -460,15 +466,20 @@ jobs:
ls -lh dist/
(cd dist; sha256sum * > sha256sum.txt)
cat dist/sha256sum.txt
- uses: ncipollo/release-action@v1
with:
name: ${{ env.RELEASE_VERSION }}
allowUpdates: true
artifacts: 'dist/*'
draft: true
prerelease: true
omitBodyDuringUpdate: true
generateReleaseNotes: true
omitDraftDuringUpdate: true
omitPrereleaseDuringUpdate: true
replacesArtifacts: true
- name: Create or update Release
run: |
echo "Looking for existing release for ${{ env.RELEASE_VERSION }}"
OLD_TAG=$(gh release ls --json name,tagName | jq -r ".[] | select(.name == \"${{ env.RELEASE_VERSION }}\") | .tagName")
if [ -n "$OLD_TAG" ]; then
echo "Updating release ${{ env.RELEASE_VERSION }} to point to new tag ${GITHUB_REF_NAME}"
gh release edit ${OLD_TAG} --tag ${GITHUB_REF_NAME}
else
echo "Creating new release ${{ env.RELEASE_VERSION }} pointing to tag ${GITHUB_REF_NAME}"
gh release create ${GITHUB_REF_NAME} \
--title ${{ env.RELEASE_VERSION }} \
--draft \
--generate-notes \
--prerelease
fi
echo "Uploading artifacts for tag ${GITHUB_REF_NAME}"
gh release upload ${GITHUB_REF_NAME} dist/* --clobber
.github/workflows/test.yaml (vendored, 4 changes)

@@ -58,6 +58,7 @@ jobs:
runs-on: ${{ matrix.os }}
env:
GOARCH: ${{ matrix.arch }}
CGO_ENABLED: '1'
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5

@@ -79,6 +80,7 @@ jobs:
- run: go generate -x ./...
if: ${{ ! startsWith(matrix.os, 'windows-') }}
name: 'Unix Go Generate'
- run: go build .
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.os }}-${{ matrix.arch }}-libraries

@@ -124,7 +126,7 @@ jobs:
strategy:
matrix:
rocm-version:
- '6.0.2'
- '6.1.1'
runs-on: linux
container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }}
steps:
@@ -2,7 +2,7 @@ ARG GOLANG_VERSION=1.22.1
ARG CMAKE_VERSION=3.22.1
# this CUDA_VERSION corresponds with the one specified in docs/gpu.md
ARG CUDA_VERSION=11.3.1
ARG ROCM_VERSION=6.0.2
ARG ROCM_VERSION=6.1.1

# Copy the minimal context we need to run the generate scripts
FROM scratch AS llm-code

@@ -70,12 +70,12 @@ RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx" sh gen_linux.sh
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx2-build-amd64
RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx2" sh gen_linux.sh

FROM --platform=linux/arm64 centos:7 AS cpu-builder-arm64
FROM --platform=linux/arm64 rockylinux:8 AS cpu-builder-arm64
ARG CMAKE_VERSION
ARG GOLANG_VERSION
COPY ./scripts/rh_linux_deps.sh /
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
COPY --from=llm-code / /go/src/github.com/ollama/ollama/
ARG OLLAMA_CUSTOM_CPU_DEFS
ARG CGO_CFLAGS
README.md (11 changes)

@@ -53,8 +53,8 @@ Here are some example models that can be downloaded:
| Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
| Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` |
| Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` |
| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
| Gemma 2 | 9B | 5.5GB | `ollama run gemma2` |
| Gemma 2 | 27B | 16GB | `ollama run gemma2:27b` |
| Mistral | 7B | 4.1GB | `ollama run mistral` |
| Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |

@@ -182,6 +182,12 @@ $ ollama run llama3 "Summarize this file: $(cat README.md)"
Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
```

### Show model information

```
ollama show llama3
```

### List models on your computer

```

@@ -286,6 +292,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
- [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
- [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama)
- [Ollama with Google Mesop](https://github.com/rapidarchitect/ollama_mesop/) (Mesop Chat Client implementation with Ollama)

### Terminal
api/types.go (73 changes)

@@ -159,18 +159,18 @@ type Options struct {

// Runner options which must be set when the model is loaded into memory
type Runner struct {
UseNUMA bool `json:"numa,omitempty"`
NumCtx int `json:"num_ctx,omitempty"`
NumBatch int `json:"num_batch,omitempty"`
NumGPU int `json:"num_gpu,omitempty"`
MainGPU int `json:"main_gpu,omitempty"`
LowVRAM bool `json:"low_vram,omitempty"`
F16KV bool `json:"f16_kv,omitempty"`
LogitsAll bool `json:"logits_all,omitempty"`
VocabOnly bool `json:"vocab_only,omitempty"`
UseMMap bool `json:"use_mmap,omitempty"`
UseMLock bool `json:"use_mlock,omitempty"`
NumThread int `json:"num_thread,omitempty"`
UseNUMA bool `json:"numa,omitempty"`
NumCtx int `json:"num_ctx,omitempty"`
NumBatch int `json:"num_batch,omitempty"`
NumGPU int `json:"num_gpu,omitempty"`
MainGPU int `json:"main_gpu,omitempty"`
LowVRAM bool `json:"low_vram,omitempty"`
F16KV bool `json:"f16_kv,omitempty"`
LogitsAll bool `json:"logits_all,omitempty"`
VocabOnly bool `json:"vocab_only,omitempty"`
UseMMap *bool `json:"use_mmap,omitempty"`
UseMLock bool `json:"use_mlock,omitempty"`
NumThread int `json:"num_thread,omitempty"`
}

// EmbeddingRequest is the request passed to [Client.Embeddings].

@@ -222,6 +222,7 @@ type ShowRequest struct {
Model string `json:"model"`
System string `json:"system"`
Template string `json:"template"`
Verbose bool `json:"verbose"`

Options map[string]interface{} `json:"options"`

@@ -231,13 +232,16 @@ type ShowRequest struct {

// ShowResponse is the response returned from [Client.Show].
type ShowResponse struct {
License string `json:"license,omitempty"`
Modelfile string `json:"modelfile,omitempty"`
Parameters string `json:"parameters,omitempty"`
Template string `json:"template,omitempty"`
System string `json:"system,omitempty"`
Details ModelDetails `json:"details,omitempty"`
Messages []Message `json:"messages,omitempty"`
License string `json:"license,omitempty"`
Modelfile string `json:"modelfile,omitempty"`
Parameters string `json:"parameters,omitempty"`
Template string `json:"template,omitempty"`
System string `json:"system,omitempty"`
Details ModelDetails `json:"details,omitempty"`
Messages []Message `json:"messages,omitempty"`
ModelInfo map[string]any `json:"model_info,omitempty"`
ProjectorInfo map[string]any `json:"projector_info,omitempty"`
ModifiedAt time.Time `json:"modified_at,omitempty"`
}

// CopyRequest is the request passed to [Client.Copy].

@@ -310,6 +314,13 @@ type ProcessModelResponse struct {
SizeVRAM int64 `json:"size_vram"`
}

type RetrieveModelResponse struct {
Id string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
OwnedBy string `json:"owned_by"`
}

type TokenResponse struct {
Token string `json:"token"`
}

@@ -448,6 +459,17 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
slice[i] = str
}
field.Set(reflect.ValueOf(slice))
case reflect.Pointer:
var b bool
if field.Type() == reflect.TypeOf(&b) {
val, ok := val.(bool)
if !ok {
return fmt.Errorf("option %q must be of type boolean", key)
}
field.Set(reflect.ValueOf(&val))
} else {
return fmt.Errorf("unknown type loading config params: %v %v", field.Kind(), field.Type())
}
default:
return fmt.Errorf("unknown type loading config params: %v", field.Kind())
}

@@ -490,7 +512,7 @@ func DefaultOptions() Options {
LowVRAM: false,
F16KV: true,
UseMLock: false,
UseMMap: true,
UseMMap: nil,
UseNUMA: false,
},
}

@@ -587,6 +609,17 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) {
case reflect.Slice:
// TODO: only string slices are supported right now
out[key] = vals
case reflect.Pointer:
var b bool
if field.Type() == reflect.TypeOf(&b) {
boolVal, err := strconv.ParseBool(vals[0])
if err != nil {
return nil, fmt.Errorf("invalid bool value %s", vals)
}
out[key] = &boolVal
} else {
return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
}
default:
return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
}
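The switch from `UseMMap bool` to `UseMMap *bool` above makes the option tri-state: unset (nil), explicitly true, or explicitly false. A minimal, self-contained sketch of that pattern follows; it is not the Ollama code itself, and the type and field names are illustrative only.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// options mirrors the idea behind the Runner change: a *bool field can
// distinguish "not provided" from an explicit true/false in JSON.
type options struct {
	UseMMap *bool `json:"use_mmap,omitempty"`
}

func describe(raw string) string {
	var o options
	if err := json.Unmarshal([]byte(raw), &o); err != nil {
		return "error: " + err.Error()
	}
	switch {
	case o.UseMMap == nil:
		return "unset (let the server decide)"
	case *o.UseMMap:
		return "explicitly enabled"
	default:
		return "explicitly disabled"
	}
}

func main() {
	fmt.Println(describe(`{}`))                  // unset
	fmt.Println(describe(`{"use_mmap": true}`))  // explicitly enabled
	fmt.Println(describe(`{"use_mmap": false}`)) // explicitly disabled
}
```

The test file in the next section exercises exactly this distinction through `FromMap` and `FormatParams`.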
@@ -2,6 +2,7 @@ package api

import (
"encoding/json"
"fmt"
"math"
"testing"
"time"

@@ -105,3 +106,105 @@ func TestDurationMarshalUnmarshal(t *testing.T) {
})
}
}

func TestUseMmapParsingFromJSON(t *testing.T) {
tr := true
fa := false
tests := []struct {
name string
req string
exp *bool
}{
{
name: "Undefined",
req: `{ }`,
exp: nil,
},
{
name: "True",
req: `{ "use_mmap": true }`,
exp: &tr,
},
{
name: "False",
req: `{ "use_mmap": false }`,
exp: &fa,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var oMap map[string]interface{}
err := json.Unmarshal([]byte(test.req), &oMap)
require.NoError(t, err)
opts := DefaultOptions()
err = opts.FromMap(oMap)
require.NoError(t, err)
assert.Equal(t, test.exp, opts.UseMMap)
})
}
}

func TestUseMmapFormatParams(t *testing.T) {
tr := true
fa := false
tests := []struct {
name string
req map[string][]string
exp *bool
err error
}{
{
name: "True",
req: map[string][]string{
"use_mmap": {"true"},
},
exp: &tr,
err: nil,
},
{
name: "False",
req: map[string][]string{
"use_mmap": {"false"},
},
exp: &fa,
err: nil,
},
{
name: "Numeric True",
req: map[string][]string{
"use_mmap": {"1"},
},
exp: &tr,
err: nil,
},
{
name: "Numeric False",
req: map[string][]string{
"use_mmap": {"0"},
},
exp: &fa,
err: nil,
},
{
name: "invalid string",
req: map[string][]string{
"use_mmap": {"foo"},
},
exp: nil,
err: fmt.Errorf("invalid bool value [foo]"),
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
resp, err := FormatParams(test.req)
require.Equal(t, test.err, err)
respVal, ok := resp["use_mmap"]
if test.exp != nil {
assert.True(t, ok, "resp: %v", resp)
assert.Equal(t, *test.exp, *respVal.(*bool))
}
})
}
}
@@ -5,6 +5,8 @@ import (
"log/slog"
"os"
"path/filepath"
"strconv"
"strings"

"github.com/ollama/ollama/envconfig"
)

@@ -24,6 +26,7 @@ func InitLogging() {
logFile = os.Stderr
// TODO - write one-line to the app.log file saying we're running in console mode to help avoid confusion
} else {
rotateLogs(AppLogFile)
logFile, err = os.OpenFile(AppLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
if err != nil {
slog.Error(fmt.Sprintf("failed to create server log %v", err))

@@ -46,3 +49,32 @@ func InitLogging() {

slog.Info("ollama app started")
}

func rotateLogs(logFile string) {
if _, err := os.Stat(logFile); os.IsNotExist(err) {
return
}
index := strings.LastIndex(logFile, ".")
pre := logFile[:index]
post := "." + logFile[index+1:]
for i := LogRotationCount; i > 0; i-- {
older := pre + "-" + strconv.Itoa(i) + post
newer := pre + "-" + strconv.Itoa(i-1) + post
if i == 1 {
newer = pre + post
}
if _, err := os.Stat(newer); err == nil {
if _, err := os.Stat(older); err == nil {
err := os.Remove(older)
if err != nil {
slog.Warn("Failed to remove older log", "older", older, "error", err)
continue
}
}
err := os.Rename(newer, older)
if err != nil {
slog.Warn("Failed to rotate log", "older", older, "newer", newer, "error", err)
}
}
}
}
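For reference, a small standalone sketch of the rename chain that `rotateLogs` walks with `LogRotationCount = 5` (illustrative only, using `app.log` as an assumed example name; it prints the renames rather than performing them):

```go
package main

import "fmt"

// Illustrates the rotation order: the oldest slot (app-5.log) is dropped,
// each remaining file shifts down one slot, and the current app.log becomes
// app-1.log before a fresh app.log is created by the caller.
func main() {
	const logRotationCount = 5
	pre, post := "app", ".log"
	for i := logRotationCount; i > 0; i-- {
		older := fmt.Sprintf("%s-%d%s", pre, i, post)
		newer := fmt.Sprintf("%s-%d%s", pre, i-1, post)
		if i == 1 {
			newer = pre + post
		}
		fmt.Printf("%s -> %s\n", newer, older)
	}
}
```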
app/lifecycle/logging_test.go (new file, 44 changes)

@@ -0,0 +1,44 @@
package lifecycle

import (
"os"
"path/filepath"
"strconv"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestRotateLogs(t *testing.T) {
logDir := t.TempDir()
logFile := filepath.Join(logDir, "testlog.log")

// No log exists
rotateLogs(logFile)

require.NoError(t, os.WriteFile(logFile, []byte("1"), 0644))
assert.FileExists(t, logFile)
// First rotation
rotateLogs(logFile)
assert.FileExists(t, filepath.Join(logDir, "testlog-1.log"))
assert.NoFileExists(t, filepath.Join(logDir, "testlog-2.log"))
assert.NoFileExists(t, logFile)

// Should be a no-op without a new log
rotateLogs(logFile)
assert.FileExists(t, filepath.Join(logDir, "testlog-1.log"))
assert.NoFileExists(t, filepath.Join(logDir, "testlog-2.log"))
assert.NoFileExists(t, logFile)

for i := 2; i <= LogRotationCount+1; i++ {
require.NoError(t, os.WriteFile(logFile, []byte(strconv.Itoa(i)), 0644))
assert.FileExists(t, logFile)
rotateLogs(logFile)
assert.NoFileExists(t, logFile)
for j := 1; j < i; j++ {
assert.FileExists(t, filepath.Join(logDir, "testlog-"+strconv.Itoa(j)+".log"))
}
assert.NoFileExists(t, filepath.Join(logDir, "testlog-"+strconv.Itoa(i+1)+".log"))
}
}
@@ -16,11 +16,12 @@ var (
AppDir = "/opt/Ollama"
AppDataDir = "/opt/Ollama"
// TODO - should there be a distinct log dir?
UpdateStageDir = "/tmp"
AppLogFile = "/tmp/ollama_app.log"
ServerLogFile = "/tmp/ollama.log"
UpgradeLogFile = "/tmp/ollama_update.log"
Installer = "OllamaSetup.exe"
UpdateStageDir = "/tmp"
AppLogFile = "/tmp/ollama_app.log"
ServerLogFile = "/tmp/ollama.log"
UpgradeLogFile = "/tmp/ollama_update.log"
Installer = "OllamaSetup.exe"
LogRotationCount = 5
)

func init() {
@@ -54,7 +54,7 @@ func start(ctx context.Context, command string) (*exec.Cmd, error) {
return nil, fmt.Errorf("failed to spawn server stderr pipe: %w", err)
}

// TODO - rotation
rotateLogs(ServerLogFile)
logFile, err := os.OpenFile(ServerLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
if err != nil {
return nil, fmt.Errorf("failed to create server log: %w", err)
@@ -88,10 +88,15 @@ DialogFontSize=12
[Files]
Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
Source: "..\dist\windows-{#ARCH}\*.dll"; DestDir: "{app}"; Flags: ignoreversion 64bit
Source: "..\dist\windows-{#ARCH}\ollama_runners\*"; DestDir: "{app}\ollama_runners"; Flags: ignoreversion 64bit recursesubdirs
Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion
#if DirExists("..\dist\windows-amd64\cuda")
Source: "..\dist\windows-amd64\cuda\*"; DestDir: "{app}\cuda\"; Flags: ignoreversion recursesubdirs
#endif
#if DirExists("..\dist\windows-amd64\oneapi")
Source: "..\dist\windows-amd64\oneapi\*"; DestDir: "{app}\oneapi\"; Flags: ignoreversion recursesubdirs
#endif
#if DirExists("..\dist\windows-amd64\rocm")
Source: "..\dist\windows-amd64\rocm\*"; DestDir: "{app}\rocm\"; Flags: ignoreversion recursesubdirs
#endif
cmd/cmd.go (218 changes)

@@ -162,9 +162,6 @@ func tempZipFiles(path string) (string, error) {
}
defer tempfile.Close()

zipfile := zip.NewWriter(tempfile)
defer zipfile.Close()

detectContentType := func(path string) (string, error) {
f, err := os.Open(path)
if err != nil {

@@ -233,6 +230,9 @@ func tempZipFiles(path string) (string, error) {
files = append(files, tks...)
}

zipfile := zip.NewWriter(tempfile)
defer zipfile.Close()

for _, file := range files {
f, err := os.Open(file)
if err != nil {

@@ -287,38 +287,12 @@ func createBlob(cmd *cobra.Command, client *api.Client, path string) (string, er
}

func RunHandler(cmd *cobra.Command, args []string) error {
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}

name := args[0]

// check if the model exists on the server
show, err := client.Show(cmd.Context(), &api.ShowRequest{Name: name})
var statusError api.StatusError
switch {
case errors.As(err, &statusError) && statusError.StatusCode == http.StatusNotFound:
if err := PullHandler(cmd, []string{name}); err != nil {
return err
}

show, err = client.Show(cmd.Context(), &api.ShowRequest{Name: name})
if err != nil {
return err
}
case err != nil:
return err
}

interactive := true

opts := runOptions{
Model: args[0],
WordWrap: os.Getenv("TERM") == "xterm-256color",
Options: map[string]interface{}{},
MultiModal: slices.Contains(show.Details.Families, "clip"),
ParentModel: show.Details.ParentModel,
Model: args[0],
WordWrap: os.Getenv("TERM") == "xterm-256color",
Options: map[string]interface{}{},
}

format, err := cmd.Flags().GetString("format")

@@ -362,11 +336,38 @@ func RunHandler(cmd *cobra.Command, args []string) error {
}
opts.WordWrap = !nowrap

if !interactive {
return generate(cmd, opts)
// Fill out the rest of the options based on information about the
// model.
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}

return generateInteractive(cmd, opts)
name := args[0]
info, err := func() (*api.ShowResponse, error) {
showReq := &api.ShowRequest{Name: name}
info, err := client.Show(cmd.Context(), showReq)
var se api.StatusError
if errors.As(err, &se) && se.StatusCode == http.StatusNotFound {
if err := PullHandler(cmd, []string{name}); err != nil {
return nil, err
}
return client.Show(cmd.Context(), &api.ShowRequest{Name: name})
}
return info, err
}()
if err != nil {
return err
}

opts.MultiModal = slices.Contains(info.Details.Families, "clip")
opts.ParentModel = info.Details.ParentModel
opts.Messages = append(opts.Messages, info.Messages...)

if interactive {
return generateInteractive(cmd, opts)
}
return generate(cmd, opts)
}

func errFromUnknownKey(unknownKeyErr error) error {

@@ -579,10 +580,6 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
return err
}

if len(args) != 1 {
return errors.New("missing model name")
}

license, errLicense := cmd.Flags().GetBool("license")
modelfile, errModelfile := cmd.Flags().GetBool("modelfile")
parameters, errParams := cmd.Flags().GetBool("parameters")

@@ -625,8 +622,6 @@ func ShowHandler(cmd *cobra.Command, args []string) error {

if flagsSet > 1 {
return errors.New("only one of '--license', '--modelfile', '--parameters', '--system', or '--template' can be specified")
} else if flagsSet == 0 {
return errors.New("one of '--license', '--modelfile', '--parameters', '--system', or '--template' must be specified")
}

req := api.ShowRequest{Name: args[0]}

@@ -635,22 +630,141 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
return err
}

switch showType {
case "license":
fmt.Println(resp.License)
case "modelfile":
fmt.Println(resp.Modelfile)
case "parameters":
fmt.Println(resp.Parameters)
case "system":
fmt.Println(resp.System)
case "template":
fmt.Println(resp.Template)
if flagsSet == 1 {
switch showType {
case "license":
fmt.Println(resp.License)
case "modelfile":
fmt.Println(resp.Modelfile)
case "parameters":
fmt.Println(resp.Parameters)
case "system":
fmt.Println(resp.System)
case "template":
fmt.Println(resp.Template)
}

return nil
}

showInfo(resp)

return nil
}

func showInfo(resp *api.ShowResponse) {
arch := resp.ModelInfo["general.architecture"].(string)

modelData := [][]string{
{"arch", arch},
{"parameters", resp.Details.ParameterSize},
{"quantization", resp.Details.QuantizationLevel},
{"context length", fmt.Sprintf("%v", resp.ModelInfo[fmt.Sprintf("%s.context_length", arch)].(float64))},
{"embedding length", fmt.Sprintf("%v", resp.ModelInfo[fmt.Sprintf("%s.embedding_length", arch)].(float64))},
}

mainTableData := [][]string{
{"Model"},
{renderSubTable(modelData, false)},
}

if resp.ProjectorInfo != nil {
projectorData := [][]string{
{"arch", "clip"},
{"parameters", format.HumanNumber(uint64(resp.ProjectorInfo["general.parameter_count"].(float64)))},
}

if projectorType, ok := resp.ProjectorInfo["clip.projector_type"]; ok {
projectorData = append(projectorData, []string{"projector type", projectorType.(string)})
}

projectorData = append(projectorData,
[]string{"embedding length", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.embedding_length"].(float64))},
[]string{"projection dimensionality", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.projection_dim"].(float64))},
)

mainTableData = append(mainTableData,
[]string{"Projector"},
[]string{renderSubTable(projectorData, false)},
)
}

if resp.Parameters != "" {
mainTableData = append(mainTableData, []string{"Parameters"}, []string{formatParams(resp.Parameters)})
}

if resp.System != "" {
mainTableData = append(mainTableData, []string{"System"}, []string{renderSubTable(twoLines(resp.System), true)})
}

if resp.License != "" {
mainTableData = append(mainTableData, []string{"License"}, []string{renderSubTable(twoLines(resp.License), true)})
}

table := tablewriter.NewWriter(os.Stdout)
table.SetAutoWrapText(false)
table.SetBorder(false)
table.SetAlignment(tablewriter.ALIGN_LEFT)

for _, v := range mainTableData {
table.Append(v)
}

table.Render()
}

func renderSubTable(data [][]string, file bool) string {
var buf bytes.Buffer
table := tablewriter.NewWriter(&buf)
table.SetAutoWrapText(!file)
table.SetBorder(false)
table.SetNoWhiteSpace(true)
table.SetTablePadding("\t")
table.SetAlignment(tablewriter.ALIGN_LEFT)

for _, v := range data {
table.Append(v)
}

table.Render()

renderedTable := buf.String()
lines := strings.Split(renderedTable, "\n")
for i, line := range lines {
lines[i] = "\t" + line
}

return strings.Join(lines, "\n")
}

func twoLines(s string) [][]string {
lines := strings.Split(s, "\n")
res := [][]string{}

count := 0
for _, line := range lines {
line = strings.TrimSpace(line)
if line != "" {
count++
res = append(res, []string{line})
if count == 2 {
return res
}
}
}
return res
}

func formatParams(s string) string {
lines := strings.Split(s, "\n")
table := [][]string{}

for _, line := range lines {
table = append(table, strings.Fields(line))
}
return renderSubTable(table, false)
}

func CopyHandler(cmd *cobra.Command, args []string) error {
client, err := api.ClientFromEnvironment()
if err != nil {
@@ -31,65 +31,40 @@ const (
)

func loadModel(cmd *cobra.Command, opts *runOptions) error {
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}

p := progress.NewProgress(os.Stderr)
defer p.StopAndClear()

spinner := progress.NewSpinner("")
p.Add("", spinner)

showReq := api.ShowRequest{Name: opts.Model}
showResp, err := client.Show(cmd.Context(), &showReq)
client, err := api.ClientFromEnvironment()
if err != nil {
return err
}
opts.MultiModal = slices.Contains(showResp.Details.Families, "clip")
opts.ParentModel = showResp.Details.ParentModel

if len(showResp.Messages) > 0 {
opts.Messages = append(opts.Messages, showResp.Messages...)
}

chatReq := &api.ChatRequest{
Model: opts.Model,
Messages: []api.Message{},
Model: opts.Model,
KeepAlive: opts.KeepAlive,
}

if opts.KeepAlive != nil {
chatReq.KeepAlive = opts.KeepAlive
}

err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
return client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
p.StopAndClear()
if len(opts.Messages) > 0 {
for _, msg := range opts.Messages {
switch msg.Role {
case "user":
fmt.Printf(">>> %s\n", msg.Content)
case "assistant":
state := &displayResponseState{}
displayResponse(msg.Content, opts.WordWrap, state)
fmt.Println()
fmt.Println()
}
for _, msg := range opts.Messages {
switch msg.Role {
case "user":
fmt.Printf(">>> %s\n", msg.Content)
case "assistant":
state := &displayResponseState{}
displayResponse(msg.Content, opts.WordWrap, state)
fmt.Println()
fmt.Println()
}
}
return nil
})
if err != nil {
return err
}

return nil
}

func generateInteractive(cmd *cobra.Command, opts runOptions) error {
opts.Messages = make([]api.Message, 0)

err := loadModel(cmd, &opts)
if err != nil {
return err

@@ -429,15 +404,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {

switch args[1] {
case "info":
fmt.Println("Model details:")
if len(resp.Details.Families) > 0 {
fmt.Printf("Family %s\n", strings.Join(resp.Details.Families, ", "))
} else if resp.Details.Family != "" {
fmt.Printf("Family %s\n", resp.Details.Family)
}
fmt.Printf("Parameter Size %s\n", resp.Details.ParameterSize)
fmt.Printf("Quantization Level %s\n", resp.Details.QuantizationLevel)
fmt.Println("")
showInfo(resp)
case "license":
if resp.License == "" {
fmt.Println("No license was specified for this model.")
docs/api.md (39 changes)

@@ -26,7 +26,7 @@ All durations are returned in nanoseconds.

### Streaming responses

Certain endpoints stream responses as JSON objects and can optional return non-streamed responses.
Certain endpoints stream responses as JSON objects. Streaming can be disabled by providing `{"stream": false}` for these endpoints.

## Generate a completion

@@ -777,11 +777,12 @@ A single JSON object will be returned.
POST /api/show
```

Show information about a model including details, modelfile, template, parameters, license, and system prompt.
Show information about a model including details, modelfile, template, parameters, license, system prompt.

### Parameters

- `name`: name of the model to show
- `verbose`: (optional) if set to `true`, returns full data for verbose response fields

### Examples

@@ -798,14 +799,40 @@ curl http://localhost:11434/api/show -d '{
```json
{
"modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM /Users/matt/.ollama/models/blobs/sha256:200765e1283640ffbd013184bf496e261032fa75b99498a9613be4e94d63ad52\nTEMPLATE \"\"\"{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: \"\"\"\nPARAMETER num_ctx 4096\nPARAMETER stop \"\u003c/s\u003e\"\nPARAMETER stop \"USER:\"\nPARAMETER stop \"ASSISTANT:\"",
"parameters": "num_ctx 4096\nstop \u003c/s\u003e\nstop USER:\nstop ASSISTANT:",
"template": "{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: ",
"parameters": "num_keep 24\nstop \"<|start_header_id|>\"\nstop \"<|end_header_id|>\"\nstop \"<|eot_id|>\"",
"template": "{{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>",
"details": {
"parent_model": "",
"format": "gguf",
"family": "llama",
"families": ["llama", "clip"],
"parameter_size": "7B",
"families": [
"llama"
],
"parameter_size": "8.0B",
"quantization_level": "Q4_0"
},
"model_info": {
"general.architecture": "llama",
"general.file_type": 2,
"general.parameter_count": 8030261248,
"general.quantization_version": 2,
"llama.attention.head_count": 32,
"llama.attention.head_count_kv": 8,
"llama.attention.layer_norm_rms_epsilon": 0.00001,
"llama.block_count": 32,
"llama.context_length": 8192,
"llama.embedding_length": 4096,
"llama.feed_forward_length": 14336,
"llama.rope.dimension_count": 128,
"llama.rope.freq_base": 500000,
"llama.vocab_size": 128256,
"tokenizer.ggml.bos_token_id": 128000,
"tokenizer.ggml.eos_token_id": 128009,
"tokenizer.ggml.merges": [], // populates if `verbose=true`
"tokenizer.ggml.model": "gpt2",
"tokenizer.ggml.pre": "llama-bpe",
"tokenizer.ggml.token_type": [], // populates if `verbose=true`
"tokenizer.ggml.tokens": [] // populates if `verbose=true`
}
}
```
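The same `/api/show` request can be issued programmatically; a minimal Go sketch follows. The model name `llama3` and the fields printed are examples only, and error handling is reduced to panics for brevity.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Mirror the curl example: POST /api/show with {"name": "<model>"}.
	body, _ := json.Marshal(map[string]any{"name": "llama3"})
	resp, err := http.Post("http://localhost:11434/api/show", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	// Print a couple of the fields documented above.
	fmt.Println("details:", out["details"])
	fmt.Println("model_info returned:", out["model_info"] != nil)
}
```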
@@ -104,7 +104,7 @@ like to use. For example, to compile an optimized binary for an Intel i9-9880H,
you might use:

```
OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./...
OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on" go generate ./...
go build .
```

@@ -114,15 +114,18 @@ If you have Docker available, you can build linux binaries with `./scripts/build

### Windows

Note: The windows build for Ollama is still under development.
Note: The Windows build for Ollama is still under development.

Install required tools:
First, install required tools:

- MSVC toolchain - C/C++ and cmake as minimal requirements
- Go version 1.22 or higher
- MinGW (pick one variant) with GCC.
- [MinGW-w64](https://www.mingw-w64.org/)
- [MSYS2](https://www.msys2.org/)
- The `ThreadJob` Powershell module: `Install-Module -Name ThreadJob -Scope CurrentUser`

Then, build the `ollama` binary:

```powershell
$env:CGO_ENABLED="1"
docs/faq.md (16 changes)

@@ -257,3 +257,19 @@ If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` AP
## How do I manage the maximum number of requests the Ollama server can queue?

If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queue by setting `OLLAMA_MAX_QUEUE`.

## How does Ollama handle concurrent requests?

Ollama supports two levels of concurrent processing. If your system has sufficient available memory (system memory when using CPU inference, or VRAM for GPU inference) then multiple models can be loaded at the same time. For a given model, if there is sufficient available memory when the model is loaded, it is configured to allow parallel request processing.

If there is insufficient available memory to load a new model request while one or more models are already loaded, all new requests will be queued until the new model can be loaded. As prior models become idle, one or more will be unloaded to make room for the new model. Queued requests will be processed in order. When using GPU inference new models must be able to completely fit in VRAM to allow concurrent model loads.

Parallel request processing for a given model results in increasing the context size by the number of parallel requests. For example, a 2K context with 4 parallel requests will result in an 8K context and additional memory allocation.

The following server settings may be used to adjust how Ollama handles concurrent requests on most platforms:

- `OLLAMA_MAX_LOADED_MODELS` - The maximum number of models that can be loaded concurrently provided they fit in available memory. The default is 3 * the number of GPUs or 3 for CPU inference.
- `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory.
- `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512

Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM.
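A tiny sketch of the context-size arithmetic described in that FAQ answer (illustrative only; the real scheduler also accounts for batch size and other per-request memory):

```go
package main

import "fmt"

// effectiveContext mirrors the FAQ example: the context allocated for a
// loaded model is the per-request num_ctx multiplied by OLLAMA_NUM_PARALLEL.
func effectiveContext(numCtx, numParallel int) int {
	return numCtx * numParallel
}

func main() {
	// A 2K context with 4 parallel requests becomes an 8K allocation.
	fmt.Println(effectiveContext(2048, 4)) // 8192
}
```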
@@ -8,7 +8,7 @@ Check your compute compatibility to see if your card is supported:
| Compute Capability | Family | Cards |
| ------------------ | ------------------- | ----------------------------------------------------------------------------------------------------------- |
| 9.0 | NVIDIA | `H100` |
| 8.9 | GeForce RTX 40xx | `RTX 4090` `RTX 4080` `RTX 4070 Ti` `RTX 4060 Ti` |
| 8.9 | GeForce RTX 40xx | `RTX 4090` `RTX 4080 SUPER` `RTX 4080` `RTX 4070 Ti SUPER` `RTX 4070 Ti` `RTX 4070 SUPER` `RTX 4070` `RTX 4060 Ti` `RTX 4060` |
| | NVIDIA Professional | `L4` `L40` `RTX 6000` |
| 8.6 | GeForce RTX 30xx | `RTX 3090 Ti` `RTX 3090` `RTX 3080 Ti` `RTX 3080` `RTX 3070 Ti` `RTX 3070` `RTX 3060 Ti` `RTX 3060` |
| | NVIDIA Professional | `A40` `RTX A6000` `RTX A5000` `RTX A4000` `RTX A3000` `RTX A2000` `A10` `A16` `A2` |

@@ -18,7 +18,7 @@ Check your compute compatibility to see if your card is supported:
| | Quadro | `RTX 8000` `RTX 6000` `RTX 5000` `RTX 4000` |
| 7.0 | NVIDIA | `TITAN V` `V100` `Quadro GV100` |
| 6.1 | NVIDIA TITAN | `TITAN Xp` `TITAN X` |
| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050` |
| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050 Ti` `GTX 1050` |
| | Quadro | `P6000` `P5200` `P4200` `P3200` `P5000` `P4000` `P3000` `P2200` `P2000` `P1000` `P620` `P600` `P500` `P520` |
| | Tesla | `P40` `P4` |
| 6.0 | NVIDIA | `Tesla P100` `Quadro GP100` |

@@ -47,19 +47,13 @@ success

### Supported Quantizations

<details>
<summary>Legacy Quantization</summary>

- `Q4_0`
- `Q4_1`
- `Q5_0`
- `Q5_1`
- `Q8_0`

</details>

<details>
<summary>K-means Quantization</summary>`
#### K-means Quantizations

- `Q3_K_S`
- `Q3_K_M`

@@ -70,11 +64,6 @@ success
- `Q5_K_M`
- `Q6_K`

</details>

> [!NOTE]
> Activation-aware Weight Quantization (i.e. IQ) are not currently supported for automatic quantization however you can still import the quantized model into Ollama, see [Import GGUF](#import-gguf).

## Template Detection

> [!NOTE]
@@ -65,6 +65,7 @@ curl http://localhost:11434/v1/chat/completions \
}
]
}'

```

## Endpoints

@@ -104,8 +105,6 @@ curl http://localhost:11434/v1/chat/completions \

#### Notes

- Setting `seed` will always set `temperature` to `0`
- `finish_reason` will always be `stop`
- `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached

## Models
@@ -22,7 +22,7 @@ docker logs <container-name>
If manually running `ollama serve` in a terminal, the logs will be on that terminal.

When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and type in:
- `explorer %LOCALAPPDATA%\Ollama` to view logs
- `explorer %LOCALAPPDATA%\Ollama` to view logs. The most recent server logs will be in `server.log` and older logs will be in `server-#.log`
- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
- `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored
- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories

@@ -70,14 +70,18 @@ curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh

If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/

## Container fails to run on NVIDIA GPU
## NVIDIA GPU Discovery

Make sure you've set up the container runtime first as described in [docker.md](./docker.md)
When Ollama starts up, it takes inventory of the GPUs present in the system to determine compatibility and how much VRAM is available. Sometimes this discovery can fail to find your GPUs. In general, running the latest driver will yield the best results.

Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem
### Linux NVIDIA Troubleshooting

- Is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU.
- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
If you are using a container to run Ollama, make sure you've set up the container runtime first as described in [docker.md](./docker.md)

Sometimes the Ollama can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem

- If you are using a container, is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU.
- Is the uvm driver loaded? `sudo nvidia-modprobe -u`
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
- Try rebooting
- Make sure you're running the latest nvidia drivers

@@ -85,3 +89,8 @@ Sometimes the container runtime can have difficulties initializing the GPU. When
If none of those resolve the problem, gather additional information and file an issue:
- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`

## Windows Terminal Errors

Older versions of Windows 10 (e.g., 21H1) are known to have a bug where the standard terminal program does not display control characters correctly. This can result in a long string of strings like `←[?25h←[?25l` being displayed, sometimes erroring with `The parameter is incorrect` To resolve this problem, please update to Win 10 22H1 or newer.
@@ -19,7 +19,7 @@ Logs will often be helpful in diagnosing the problem (see

## System Requirements

* Windows 10 or newer, Home or Pro
* Windows 10 22H2 or newer, Home or Pro
* NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
* AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card

@@ -39,8 +39,8 @@ server.
Ollama on Windows stores files in a few different locations. You can view them in
the explorer window by hitting `<cmd>+R` and type in:
- `explorer %LOCALAPPDATA%\Ollama` contains logs, and downloaded updates
- *app.log* contains logs from the GUI application
- *server.log* contains the server logs
- *app.log* contains most recent logs from the GUI application
- *server.log* contains the most recent server logs
- *upgrade.log* contains log output for upgrades
- `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH)
- `explorer %HOMEPATH%\.ollama` contains models and configuration
@@ -4,12 +4,14 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type OllamaHost struct {
|
||||
@@ -34,17 +36,17 @@ var (
|
||||
// Set via OLLAMA_HOST in the environment
|
||||
Host *OllamaHost
|
||||
// Set via OLLAMA_KEEP_ALIVE in the environment
|
||||
KeepAlive string
|
||||
KeepAlive time.Duration
|
||||
// Set via OLLAMA_LLM_LIBRARY in the environment
|
||||
LLMLibrary string
|
||||
// Set via OLLAMA_MAX_LOADED_MODELS in the environment
|
||||
MaxRunners int
|
||||
// Set via OLLAMA_MAX_QUEUE in the environment
|
||||
MaxQueuedRequests int
|
||||
// Set via OLLAMA_MODELS in the environment
|
||||
ModelsDir string
|
||||
// Set via OLLAMA_MAX_VRAM in the environment
|
||||
MaxVRAM uint64
|
||||
// Set via OLLAMA_MODELS in the environment
|
||||
ModelsDir string
|
||||
// Set via OLLAMA_NOHISTORY in the environment
|
||||
NoHistory bool
|
||||
// Set via OLLAMA_NOPRUNE in the environment
|
||||
@@ -53,8 +55,23 @@ var (
|
||||
NumParallel int
|
||||
// Set via OLLAMA_RUNNERS_DIR in the environment
|
||||
RunnersDir string
|
||||
// Set via OLLAMA_SCHED_SPREAD in the environment
|
||||
SchedSpread bool
|
||||
// Set via OLLAMA_TMPDIR in the environment
|
||||
TmpDir string
|
||||
// Set via OLLAMA_INTEL_GPU in the environment
|
||||
IntelGpu bool
|
||||
|
||||
// Set via CUDA_VISIBLE_DEVICES in the environment
|
||||
CudaVisibleDevices string
|
||||
// Set via HIP_VISIBLE_DEVICES in the environment
|
||||
HipVisibleDevices string
|
||||
// Set via ROCR_VISIBLE_DEVICES in the environment
|
||||
RocrVisibleDevices string
|
||||
// Set via GPU_DEVICE_ORDINAL in the environment
|
||||
GpuDeviceOrdinal string
|
||||
// Set via HSA_OVERRIDE_GFX_VERSION in the environment
|
||||
HsaOverrideGfxVersion string
|
||||
)
|
||||
|
||||
type EnvVar struct {
|
||||
@@ -64,23 +81,33 @@ type EnvVar struct {
|
||||
}
|
||||
|
||||
func AsMap() map[string]EnvVar {
|
||||
return map[string]EnvVar{
|
||||
ret := map[string]EnvVar{
|
||||
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
|
||||
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
|
||||
"OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
|
||||
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
|
||||
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
|
||||
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
|
||||
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU"},
|
||||
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
|
||||
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
|
||||
"OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"},
|
||||
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
|
||||
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
|
||||
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},
|
||||
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests"},
|
||||
"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
|
||||
"OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"},
|
||||
"OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"},
|
||||
"OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir, "Location for temporary files"},
|
||||
}
|
||||
if runtime.GOOS != "darwin" {
|
||||
ret["CUDA_VISIBLE_DEVICES"] = EnvVar{"CUDA_VISIBLE_DEVICES", CudaVisibleDevices, "Set which NVIDIA devices are visible"}
|
||||
ret["HIP_VISIBLE_DEVICES"] = EnvVar{"HIP_VISIBLE_DEVICES", HipVisibleDevices, "Set which AMD devices are visible"}
|
||||
ret["ROCR_VISIBLE_DEVICES"] = EnvVar{"ROCR_VISIBLE_DEVICES", RocrVisibleDevices, "Set which AMD devices are visible"}
|
||||
ret["GPU_DEVICE_ORDINAL"] = EnvVar{"GPU_DEVICE_ORDINAL", GpuDeviceOrdinal, "Set which AMD devices are visible"}
|
||||
ret["HSA_OVERRIDE_GFX_VERSION"] = EnvVar{"HSA_OVERRIDE_GFX_VERSION", HsaOverrideGfxVersion, "Override the gfx used for all detected AMD GPUs"}
|
||||
ret["OLLAMA_INTEL_GPU"] = EnvVar{"OLLAMA_INTEL_GPU", IntelGpu, "Enable experimental Intel GPU detection"}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func Values() map[string]string {
|
||||
@@ -104,9 +131,10 @@ func clean(key string) string {
|
||||
|
||||
func init() {
|
||||
// default values
|
||||
NumParallel = 1
|
||||
MaxRunners = 1
|
||||
NumParallel = 0 // Autoselect
|
||||
MaxRunners = 0 // Autoselect
|
||||
MaxQueuedRequests = 512
|
||||
KeepAlive = 5 * time.Minute
|
||||
|
||||
LoadConfig()
|
||||
}
|
||||
@@ -180,8 +208,8 @@ func LoadConfig() {
|
||||
|
||||
if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" {
|
||||
val, err := strconv.Atoi(onp)
|
||||
if err != nil || val <= 0 {
|
||||
slog.Error("invalid setting must be greater than zero", "OLLAMA_NUM_PARALLEL", onp, "error", err)
|
||||
if err != nil {
|
||||
slog.Error("invalid setting, ignoring", "OLLAMA_NUM_PARALLEL", onp, "error", err)
|
||||
} else {
|
||||
NumParallel = val
|
||||
}
|
||||
@@ -191,6 +219,15 @@ func LoadConfig() {
|
||||
NoHistory = true
|
||||
}
|
||||
|
||||
if spread := clean("OLLAMA_SCHED_SPREAD"); spread != "" {
|
||||
s, err := strconv.ParseBool(spread)
|
||||
if err == nil {
|
||||
SchedSpread = s
|
||||
} else {
|
||||
SchedSpread = true
|
||||
}
|
||||
}
|
||||
|
||||
if noprune := clean("OLLAMA_NOPRUNE"); noprune != "" {
|
||||
NoPrune = true
|
||||
}
|
||||
@@ -217,7 +254,7 @@ func LoadConfig() {
|
||||
if maxRunners != "" {
|
||||
m, err := strconv.Atoi(maxRunners)
|
||||
if err != nil {
|
||||
slog.Error("invalid setting", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
|
||||
slog.Error("invalid setting, ignoring", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
|
||||
} else {
|
||||
MaxRunners = m
|
||||
}
|
||||
@@ -226,13 +263,16 @@ func LoadConfig() {
|
||||
if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" {
|
||||
p, err := strconv.Atoi(onp)
|
||||
if err != nil || p <= 0 {
|
||||
slog.Error("invalid setting", "OLLAMA_MAX_QUEUE", onp, "error", err)
|
||||
slog.Error("invalid setting, ignoring", "OLLAMA_MAX_QUEUE", onp, "error", err)
|
||||
} else {
|
||||
MaxQueuedRequests = p
|
||||
}
|
||||
}
|
||||
|
||||
KeepAlive = clean("OLLAMA_KEEP_ALIVE")
|
||||
ka := clean("OLLAMA_KEEP_ALIVE")
|
||||
if ka != "" {
|
||||
loadKeepAlive(ka)
|
||||
}
|
||||
|
||||
var err error
|
||||
ModelsDir, err = getModelsDir()
|
||||
@@ -244,6 +284,16 @@ func LoadConfig() {
|
||||
if err != nil {
|
||||
slog.Error("invalid setting", "OLLAMA_HOST", Host, "error", err, "using default port", Host.Port)
|
||||
}
|
||||
|
||||
if set, err := strconv.ParseBool(clean("OLLAMA_INTEL_GPU")); err == nil {
|
||||
IntelGpu = set
|
||||
}
|
||||
|
||||
CudaVisibleDevices = clean("CUDA_VISIBLE_DEVICES")
|
||||
HipVisibleDevices = clean("HIP_VISIBLE_DEVICES")
|
||||
RocrVisibleDevices = clean("ROCR_VISIBLE_DEVICES")
|
||||
GpuDeviceOrdinal = clean("GPU_DEVICE_ORDINAL")
|
||||
HsaOverrideGfxVersion = clean("HSA_OVERRIDE_GFX_VERSION")
|
||||
}
|
||||
|
||||
func getModelsDir() (string, error) {
|
||||
@@ -300,3 +350,24 @@ func getOllamaHost() (*OllamaHost, error) {
|
||||
Port: port,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func loadKeepAlive(ka string) {
|
||||
v, err := strconv.Atoi(ka)
|
||||
if err != nil {
|
||||
d, err := time.ParseDuration(ka)
|
||||
if err == nil {
|
||||
if d < 0 {
|
||||
KeepAlive = time.Duration(math.MaxInt64)
|
||||
} else {
|
||||
KeepAlive = d
|
||||
}
|
||||
}
|
||||
} else {
|
||||
d := time.Duration(v) * time.Second
|
||||
if d < 0 {
|
||||
KeepAlive = time.Duration(math.MaxInt64)
|
||||
} else {
|
||||
KeepAlive = d
|
||||
}
|
||||
}
|
||||
}
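
loadKeepAlive above accepts either a bare integer (interpreted as seconds) or a Go duration string, and maps any negative value to an effectively infinite keep-alive. Example inputs and results, matching the TestConfig cases added below:

// Example OLLAMA_KEEP_ALIVE values and the resulting KeepAlive:
//
//   "3"   -> 3 * time.Second
//   "1h"  -> 1 * time.Hour
//   "-1s" -> time.Duration(math.MaxInt64)  (never unload)
//   "-1"  -> time.Duration(math.MaxInt64)  (never unload)
//   ""    -> 5 * time.Minute default set in init()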
|
||||
|
@@ -2,8 +2,10 @@ package envconfig
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -23,6 +25,21 @@ func TestConfig(t *testing.T) {
|
||||
t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
|
||||
LoadConfig()
|
||||
require.True(t, FlashAttention)
|
||||
t.Setenv("OLLAMA_KEEP_ALIVE", "")
|
||||
LoadConfig()
|
||||
require.Equal(t, 5*time.Minute, KeepAlive)
|
||||
t.Setenv("OLLAMA_KEEP_ALIVE", "3")
|
||||
LoadConfig()
|
||||
require.Equal(t, 3*time.Second, KeepAlive)
|
||||
t.Setenv("OLLAMA_KEEP_ALIVE", "1h")
|
||||
LoadConfig()
|
||||
require.Equal(t, 1*time.Hour, KeepAlive)
|
||||
t.Setenv("OLLAMA_KEEP_ALIVE", "-1s")
|
||||
LoadConfig()
|
||||
require.Equal(t, time.Duration(math.MaxInt64), KeepAlive)
|
||||
t.Setenv("OLLAMA_KEEP_ALIVE", "-1")
|
||||
LoadConfig()
|
||||
require.Equal(t, time.Duration(math.MaxInt64), KeepAlive)
|
||||
}
|
||||
|
||||
func TestClientFromEnvironment(t *testing.T) {
|
||||
|
gpu/amd_linux.go
@@ -13,6 +13,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/format"
|
||||
)
|
||||
|
||||
@@ -25,7 +26,16 @@ const (
|
||||
|
||||
// Prefix with the node dir
|
||||
GPUTotalMemoryFileGlob = "mem_banks/*/properties" // size_in_bytes line
|
||||
GPUUsedMemoryFileGlob = "mem_banks/*/used_memory"
|
||||
|
||||
// Direct Rendering Manager sysfs location
|
||||
DRMDeviceDirGlob = "/sys/class/drm/card*/device"
|
||||
DRMTotalMemoryFile = "mem_info_vram_total"
|
||||
DRMUsedMemoryFile = "mem_info_vram_used"
|
||||
|
||||
// In hex; properties file is in decimal
|
||||
DRMUniqueIDFile = "unique_id"
|
||||
DRMVendorFile = "vendor"
|
||||
DRMDeviceFile = "device"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -35,8 +45,8 @@ var (
|
||||
)
|
||||
|
||||
// Gather GPU information from the amdgpu driver if any supported GPUs are detected
|
||||
func AMDGetGPUInfo() []GpuInfo {
|
||||
resp := []GpuInfo{}
|
||||
func AMDGetGPUInfo() []RocmGPUInfo {
|
||||
resp := []RocmGPUInfo{}
|
||||
if !AMDDetected() {
|
||||
return resp
|
||||
}
|
||||
@@ -50,9 +60,9 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
|
||||
// Determine if the user has already pre-selected which GPUs to look at, then ignore the others
|
||||
var visibleDevices []string
|
||||
hipVD := os.Getenv("HIP_VISIBLE_DEVICES") // zero based index only
|
||||
rocrVD := os.Getenv("ROCR_VISIBLE_DEVICES") // zero based index or UUID, but consumer cards seem to not support UUID
|
||||
gpuDO := os.Getenv("GPU_DEVICE_ORDINAL") // zero based index
|
||||
hipVD := envconfig.HipVisibleDevices // zero based index only
|
||||
rocrVD := envconfig.RocrVisibleDevices // zero based index or UUID, but consumer cards seem to not support UUID
|
||||
gpuDO := envconfig.GpuDeviceOrdinal // zero based index
|
||||
switch {
|
||||
// TODO is this priority order right?
|
||||
case hipVD != "":
|
||||
@@ -65,7 +75,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
visibleDevices = strings.Split(gpuDO, ",")
|
||||
}
|
||||
|
||||
gfxOverride := os.Getenv("HSA_OVERRIDE_GFX_VERSION")
|
||||
gfxOverride := envconfig.HsaOverrideGfxVersion
|
||||
var supported []string
|
||||
libDir := ""
|
||||
|
||||
@@ -90,7 +100,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
scanner := bufio.NewScanner(fp)
|
||||
isCPU := false
|
||||
var major, minor, patch uint64
|
||||
var vendor, device uint64
|
||||
var vendor, device, uniqueID uint64
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
// Note: we could also use "cpu_cores_count X" where X is greater than zero to detect CPUs
|
||||
@@ -121,30 +131,43 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
} else if strings.HasPrefix(line, "vendor_id") {
|
||||
ver := strings.Fields(line)
|
||||
if len(ver) != 2 {
|
||||
slog.Debug("malformed vendor_id", "vendor_id", line)
|
||||
slog.Debug("malformed", "vendor_id", line)
|
||||
continue
|
||||
}
|
||||
vendor, err = strconv.ParseUint(ver[1], 10, 32)
|
||||
vendor, err = strconv.ParseUint(ver[1], 10, 64)
|
||||
if err != nil {
|
||||
slog.Debug("malformed vendor_id" + line)
|
||||
slog.Debug("malformed", "vendor_id", line, "error", err)
|
||||
}
|
||||
} else if strings.HasPrefix(line, "device_id") {
|
||||
ver := strings.Fields(line)
|
||||
if len(ver) != 2 {
|
||||
slog.Debug("malformed device_id", "device_id", line)
|
||||
slog.Debug("malformed", "device_id", line)
|
||||
continue
|
||||
}
|
||||
device, err = strconv.ParseUint(ver[1], 10, 32)
|
||||
device, err = strconv.ParseUint(ver[1], 10, 64)
|
||||
if err != nil {
|
||||
slog.Debug("malformed device_id" + line)
|
||||
slog.Debug("malformed", "device_id", line, "error", err)
|
||||
}
|
||||
} else if strings.HasPrefix(line, "unique_id") {
|
||||
ver := strings.Fields(line)
|
||||
if len(ver) != 2 {
|
||||
slog.Debug("malformed", "unique_id", line)
|
||||
continue
|
||||
}
|
||||
uniqueID, err = strconv.ParseUint(ver[1], 10, 64)
|
||||
if err != nil {
|
||||
slog.Debug("malformed", "unique_id", line, "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO - any other properties we want to extract and record?
|
||||
// vendor_id + device_id -> pci lookup for "Name"
|
||||
// Other metrics that may help us understand relative performance between multiple GPUs
|
||||
}
|
||||
|
||||
// Note: while ./mem_banks/*/used_memory exists, it doesn't appear to take other VRAM consumers
|
||||
// into consideration, so we instead map the device over to the DRM driver sysfs nodes which
|
||||
// do reliably report VRAM usage.
|
||||
|
||||
if isCPU {
|
||||
cpuCount++
|
||||
continue
|
||||
@@ -156,7 +179,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
// Shouldn't happen, but just in case...
|
||||
if gpuID < 0 {
|
||||
slog.Error("unexpected amdgpu sysfs data resulted in negative GPU ID, please set OLLAMA_DEBUG=1 and report an issue")
|
||||
return []GpuInfo{}
|
||||
return nil
|
||||
}
|
||||
|
||||
if int(major) < RocmComputeMin {
|
||||
@@ -167,65 +190,68 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
// Look up the memory for the current node
|
||||
totalMemory := uint64(0)
|
||||
usedMemory := uint64(0)
|
||||
propGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(nodeID), GPUTotalMemoryFileGlob)
|
||||
propFiles, err := filepath.Glob(propGlob)
|
||||
if err != nil {
|
||||
slog.Warn("error looking up total GPU memory", "glob", propGlob, "error", err)
|
||||
var usedFile string
|
||||
mapping := []struct {
|
||||
id uint64
|
||||
filename string
|
||||
}{
|
||||
{vendor, DRMVendorFile},
|
||||
{device, DRMDeviceFile},
|
||||
{uniqueID, DRMUniqueIDFile}, // Not all devices will report this
|
||||
}
|
||||
// 1 or more memory banks - sum the values of all of them
|
||||
for _, propFile := range propFiles {
|
||||
fp, err := os.Open(propFile)
|
||||
if err != nil {
|
||||
slog.Warn("failed to open sysfs node", "file", propFile, "erroir", err)
|
||||
continue
|
||||
}
|
||||
defer fp.Close()
|
||||
scanner := bufio.NewScanner(fp)
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if strings.HasPrefix(line, "size_in_bytes") {
|
||||
ver := strings.Fields(line)
|
||||
if len(ver) != 2 {
|
||||
slog.Warn("malformed " + line)
|
||||
continue
|
||||
}
|
||||
bankSizeInBytes, err := strconv.ParseUint(ver[1], 10, 64)
|
||||
if err != nil {
|
||||
slog.Warn("malformed int " + line)
|
||||
continue
|
||||
}
|
||||
totalMemory += bankSizeInBytes
|
||||
slog.Debug("mapping amdgpu to drm sysfs nodes", "amdgpu", match, "vendor", vendor, "device", device, "unique_id", uniqueID)
|
||||
// Map over to DRM location to find the total/free memory
|
||||
drmMatches, _ := filepath.Glob(DRMDeviceDirGlob)
|
||||
for _, devDir := range drmMatches {
|
||||
matched := true
|
||||
for _, m := range mapping {
|
||||
if m.id == 0 {
|
||||
// Null ID means it didn't populate, so we can't use it to match
|
||||
continue
|
||||
}
|
||||
filename := filepath.Join(devDir, m.filename)
|
||||
buf, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
slog.Debug("failed to read sysfs node", "file", filename, "error", err)
|
||||
matched = false
|
||||
break
|
||||
}
|
||||
// values here are in hex, strip off the lead 0x and parse so we can compare the numeric (decimal) values in amdgpu
|
||||
cmp, err := strconv.ParseUint(strings.TrimPrefix(strings.TrimSpace(string(buf)), "0x"), 16, 64)
|
||||
if err != nil {
|
||||
slog.Debug("failed to parse sysfs node", "file", filename, "error", err)
|
||||
matched = false
|
||||
break
|
||||
}
|
||||
if cmp != m.id {
|
||||
matched = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if totalMemory == 0 {
|
||||
slog.Warn("amdgpu reports zero total memory", "gpu", gpuID)
|
||||
continue
|
||||
}
|
||||
usedGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(nodeID), GPUUsedMemoryFileGlob)
|
||||
usedFiles, err := filepath.Glob(usedGlob)
|
||||
if err != nil {
|
||||
slog.Warn("error looking up used GPU memory", "glob", usedGlob, "error", err)
|
||||
continue
|
||||
}
|
||||
for _, usedFile := range usedFiles {
|
||||
fp, err := os.Open(usedFile)
|
||||
if err != nil {
|
||||
slog.Warn("failed to open sysfs node", "file", usedFile, "error", err)
|
||||
if !matched {
|
||||
continue
|
||||
}
|
||||
defer fp.Close()
|
||||
data, err := io.ReadAll(fp)
|
||||
|
||||
// Found the matching DRM directory
|
||||
slog.Debug("matched", "amdgpu", match, "drm", devDir)
|
||||
totalFile := filepath.Join(devDir, DRMTotalMemoryFile)
|
||||
buf, err := os.ReadFile(totalFile)
|
||||
if err != nil {
|
||||
slog.Warn("failed to read sysfs node", "file", usedFile, "error", err)
|
||||
continue
|
||||
slog.Debug("failed to read sysfs node", "file", totalFile, "error", err)
|
||||
break
|
||||
}
|
||||
used, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||
totalMemory, err = strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 64)
|
||||
if err != nil {
|
||||
slog.Warn("malformed used memory", "data", string(data), "error", err)
|
||||
continue
|
||||
slog.Debug("failed to parse sysfs node", "file", totalFile, "error", err)
|
||||
break
|
||||
}
|
||||
usedMemory += used
|
||||
|
||||
usedFile = filepath.Join(devDir, DRMUsedMemoryFile)
|
||||
usedMemory, err = getFreeMemory(usedFile)
|
||||
if err != nil {
|
||||
slog.Debug("failed to update used memory", "error", err)
|
||||
}
|
||||
break
|
||||
}
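
The matching loop above compares IDs read from the KFD properties file (decimal) against the DRM sysfs files, which hold hex strings with a 0x prefix. A self-contained sketch of that comparison, with an illustrative path and value:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// matchesSysfsID reports whether a DRM sysfs file (hex, e.g. "0x1002\n")
// holds the same value as an ID read from the properties file (decimal).
func matchesSysfsID(path string, id uint64) (bool, error) {
	buf, err := os.ReadFile(path)
	if err != nil {
		return false, err
	}
	raw := strings.TrimPrefix(strings.TrimSpace(string(buf)), "0x")
	val, err := strconv.ParseUint(raw, 16, 64)
	if err != nil {
		return false, err
	}
	return val == id, nil
}

func main() {
	// e.g. /sys/class/drm/card0/device/vendor containing "0x1002"
	ok, err := matchesSysfsID("/sys/class/drm/card0/device/vendor", 4098) // 0x1002 == 4098
	fmt.Println(ok, err)
}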
|
||||
|
||||
// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
|
||||
@@ -241,18 +267,21 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
|
||||
slog.Debug("amdgpu memory", "gpu", gpuID, "total", format.HumanBytes2(totalMemory))
|
||||
slog.Debug("amdgpu memory", "gpu", gpuID, "available", format.HumanBytes2(totalMemory-usedMemory))
|
||||
gpuInfo := GpuInfo{
|
||||
Library: "rocm",
|
||||
memInfo: memInfo{
|
||||
TotalMemory: totalMemory,
|
||||
FreeMemory: (totalMemory - usedMemory),
|
||||
gpuInfo := RocmGPUInfo{
|
||||
GpuInfo: GpuInfo{
|
||||
Library: "rocm",
|
||||
memInfo: memInfo{
|
||||
TotalMemory: totalMemory,
|
||||
FreeMemory: (totalMemory - usedMemory),
|
||||
},
|
||||
ID: strconv.Itoa(gpuID),
|
||||
Name: name,
|
||||
Compute: fmt.Sprintf("gfx%d%x%x", major, minor, patch),
|
||||
MinimumMemory: rocmMinimumMemory,
|
||||
DriverMajor: driverMajor,
|
||||
DriverMinor: driverMinor,
|
||||
},
|
||||
ID: fmt.Sprintf("%d", gpuID),
|
||||
Name: name,
|
||||
Compute: fmt.Sprintf("gfx%d%x%x", major, minor, patch),
|
||||
MinimumMemory: rocmMinimumMemory,
|
||||
DriverMajor: driverMajor,
|
||||
DriverMinor: driverMinor,
|
||||
usedFilepath: usedFile,
|
||||
}
|
||||
|
||||
// If the user wants to filter to a subset of devices, filter out if we aren't a match
|
||||
@@ -276,7 +305,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
libDir, err = AMDValidateLibDir()
|
||||
if err != nil {
|
||||
slog.Warn("unable to verify rocm library, will use cpu", "error", err)
|
||||
return []GpuInfo{}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
gpuInfo.DependencyPath = libDir
|
||||
@@ -287,7 +316,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
supported, err = GetSupportedGFX(libDir)
|
||||
if err != nil {
|
||||
slog.Warn("failed to lookup supported GFX types, falling back to CPU mode", "error", err)
|
||||
return []GpuInfo{}
|
||||
return nil
|
||||
}
|
||||
slog.Debug("rocm supported GPUs", "types", supported)
|
||||
}
|
||||
@@ -304,6 +333,11 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
slog.Info("skipping rocm gfx compatibility check", "HSA_OVERRIDE_GFX_VERSION", gfxOverride)
|
||||
}
|
||||
|
||||
// Check for env var workarounds
|
||||
if name == "1002:687f" { // Vega RX 56
|
||||
gpuInfo.EnvWorkarounds = append(gpuInfo.EnvWorkarounds, [2]string{"HSA_ENABLE_SDMA", "0"})
|
||||
}
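
EnvWorkarounds collects name/value pairs such as HSA_ENABLE_SDMA=0 for the Vega RX 56 case above; the code that applies them is not part of this diff. A hypothetical sketch of exporting such pairs into a runner subprocess environment:

package main

import (
	"fmt"
	"os/exec"
)

// applyEnvWorkarounds appends each workaround as KEY=VALUE on the
// command environment. Illustrative only; the real consumer is elsewhere.
func applyEnvWorkarounds(cmd *exec.Cmd, workarounds [][2]string) {
	for _, w := range workarounds {
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", w[0], w[1]))
	}
}

func main() {
	cmd := exec.Command("/path/to/runner") // hypothetical path
	applyEnvWorkarounds(cmd, [][2]string{{"HSA_ENABLE_SDMA", "0"}})
	fmt.Println(cmd.Env)
}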
|
||||
|
||||
// The GPU has passed all the verification steps and is supported
|
||||
resp = append(resp, gpuInfo)
|
||||
}
|
||||
@@ -378,3 +412,31 @@ func AMDDriverVersion() (driverMajor, driverMinor int, err error) {
|
||||
}
|
||||
return driverMajor, driverMinor, nil
|
||||
}
|
||||
|
||||
func (gpus RocmGPUInfoList) RefreshFreeMemory() error {
|
||||
if len(gpus) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := range gpus {
|
||||
usedMemory, err := getFreeMemory(gpus[i].usedFilepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
slog.Debug("updating rocm free memory", "gpu", gpus[i].ID, "name", gpus[i].Name, "before", format.HumanBytes2(gpus[i].FreeMemory), "now", format.HumanBytes2(gpus[i].TotalMemory-usedMemory))
|
||||
gpus[i].FreeMemory = gpus[i].TotalMemory - usedMemory
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFreeMemory(usedFile string) (uint64, error) {
|
||||
buf, err := os.ReadFile(usedFile)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to read sysfs node %s %w", usedFile, err)
|
||||
}
|
||||
usedMemory, err := strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 64)
|
||||
if err != nil {
|
||||
slog.Debug("failed to parse sysfs node", "file", usedFile, "error", err)
|
||||
return 0, fmt.Errorf("failed to parse sysfs node %s %w", usedFile, err)
|
||||
}
|
||||
return usedMemory, nil
|
||||
}
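
getFreeMemory above re-reads the DRM used-memory node recorded during discovery, and RefreshFreeMemory uses it to update each GPU in place. A hedged usage sketch, assuming it sits in the gpu package next to the types above; the scheduler integration shown is illustrative only:

package gpu

import "log/slog"

// pickGPU refreshes VRAM readings first, then looks for a device with
// enough free memory for the requested load. Illustrative helper only.
func pickGPU(gpus RocmGPUInfoList, required uint64) (int, bool) {
	if err := gpus.RefreshFreeMemory(); err != nil {
		slog.Debug("could not refresh rocm free memory", "error", err)
	}
	for i := range gpus {
		if gpus[i].FreeMemory >= required+gpus[i].MinimumMemory {
			return i, true
		}
	}
	return 0, false
}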
|
||||
|
@@ -7,8 +7,10 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/format"
|
||||
)
|
||||
|
||||
@@ -24,8 +26,8 @@ var (
|
||||
RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\5.7\\bin"} // TODO glob?
|
||||
)
|
||||
|
||||
func AMDGetGPUInfo() []GpuInfo {
|
||||
resp := []GpuInfo{}
|
||||
func AMDGetGPUInfo() []RocmGPUInfo {
|
||||
resp := []RocmGPUInfo{}
|
||||
hl, err := NewHipLib()
|
||||
if err != nil {
|
||||
slog.Debug(err.Error())
|
||||
@@ -52,7 +54,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
}
|
||||
|
||||
var supported []string
|
||||
gfxOverride := os.Getenv("HSA_OVERRIDE_GFX_VERSION")
|
||||
gfxOverride := envconfig.HsaOverrideGfxVersion
|
||||
if gfxOverride == "" {
|
||||
supported, err = GetSupportedGFX(libDir)
|
||||
if err != nil {
|
||||
@@ -113,25 +115,29 @@ func AMDGetGPUInfo() []GpuInfo {
|
||||
continue
|
||||
}
|
||||
|
||||
// TODO revisit this once ROCm v6 is available on windows.
|
||||
// v5.7 only reports VRAM used by this process, so it's completely wrong and unusable
|
||||
slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
|
||||
slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))
|
||||
gpuInfo := GpuInfo{
|
||||
Library: "rocm",
|
||||
memInfo: memInfo{
|
||||
TotalMemory: totalMemory,
|
||||
FreeMemory: freeMemory,
|
||||
},
|
||||
ID: fmt.Sprintf("%d", i), // TODO this is probably wrong if we specify visible devices
|
||||
DependencyPath: libDir,
|
||||
MinimumMemory: rocmMinimumMemory,
|
||||
Name: name,
|
||||
Compute: gfx,
|
||||
gpuInfo := RocmGPUInfo{
|
||||
GpuInfo: GpuInfo{
|
||||
Library: "rocm",
|
||||
memInfo: memInfo{
|
||||
TotalMemory: totalMemory,
|
||||
FreeMemory: freeMemory,
|
||||
},
|
||||
// Free memory reporting on Windows is not reliable until we bump to ROCm v6.2
|
||||
UnreliableFreeMemory: true,
|
||||
|
||||
// TODO - this information isn't accurate on windows, so don't report it until we find the right way to retrieve
|
||||
// DriverMajor: driverMajor,
|
||||
// DriverMinor: driverMinor,
|
||||
ID: strconv.Itoa(i), // TODO this is probably wrong if we specify visible devices
|
||||
DependencyPath: libDir,
|
||||
MinimumMemory: rocmMinimumMemory,
|
||||
Name: name,
|
||||
Compute: gfx,
|
||||
|
||||
// TODO - this information isn't accurate on windows, so don't report it until we find the right way to retrieve
|
||||
// DriverMajor: driverMajor,
|
||||
// DriverMinor: driverMinor,
|
||||
},
|
||||
index: i,
|
||||
}
|
||||
|
||||
resp = append(resp, gpuInfo)
|
||||
@@ -159,3 +165,30 @@ func AMDValidateLibDir() (string, error) {
|
||||
slog.Warn("amdgpu detected, but no compatible rocm library found. Please install ROCm")
|
||||
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
||||
}
|
||||
|
||||
func (gpus RocmGPUInfoList) RefreshFreeMemory() error {
|
||||
if len(gpus) == 0 {
|
||||
return nil
|
||||
}
|
||||
hl, err := NewHipLib()
|
||||
if err != nil {
|
||||
slog.Debug(err.Error())
|
||||
return nil
|
||||
}
|
||||
defer hl.Release()
|
||||
|
||||
for i := range gpus {
|
||||
err := hl.HipSetDevice(gpus[i].index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
freeMemory, _, err := hl.HipMemGetInfo()
|
||||
if err != nil {
|
||||
slog.Warn("get mem info", "id", i, "error", err)
|
||||
continue
|
||||
}
|
||||
slog.Debug("updating rocm free memory", "gpu", gpus[i].ID, "name", gpus[i].Name, "before", format.HumanBytes2(gpus[i].FreeMemory), "now", format.HumanBytes2(freeMemory))
|
||||
gpus[i].FreeMemory = freeMemory
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -77,20 +77,27 @@ func cleanupTmpDirs() {
|
||||
continue
|
||||
}
|
||||
raw, err := os.ReadFile(filepath.Join(d, "ollama.pid"))
|
||||
if err == nil {
|
||||
pid, err := strconv.Atoi(string(raw))
|
||||
if err == nil {
|
||||
if proc, err := os.FindProcess(pid); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
|
||||
// Another running ollama, ignore this tmpdir
|
||||
continue
|
||||
}
|
||||
}
|
||||
} else {
|
||||
slog.Debug("failed to open ollama.pid", "path", d, "error", err)
|
||||
}
|
||||
err = os.RemoveAll(d)
|
||||
if err != nil {
|
||||
slog.Debug("unable to cleanup stale tmpdir", "path", d, "error", err)
|
||||
slog.Warn("failed to read ollama.pid", "path", d, "error", err)
|
||||
// No pid, ignore this tmpdir
|
||||
continue
|
||||
}
|
||||
|
||||
pid, err := strconv.Atoi(string(raw))
|
||||
if err != nil {
|
||||
slog.Warn("failed to parse pid", "path", d, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
proc, err := os.FindProcess(pid)
|
||||
if err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
|
||||
slog.Warn("found running ollama", "pid", pid, "path", d)
|
||||
// Another running ollama, ignore this tmpdir
|
||||
continue
|
||||
}
|
||||
|
||||
if err := os.Remove(d); err != nil {
|
||||
slog.Warn("unable to cleanup stale tmpdir", "path", d, "error", err)
|
||||
}
|
||||
}
|
||||
}
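
The rewritten cleanup above probes whether the recorded pid is still alive by sending signal 0, which checks existence without actually delivering a signal. A standalone, Unix-only illustration of that idiom:

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// processAlive reports whether pid refers to a process that has not exited.
func processAlive(pid int) bool {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return false
	}
	err = proc.Signal(syscall.Signal(0))
	// os.ErrProcessDone means the process has already exited.
	return !errors.Is(err, os.ErrProcessDone)
}

func main() {
	fmt.Println(processAlive(os.Getpid())) // true for our own pid
}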
|
||||
|
@@ -1,21 +1,16 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"log/slog"
|
||||
|
||||
"golang.org/x/sys/cpu"
|
||||
)
|
||||
|
||||
func GetCPUVariant() string {
|
||||
func GetCPUCapability() CPUCapability {
|
||||
if cpu.X86.HasAVX2 {
|
||||
slog.Debug("CPU has AVX2")
|
||||
return "avx2"
|
||||
return CPUCapabilityAVX2
|
||||
}
|
||||
if cpu.X86.HasAVX {
|
||||
slog.Debug("CPU has AVX")
|
||||
return "avx"
|
||||
return CPUCapabilityAVX
|
||||
}
|
||||
slog.Debug("CPU does not have vector extensions")
|
||||
// else LCD
|
||||
return ""
|
||||
return CPUCapabilityNone
|
||||
}
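
GetCPUCapability now returns a typed capability instead of a string variant. The CPUCapability type itself is defined outside this diff; a plausible sketch consistent with the ordered comparison used later in gpu.go (cpuCapability < GPURunnerCPUCapability), offered as an assumption rather than the actual definition:

package gpu

// CPUCapability sketch: an ordered constant set so "at least AVX" can be
// expressed with a simple comparison. Names match the diff; the concrete
// type and threshold are assumptions.
type CPUCapability uint32

const (
	CPUCapabilityNone CPUCapability = iota
	CPUCapabilityAVX
	CPUCapabilityAVX2
	// GPU runners are assumed to require at least AVX on amd64 builds.
	GPURunnerCPUCapability = CPUCapabilityAVX
)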
|
||||
|
gpu/gpu.go
@@ -24,19 +24,37 @@ import (
|
||||
"github.com/ollama/ollama/format"
|
||||
)
|
||||
|
||||
type handles struct {
|
||||
type cudaHandles struct {
|
||||
deviceCount int
|
||||
cudart *C.cudart_handle_t
|
||||
nvcuda *C.nvcuda_handle_t
|
||||
nvml *C.nvml_handle_t
|
||||
}
|
||||
|
||||
type oneapiHandles struct {
|
||||
oneapi *C.oneapi_handle_t
|
||||
deviceCount int
|
||||
}
|
||||
|
||||
const (
|
||||
cudaMinimumMemory = 457 * format.MebiByte
|
||||
rocmMinimumMemory = 457 * format.MebiByte
|
||||
// TODO OneAPI minimum memory
|
||||
)
|
||||
|
||||
var gpuMutex sync.Mutex
|
||||
var (
|
||||
gpuMutex sync.Mutex
|
||||
bootstrapped bool
|
||||
cpuCapability CPUCapability
|
||||
cpus []CPUInfo
|
||||
cudaGPUs []CudaGPUInfo
|
||||
nvcudaLibPath string
|
||||
cudartLibPath string
|
||||
oneapiLibPath string
|
||||
nvmlLibPath string
|
||||
rocmGPUs []RocmGPUInfo
|
||||
oneapiGPUs []OneapiGPUInfo
|
||||
)
|
||||
|
||||
// With our current CUDA compile flags, older than 5.0 will not work properly
|
||||
var CudaComputeMin = [2]C.int{5, 0}
|
||||
@@ -46,113 +64,113 @@ var RocmComputeMin = 9
|
||||
// TODO find a better way to detect iGPU instead of minimum memory
|
||||
const IGPUMemLimit = 1 * format.GibiByte // 512G is what they typically report, so anything less than 1G must be iGPU
|
||||
|
||||
var CudartLinuxGlobs = []string{
|
||||
"/usr/local/cuda/lib64/libcudart.so*",
|
||||
"/usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so*",
|
||||
"/usr/lib/x86_64-linux-gnu/libcudart.so*",
|
||||
"/usr/lib/wsl/lib/libcudart.so*",
|
||||
"/usr/lib/wsl/drivers/*/libcudart.so*",
|
||||
"/opt/cuda/lib64/libcudart.so*",
|
||||
"/usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so*",
|
||||
"/usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so*",
|
||||
"/usr/lib/aarch64-linux-gnu/libcudart.so*",
|
||||
"/usr/local/cuda/lib*/libcudart.so*",
|
||||
"/usr/lib*/libcudart.so*",
|
||||
"/usr/local/lib*/libcudart.so*",
|
||||
}
|
||||
|
||||
var CudartWindowsGlobs = []string{
|
||||
"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
|
||||
}
|
||||
|
||||
var NvcudaLinuxGlobs = []string{
|
||||
"/usr/local/cuda*/targets/*/lib/libcuda.so*",
|
||||
"/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
|
||||
"/usr/lib/*-linux-gnu/libcuda.so*",
|
||||
"/usr/lib/wsl/lib/libcuda.so*",
|
||||
"/usr/lib/wsl/drivers/*/libcuda.so*",
|
||||
"/opt/cuda/lib*/libcuda.so*",
|
||||
"/usr/local/cuda/lib*/libcuda.so*",
|
||||
"/usr/lib*/libcuda.so*",
|
||||
"/usr/local/lib*/libcuda.so*",
|
||||
}
|
||||
|
||||
var NvcudaWindowsGlobs = []string{
|
||||
"c:\\windows\\system*\\nvcuda.dll",
|
||||
}
|
||||
|
||||
var OneapiWindowsGlobs = []string{
|
||||
"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
|
||||
}
|
||||
|
||||
var OneapiLinuxGlobs = []string{
|
||||
"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
|
||||
"/usr/lib*/libze_intel_gpu.so*",
|
||||
}
|
||||
|
||||
// Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed.
|
||||
// Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices.
|
||||
var CudaTegra string = os.Getenv("JETSON_JETPACK")
|
||||
|
||||
// Note: gpuMutex must already be held
|
||||
func initGPUHandles() *handles {
|
||||
func initCudaHandles() *cudaHandles {
|
||||
|
||||
// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing
|
||||
|
||||
gpuHandles := &handles{}
|
||||
var cudartMgmtName string
|
||||
var cudartMgmtPatterns []string
|
||||
var nvcudaMgmtName string
|
||||
var nvcudaMgmtPatterns []string
|
||||
|
||||
tmpDir, _ := PayloadsDir()
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
cudartMgmtName = "cudart64_*.dll"
|
||||
localAppData := os.Getenv("LOCALAPPDATA")
|
||||
cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", cudartMgmtName)}
|
||||
cudartMgmtPatterns = append(cudartMgmtPatterns, CudartWindowsGlobs...)
|
||||
// Aligned with driver, we can't carry as payloads
|
||||
nvcudaMgmtName = "nvcuda.dll"
|
||||
nvcudaMgmtPatterns = NvcudaWindowsGlobs
|
||||
case "linux":
|
||||
cudartMgmtName = "libcudart.so*"
|
||||
if tmpDir != "" {
|
||||
// TODO - add "payloads" for subprocess
|
||||
cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", cudartMgmtName)}
|
||||
}
|
||||
cudartMgmtPatterns = append(cudartMgmtPatterns, CudartLinuxGlobs...)
|
||||
// Aligned with driver, we can't carry as payloads
|
||||
nvcudaMgmtName = "libcuda.so*"
|
||||
nvcudaMgmtPatterns = NvcudaLinuxGlobs
|
||||
default:
|
||||
return gpuHandles
|
||||
cHandles := &cudaHandles{}
|
||||
// Short Circuit if we already know which library to use
|
||||
if nvmlLibPath != "" {
|
||||
cHandles.nvml, _ = LoadNVMLMgmt([]string{nvmlLibPath})
|
||||
return cHandles
|
||||
}
|
||||
if nvcudaLibPath != "" {
|
||||
cHandles.deviceCount, cHandles.nvcuda, _ = LoadNVCUDAMgmt([]string{nvcudaLibPath})
|
||||
return cHandles
|
||||
}
|
||||
if cudartLibPath != "" {
|
||||
cHandles.deviceCount, cHandles.cudart, _ = LoadCUDARTMgmt([]string{cudartLibPath})
|
||||
return cHandles
|
||||
}
|
||||
|
||||
slog.Debug("Detecting GPUs")
|
||||
nvcudaLibPaths := FindGPULibs(nvcudaMgmtName, nvcudaMgmtPatterns)
|
||||
slog.Debug("searching for GPU discovery libraries for NVIDIA")
|
||||
var cudartMgmtPatterns []string
|
||||
|
||||
// Aligned with driver, we can't carry as payloads
|
||||
nvcudaMgmtPatterns := NvcudaGlobs
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
localAppData := os.Getenv("LOCALAPPDATA")
|
||||
cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", CudartMgmtName)}
|
||||
}
|
||||
tmpDir, _ := PayloadsDir()
|
||||
if tmpDir != "" {
|
||||
// TODO - add "payloads" for subprocess
|
||||
cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", CudartMgmtName)}
|
||||
}
|
||||
cudartMgmtPatterns = append(cudartMgmtPatterns, CudartGlobs...)
|
||||
|
||||
if len(NvmlGlobs) > 0 {
|
||||
nvmlLibPaths := FindGPULibs(NvmlMgmtName, NvmlGlobs)
|
||||
if len(nvmlLibPaths) > 0 {
|
||||
nvml, libPath := LoadNVMLMgmt(nvmlLibPaths)
|
||||
if nvml != nil {
|
||||
slog.Debug("nvidia-ml loaded", "library", libPath)
|
||||
cHandles.nvml = nvml
|
||||
nvmlLibPath = libPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nvcudaLibPaths := FindGPULibs(NvcudaMgmtName, nvcudaMgmtPatterns)
|
||||
if len(nvcudaLibPaths) > 0 {
|
||||
deviceCount, nvcuda, libPath := LoadNVCUDAMgmt(nvcudaLibPaths)
|
||||
if nvcuda != nil {
|
||||
slog.Debug("detected GPUs", "count", deviceCount, "library", libPath)
|
||||
gpuHandles.nvcuda = nvcuda
|
||||
gpuHandles.deviceCount = deviceCount
|
||||
return gpuHandles
|
||||
cHandles.nvcuda = nvcuda
|
||||
cHandles.deviceCount = deviceCount
|
||||
nvcudaLibPath = libPath
|
||||
return cHandles
|
||||
}
|
||||
}
|
||||
|
||||
cudartLibPaths := FindGPULibs(cudartMgmtName, cudartMgmtPatterns)
|
||||
cudartLibPaths := FindGPULibs(CudartMgmtName, cudartMgmtPatterns)
|
||||
if len(cudartLibPaths) > 0 {
|
||||
deviceCount, cudart, libPath := LoadCUDARTMgmt(cudartLibPaths)
|
||||
if cudart != nil {
|
||||
slog.Debug("detected GPUs", "library", libPath, "count", deviceCount)
|
||||
gpuHandles.cudart = cudart
|
||||
gpuHandles.deviceCount = deviceCount
|
||||
return gpuHandles
|
||||
cHandles.cudart = cudart
|
||||
cHandles.deviceCount = deviceCount
|
||||
cudartLibPath = libPath
|
||||
return cHandles
|
||||
}
|
||||
}
|
||||
|
||||
return gpuHandles
|
||||
return cHandles
|
||||
}
|
||||
|
||||
// Note: gpuMutex must already be held
|
||||
func initOneAPIHandles() *oneapiHandles {
|
||||
oHandles := &oneapiHandles{}
|
||||
|
||||
// Short Circuit if we already know which library to use
|
||||
if oneapiLibPath != "" {
|
||||
oHandles.deviceCount, oHandles.oneapi, _ = LoadOneapiMgmt([]string{oneapiLibPath})
|
||||
return oHandles
|
||||
}
|
||||
|
||||
oneapiLibPaths := FindGPULibs(OneapiMgmtName, OneapiGlobs)
|
||||
if len(oneapiLibPaths) > 0 {
|
||||
oHandles.deviceCount, oHandles.oneapi, oneapiLibPath = LoadOneapiMgmt(oneapiLibPaths)
|
||||
}
|
||||
|
||||
return oHandles
|
||||
}
|
||||
|
||||
func GetCPUInfo() GpuInfoList {
|
||||
gpuMutex.Lock()
|
||||
if !bootstrapped {
|
||||
gpuMutex.Unlock()
|
||||
GetGPUInfo()
|
||||
} else {
|
||||
gpuMutex.Unlock()
|
||||
}
|
||||
return GpuInfoList{cpus[0].GpuInfo}
|
||||
}
|
||||
|
||||
func GetGPUInfo() GpuInfoList {
|
||||
@@ -160,112 +178,258 @@ func GetGPUInfo() GpuInfoList {
|
||||
// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
|
||||
gpuMutex.Lock()
|
||||
defer gpuMutex.Unlock()
|
||||
|
||||
gpuHandles := initGPUHandles()
|
||||
needRefresh := true
|
||||
var cHandles *cudaHandles
|
||||
var oHandles *oneapiHandles
|
||||
defer func() {
|
||||
if gpuHandles.cudart != nil {
|
||||
C.cudart_release(*gpuHandles.cudart)
|
||||
if cHandles != nil {
|
||||
if cHandles.cudart != nil {
|
||||
C.cudart_release(*cHandles.cudart)
|
||||
}
|
||||
if cHandles.nvcuda != nil {
|
||||
C.nvcuda_release(*cHandles.nvcuda)
|
||||
}
|
||||
if cHandles.nvml != nil {
|
||||
C.nvml_release(*cHandles.nvml)
|
||||
}
|
||||
}
|
||||
if gpuHandles.nvcuda != nil {
|
||||
C.nvcuda_release(*gpuHandles.nvcuda)
|
||||
if oHandles != nil {
|
||||
if oHandles.oneapi != nil {
|
||||
// TODO - is this needed?
|
||||
C.oneapi_release(*oHandles.oneapi)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// All our GPU builds on x86 have AVX enabled, so fallback to CPU if we don't detect at least AVX
|
||||
cpuVariant := GetCPUVariant()
|
||||
if cpuVariant == "" && runtime.GOARCH == "amd64" {
|
||||
slog.Warn("CPU does not have AVX or AVX2, disabling GPU support.")
|
||||
}
|
||||
if !bootstrapped {
|
||||
slog.Info("looking for compatible GPUs")
|
||||
needRefresh = false
|
||||
cpuCapability = GetCPUCapability()
|
||||
var memInfo C.mem_info_t
|
||||
|
||||
// On windows we bundle the nvidia library one level above the runner dir
|
||||
depPath := ""
|
||||
if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
|
||||
depPath = filepath.Dir(envconfig.RunnersDir)
|
||||
}
|
||||
|
||||
var memInfo C.mem_info_t
|
||||
resp := []GpuInfo{}
|
||||
|
||||
// NVIDIA first
|
||||
for i := range gpuHandles.deviceCount {
|
||||
// TODO once we support CPU compilation variants of GPU libraries refine this...
|
||||
if cpuVariant == "" && runtime.GOARCH == "amd64" {
|
||||
continue
|
||||
mem, err := GetCPUMem()
|
||||
if err != nil {
|
||||
slog.Warn("error looking up system memory", "error", err)
|
||||
}
|
||||
if gpuHandles.cudart != nil || gpuHandles.nvcuda != nil {
|
||||
gpuInfo := GpuInfo{
|
||||
Library: "cuda",
|
||||
cpus = []CPUInfo{CPUInfo{
|
||||
GpuInfo: GpuInfo{
|
||||
memInfo: mem,
|
||||
Library: "cpu",
|
||||
Variant: cpuCapability,
|
||||
ID: "0",
|
||||
},
|
||||
}}
|
||||
|
||||
// Fallback to CPU mode if we're lacking required vector extensions on x86
|
||||
if cpuCapability < GPURunnerCPUCapability && runtime.GOARCH == "amd64" {
|
||||
slog.Warn("CPU does not have minimum vector extensions, GPU inference disabled", "required", GPURunnerCPUCapability, "detected", cpuCapability)
|
||||
bootstrapped = true
|
||||
// No need to do any GPU discovery, since we can't run on them
|
||||
return GpuInfoList{cpus[0].GpuInfo}
|
||||
}
|
||||
|
||||
// On windows we bundle the nvidia library one level above the runner dir
|
||||
depPath := ""
|
||||
if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
|
||||
depPath = filepath.Join(filepath.Dir(envconfig.RunnersDir), "cuda")
|
||||
}
|
||||
|
||||
// Load ALL libraries
|
||||
cHandles = initCudaHandles()
|
||||
|
||||
// NVIDIA
|
||||
for i := range cHandles.deviceCount {
|
||||
if cHandles.cudart != nil || cHandles.nvcuda != nil {
|
||||
gpuInfo := CudaGPUInfo{
|
||||
GpuInfo: GpuInfo{
|
||||
Library: "cuda",
|
||||
},
|
||||
index: i,
|
||||
}
|
||||
var driverMajor int
|
||||
var driverMinor int
|
||||
if cHandles.cudart != nil {
|
||||
C.cudart_bootstrap(*cHandles.cudart, C.int(i), &memInfo)
|
||||
} else {
|
||||
C.nvcuda_bootstrap(*cHandles.nvcuda, C.int(i), &memInfo)
|
||||
driverMajor = int(cHandles.nvcuda.driver_major)
|
||||
driverMinor = int(cHandles.nvcuda.driver_minor)
|
||||
}
|
||||
if memInfo.err != nil {
|
||||
slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
|
||||
C.free(unsafe.Pointer(memInfo.err))
|
||||
continue
|
||||
}
|
||||
if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
|
||||
slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
|
||||
continue
|
||||
}
|
||||
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||
gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
|
||||
gpuInfo.MinimumMemory = cudaMinimumMemory
|
||||
gpuInfo.DependencyPath = depPath
|
||||
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
|
||||
gpuInfo.DriverMajor = driverMajor
|
||||
gpuInfo.DriverMinor = driverMinor
|
||||
|
||||
// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
|
||||
cudaGPUs = append(cudaGPUs, gpuInfo)
|
||||
}
|
||||
var driverMajor int
|
||||
var driverMinor int
|
||||
if gpuHandles.cudart != nil {
|
||||
C.cudart_check_vram(*gpuHandles.cudart, C.int(i), &memInfo)
|
||||
}
|
||||
|
||||
// Intel
|
||||
if envconfig.IntelGpu {
|
||||
oHandles = initOneAPIHandles()
|
||||
// On windows we bundle the oneapi library one level above the runner dir
|
||||
depPath = ""
|
||||
if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
|
||||
depPath = filepath.Join(filepath.Dir(envconfig.RunnersDir), "oneapi")
|
||||
}
|
||||
|
||||
for d := range oHandles.oneapi.num_drivers {
|
||||
if oHandles.oneapi == nil {
|
||||
// shouldn't happen
|
||||
slog.Warn("nil oneapi handle with driver count", "count", int(oHandles.oneapi.num_drivers))
|
||||
continue
|
||||
}
|
||||
devCount := C.oneapi_get_device_count(*oHandles.oneapi, C.int(d))
|
||||
for i := range devCount {
|
||||
gpuInfo := OneapiGPUInfo{
|
||||
GpuInfo: GpuInfo{
|
||||
Library: "oneapi",
|
||||
},
|
||||
driverIndex: int(d),
|
||||
gpuIndex: int(i),
|
||||
}
|
||||
// TODO - split bootstrapping from updating free memory
|
||||
C.oneapi_check_vram(*oHandles.oneapi, C.int(d), i, &memInfo)
|
||||
// TODO - convert this to MinimumMemory based on testing...
|
||||
var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
|
||||
memInfo.free = C.uint64_t(totalFreeMem)
|
||||
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
|
||||
gpuInfo.DependencyPath = depPath
|
||||
oneapiGPUs = append(oneapiGPUs, gpuInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rocmGPUs = AMDGetGPUInfo()
|
||||
bootstrapped = true
|
||||
if len(cudaGPUs) == 0 && len(rocmGPUs) == 0 && len(oneapiGPUs) == 0 {
|
||||
slog.Info("no compatible GPUs were discovered")
|
||||
}
|
||||
}
|
||||
|
||||
// For detected GPUs, load library if not loaded
|
||||
|
||||
// Refresh free memory usage
|
||||
if needRefresh {
|
||||
mem, err := GetCPUMem()
|
||||
if err != nil {
|
||||
slog.Warn("error looking up system memory", "error", err)
|
||||
} else {
|
||||
slog.Debug("updating system memory data",
|
||||
slog.Group(
|
||||
"before",
|
||||
"total", format.HumanBytes2(cpus[0].TotalMemory),
|
||||
"free", format.HumanBytes2(cpus[0].FreeMemory),
|
||||
),
|
||||
slog.Group(
|
||||
"now",
|
||||
"total", format.HumanBytes2(mem.TotalMemory),
|
||||
"free", format.HumanBytes2(mem.FreeMemory),
|
||||
),
|
||||
)
|
||||
cpus[0].FreeMemory = mem.FreeMemory
|
||||
}
|
||||
|
||||
var memInfo C.mem_info_t
|
||||
if cHandles == nil && len(cudaGPUs) > 0 {
|
||||
cHandles = initCudaHandles()
|
||||
}
|
||||
for i, gpu := range cudaGPUs {
|
||||
if cHandles.nvml != nil {
|
||||
C.nvml_get_free(*cHandles.nvml, C.int(gpu.index), &memInfo.free, &memInfo.total, &memInfo.used)
|
||||
} else if cHandles.cudart != nil {
|
||||
C.cudart_bootstrap(*cHandles.cudart, C.int(gpu.index), &memInfo)
|
||||
} else if cHandles.nvcuda != nil {
|
||||
C.nvcuda_get_free(*cHandles.nvcuda, C.int(gpu.index), &memInfo.free, &memInfo.total)
|
||||
memInfo.used = memInfo.total - memInfo.free
|
||||
} else {
|
||||
C.nvcuda_check_vram(*gpuHandles.nvcuda, C.int(i), &memInfo)
|
||||
driverMajor = int(gpuHandles.nvcuda.driver_major)
|
||||
driverMinor = int(gpuHandles.nvcuda.driver_minor)
|
||||
// shouldn't happen
|
||||
slog.Warn("no valid cuda library loaded to refresh vram usage")
|
||||
break
|
||||
}
|
||||
if memInfo.err != nil {
|
||||
slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
|
||||
slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
|
||||
C.free(unsafe.Pointer(memInfo.err))
|
||||
continue
|
||||
}
|
||||
if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
|
||||
slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
|
||||
if memInfo.free == 0 {
|
||||
slog.Warn("error looking up nvidia GPU memory")
|
||||
continue
|
||||
}
|
||||
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||
gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
|
||||
gpuInfo.MinimumMemory = cudaMinimumMemory
|
||||
gpuInfo.DependencyPath = depPath
|
||||
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
|
||||
gpuInfo.DriverMajor = driverMajor
|
||||
gpuInfo.DriverMinor = driverMinor
|
||||
slog.Debug("updating cuda memory data",
|
||||
"gpu", gpu.ID,
|
||||
"name", gpu.Name,
|
||||
slog.Group(
|
||||
"before",
|
||||
"total", format.HumanBytes2(gpu.TotalMemory),
|
||||
"free", format.HumanBytes2(gpu.FreeMemory),
|
||||
),
|
||||
slog.Group(
|
||||
"now",
|
||||
"total", format.HumanBytes2(uint64(memInfo.total)),
|
||||
"free", format.HumanBytes2(uint64(memInfo.free)),
|
||||
"used", format.HumanBytes2(uint64(memInfo.used)),
|
||||
),
|
||||
)
|
||||
cudaGPUs[i].FreeMemory = uint64(memInfo.free)
|
||||
}
|
||||
|
||||
// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
|
||||
resp = append(resp, gpuInfo)
|
||||
if oHandles == nil && len(oneapiGPUs) > 0 {
|
||||
oHandles = initOneAPIHandles()
|
||||
}
|
||||
for i, gpu := range oneapiGPUs {
|
||||
if oHandles.oneapi == nil {
|
||||
// shouldn't happen
|
||||
slog.Warn("nil oneapi handle with device count", "count", oHandles.deviceCount)
|
||||
continue
|
||||
}
|
||||
C.oneapi_check_vram(*oHandles.oneapi, C.int(gpu.driverIndex), C.int(gpu.gpuIndex), &memInfo)
|
||||
// TODO - convert this to MinimumMemory based on testing...
|
||||
var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
|
||||
memInfo.free = C.uint64_t(totalFreeMem)
|
||||
oneapiGPUs[i].FreeMemory = uint64(memInfo.free)
|
||||
}
|
||||
|
||||
err = RocmGPUInfoList(rocmGPUs).RefreshFreeMemory()
|
||||
if err != nil {
|
||||
slog.Debug("problem refreshing ROCm free memory", "error", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Then AMD
|
||||
resp = append(resp, AMDGetGPUInfo()...)
|
||||
|
||||
resp := []GpuInfo{}
|
||||
for _, gpu := range cudaGPUs {
|
||||
resp = append(resp, gpu.GpuInfo)
|
||||
}
|
||||
for _, gpu := range rocmGPUs {
|
||||
resp = append(resp, gpu.GpuInfo)
|
||||
}
|
||||
for _, gpu := range oneapiGPUs {
|
||||
resp = append(resp, gpu.GpuInfo)
|
||||
}
|
||||
if len(resp) == 0 {
|
||||
C.cpu_check_ram(&memInfo)
|
||||
if memInfo.err != nil {
|
||||
slog.Info("error looking up CPU memory", "error", C.GoString(memInfo.err))
|
||||
C.free(unsafe.Pointer(memInfo.err))
|
||||
return resp
|
||||
}
|
||||
gpuInfo := GpuInfo{
|
||||
Library: "cpu",
|
||||
Variant: cpuVariant,
|
||||
}
|
||||
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||
|
||||
resp = append(resp, gpuInfo)
|
||||
resp = append(resp, cpus[0].GpuInfo)
|
||||
}
|
||||
|
||||
return resp
|
||||
}
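
The restructured GetGPUInfo above performs discovery once and then only refreshes free-memory figures on later calls. A condensed, non-authoritative outline of that flow for orientation:

// Outline of GetGPUInfo (paraphrase, not the implementation):
//
//   gpuMutex.Lock() / defer Unlock()
//
//   if !bootstrapped:
//       record CPU info and capability; bail out to CPU-only if the
//       required vector extensions are missing on amd64
//       initCudaHandles()   -> enumerate CUDA devices into cudaGPUs
//       initOneAPIHandles() -> enumerate Intel devices when OLLAMA_INTEL_GPU is set
//       AMDGetGPUInfo()     -> rocmGPUs
//       bootstrapped = true
//   else:
//       refresh system memory, CUDA/oneAPI VRAM, and
//       RocmGPUInfoList.RefreshFreeMemory()
//
//   finally flatten cudaGPUs + rocmGPUs + oneapiGPUs into a GpuInfoList,
//   falling back to the cached CPU entry when no GPU survived.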
|
||||
|
||||
func GetCPUMem() (memInfo, error) {
|
||||
var ret memInfo
|
||||
var info C.mem_info_t
|
||||
C.cpu_check_ram(&info)
|
||||
if info.err != nil {
|
||||
defer C.free(unsafe.Pointer(info.err))
|
||||
return ret, fmt.Errorf(C.GoString(info.err))
|
||||
}
|
||||
ret.FreeMemory = uint64(info.free)
|
||||
ret.TotalMemory = uint64(info.total)
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
|
||||
// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
|
||||
var ldPaths []string
|
||||
@@ -353,7 +517,23 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
|
||||
defer C.free(unsafe.Pointer(lib))
|
||||
C.nvcuda_init(lib, &resp)
|
||||
if resp.err != nil {
|
||||
slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err))
|
||||
// Decide what log level based on the type of error message to help users understand why
|
||||
msg := C.GoString(resp.err)
|
||||
switch resp.cudaErr {
|
||||
case C.CUDA_ERROR_INSUFFICIENT_DRIVER, C.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH:
|
||||
slog.Warn("version mismatch between driver and cuda driver library - reboot or upgrade may be required", "library", libPath, "error", msg)
|
||||
case C.CUDA_ERROR_NO_DEVICE:
|
||||
slog.Info("no nvidia devices detected", "library", libPath)
|
||||
case C.CUDA_ERROR_UNKNOWN:
|
||||
slog.Warn("unknown error initializing cuda driver library", "library", libPath, "error", msg)
|
||||
slog.Warn("see https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for more information")
|
||||
default:
|
||||
if strings.Contains(msg, "wrong ELF class") {
|
||||
slog.Debug("skipping 32bit library", "library", libPath)
|
||||
} else {
|
||||
slog.Info("unable to load cuda driver library", "library", libPath, "error", msg)
|
||||
}
|
||||
}
|
||||
C.free(unsafe.Pointer(resp.err))
|
||||
} else {
|
||||
return int(resp.num_devices), &resp.ch, libPath
|
||||
@@ -362,8 +542,26 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
|
||||
return 0, nil, ""
|
||||
}
|
||||
|
||||
func LoadNVMLMgmt(nvmlLibPaths []string) (*C.nvml_handle_t, string) {
|
||||
var resp C.nvml_init_resp_t
|
||||
resp.ch.verbose = getVerboseState()
|
||||
for _, libPath := range nvmlLibPaths {
|
||||
lib := C.CString(libPath)
|
||||
defer C.free(unsafe.Pointer(lib))
|
||||
C.nvml_init(lib, &resp)
|
||||
if resp.err != nil {
|
||||
slog.Info(fmt.Sprintf("Unable to load NVML management library %s: %s", libPath, C.GoString(resp.err)))
|
||||
C.free(unsafe.Pointer(resp.err))
|
||||
} else {
|
||||
return &resp.ch, libPath
|
||||
}
|
||||
}
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
|
||||
var resp C.oneapi_init_resp_t
|
||||
num_devices := 0
|
||||
resp.oh.verbose = getVerboseState()
|
||||
for _, libPath := range oneapiLibPaths {
|
||||
lib := C.CString(libPath)
|
||||
@@ -373,7 +571,10 @@ func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
|
||||
slog.Debug("Unable to load oneAPI management library", "library", libPath, "error", C.GoString(resp.err))
|
||||
C.free(unsafe.Pointer(resp.err))
|
||||
} else {
|
||||
return int(resp.num_devices), &resp.oh, libPath
|
||||
for i := range resp.oh.num_drivers {
|
||||
num_devices += int(C.oneapi_get_device_count(resp.oh, C.int(i)))
|
||||
}
|
||||
return num_devices, &resp.oh, libPath
|
||||
}
|
||||
}
|
||||
return 0, nil, ""
|
||||
|
@@ -24,7 +24,7 @@ func GetGPUInfo() GpuInfoList {
|
||||
return []GpuInfo{
|
||||
{
|
||||
Library: "cpu",
|
||||
Variant: GetCPUVariant(),
|
||||
Variant: GetCPUCapability(),
|
||||
memInfo: mem,
|
||||
},
|
||||
}
|
||||
@@ -42,10 +42,21 @@ func GetGPUInfo() GpuInfoList {
|
||||
return []GpuInfo{info}
|
||||
}
|
||||
|
||||
func GetCPUInfo() GpuInfoList {
|
||||
mem, _ := GetCPUMem()
|
||||
return []GpuInfo{
|
||||
{
|
||||
Library: "cpu",
|
||||
Variant: GetCPUCapability(),
|
||||
memInfo: mem,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func GetCPUMem() (memInfo, error) {
|
||||
return memInfo{
|
||||
TotalMemory: uint64(C.getPhysicalMemory()),
|
||||
FreeMemory: 0,
|
||||
FreeMemory: uint64(C.getFreeMemory()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@@ -47,6 +47,7 @@ typedef struct mem_info {
|
||||
char gpu_name[GPU_NAME_LEN];
|
||||
uint64_t total;
|
||||
uint64_t free;
|
||||
uint64_t used;
|
||||
|
||||
// Compute Capability
|
||||
int major;
|
||||
@@ -62,6 +63,7 @@ void cpu_check_ram(mem_info_t *resp);
|
||||
|
||||
#include "gpu_info_cudart.h"
|
||||
#include "gpu_info_nvcuda.h"
|
||||
#include "gpu_info_nvml.h"
|
||||
#include "gpu_info_oneapi.h"
|
||||
|
||||
#endif // __GPU_INFO_H__
|
||||
|
@@ -1,45 +0,0 @@
|
||||
#include "gpu_info.h"
|
||||
// Fallbacks for CPU mode
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <sysinfoapi.h>
|
||||
void cpu_check_ram(mem_info_t *resp) {
|
||||
resp->err = NULL;
|
||||
MEMORYSTATUSEX info;
|
||||
info.dwLength = sizeof(info);
|
||||
if (GlobalMemoryStatusEx(&info) != 0) {
|
||||
resp->total = info.ullTotalPhys;
|
||||
resp->free = info.ullAvailPhys;
|
||||
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "0");
|
||||
} else {
|
||||
resp->err = LOAD_ERR();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#elif __linux__
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <sys/sysinfo.h>
|
||||
void cpu_check_ram(mem_info_t *resp) {
|
||||
struct sysinfo info;
|
||||
resp->err = NULL;
|
||||
if (sysinfo(&info) != 0) {
|
||||
resp->err = strdup(strerror(errno));
|
||||
} else {
|
||||
resp->total = info.totalram * info.mem_unit;
|
||||
resp->free = info.freeram * info.mem_unit;
|
||||
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "0");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
#elif __APPLE__
|
||||
// TODO consider an Apple implementation that does something useful
|
||||
// mem_info_t cpu_check_ram() {
|
||||
// mem_info_t resp = {0, 0, NULL};
|
||||
// return resp;
|
||||
// }
|
||||
#else
|
||||
#error "Unsupported platform"
|
||||
#endif
|
@@ -40,7 +40,7 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
||||
|
||||
for (i = 0; l[i].s != NULL; i++) {
|
||||
*l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
|
||||
if (!l[i].p) {
|
||||
if (!*(l[i].p)) {
|
||||
char *msg = LOAD_ERR();
|
||||
LOG(resp->ch.verbose, "dlerr: %s\n", msg);
|
||||
UNLOAD_LIBRARY(resp->ch.handle);
|
||||
@@ -94,7 +94,7 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
|
||||
}
|
||||
|
||||
|
||||
void cudart_check_vram(cudart_handle_t h, int i, mem_info_t *resp) {
|
||||
void cudart_bootstrap(cudart_handle_t h, int i, mem_info_t *resp) {
|
||||
resp->err = NULL;
|
||||
cudartMemory_t memInfo = {0,0,0};
|
||||
cudartReturn_t ret;
|
||||
@@ -166,9 +166,11 @@ void cudart_check_vram(cudart_handle_t h, int i, mem_info_t *resp) {
|
||||
|
||||
resp->total = memInfo.total;
|
||||
resp->free = memInfo.free;
|
||||
resp->used = memInfo.used;
|
||||
|
||||
LOG(h.verbose, "[%s] CUDA totalMem %lu\n", resp->gpu_id, resp->total);
|
||||
LOG(h.verbose, "[%s] CUDA freeMem %lu\n", resp->gpu_id, resp->free);
|
||||
LOG(h.verbose, "[%s] CUDA usedMem %lu\n", resp->gpu_id, resp->used);
|
||||
LOG(h.verbose, "[%s] Compute Capability %d.%d\n", resp->gpu_id, resp->major, resp->minor);
|
||||
}
|
||||
|
||||
|
@@ -140,7 +140,8 @@ typedef struct cudart_init_resp {
|
||||
} cudart_init_resp_t;
|
||||
|
||||
void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp);
|
||||
void cudart_check_vram(cudart_handle_t ch, int device_id, mem_info_t *resp);
|
||||
void cudart_bootstrap(cudart_handle_t ch, int device_id, mem_info_t *resp);
|
||||
// TODO - if we keep this library longer term, add cudart_get_free
|
||||
void cudart_release(cudart_handle_t ch);
|
||||
|
||||
#endif // __GPU_INFO_CUDART_H__
|
||||
|
@@ -2,3 +2,4 @@
|
||||
#include <stdint.h>
|
||||
uint64_t getRecommendedMaxVRAM();
|
||||
uint64_t getPhysicalMemory();
|
||||
uint64_t getFreeMemory();
|
||||
|
@@ -1,4 +1,5 @@
|
||||
// go:build darwin
|
||||
#import <Foundation/Foundation.h>
|
||||
#import <mach/mach.h>
|
||||
#include "gpu_info_darwin.h"
|
||||
|
||||
uint64_t getRecommendedMaxVRAM() {
|
||||
@@ -8,6 +9,27 @@ uint64_t getRecommendedMaxVRAM() {
|
||||
return result;
|
||||
}
|
||||
|
||||
// getPhysicalMemory returns the total physical memory in bytes
|
||||
uint64_t getPhysicalMemory() {
|
||||
return [[NSProcessInfo processInfo] physicalMemory];
|
||||
return [NSProcessInfo processInfo].physicalMemory;
|
||||
}
|
||||
|
||||
// getFreeMemory returns the total free memory in bytes, including inactive
|
||||
// memory that can be reclaimed by the system.
|
||||
uint64_t getFreeMemory() {
|
||||
mach_port_t host_port = mach_host_self();
|
||||
mach_msg_type_number_t host_size = sizeof(vm_statistics64_data_t) / sizeof(integer_t);
|
||||
vm_size_t pagesize;
|
||||
vm_statistics64_data_t vm_stat;
|
||||
|
||||
host_page_size(host_port, &pagesize);
|
||||
if (host_statistics64(host_port, HOST_VM_INFO64, (host_info64_t)&vm_stat, &host_size) != KERN_SUCCESS) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t free_memory = (uint64_t)vm_stat.free_count * pagesize;
|
||||
free_memory += (uint64_t)vm_stat.speculative_count * pagesize;
|
||||
free_memory += (uint64_t)vm_stat.inactive_count * pagesize;
|
||||
|
||||
return free_memory;
|
||||
}
|
||||
|
@@ -7,6 +7,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
|
||||
CUresult ret;
|
||||
resp->err = NULL;
|
||||
resp->num_devices = 0;
|
||||
resp->cudaErr = CUDA_SUCCESS;
|
||||
const int buflen = 256;
|
||||
char buf[buflen + 1];
|
||||
int i;
|
||||
@@ -38,12 +39,13 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
|
||||
nvcuda_lib_path, msg);
|
||||
free(msg);
|
||||
resp->err = strdup(buf);
|
||||
resp->cudaErr = -1;
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; l[i].s != NULL; i++) {
|
||||
*l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
|
||||
if (!*l[i].p) {
|
||||
if (!*(l[i].p)) {
|
||||
char *msg = LOAD_ERR();
|
||||
LOG(resp->ch.verbose, "dlerr: %s\n", msg);
|
||||
UNLOAD_LIBRARY(resp->ch.handle);
|
||||
@@ -52,6 +54,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
|
||||
msg);
|
||||
free(msg);
|
||||
resp->err = strdup(buf);
|
||||
resp->cudaErr = -1;
|
||||
return;
|
||||
}
|
||||
}
|
||||
@@ -61,12 +64,9 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
|
||||
LOG(resp->ch.verbose, "cuInit err: %d\n", ret);
|
||||
UNLOAD_LIBRARY(resp->ch.handle);
|
||||
resp->ch.handle = NULL;
|
||||
if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) {
|
||||
resp->err = strdup("your nvidia driver is too old or missing. If you have a CUDA GPU please upgrade to run ollama");
|
||||
return;
|
||||
}
|
||||
snprintf(buf, buflen, "nvcuda init failure: %d", ret);
|
||||
snprintf(buf, buflen, "cuda driver library init failure: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
resp->cudaErr = ret;
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -91,12 +91,13 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
|
||||
resp->ch.handle = NULL;
|
||||
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
resp->cudaErr = ret;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
const int buflen = 256;
|
||||
void nvcuda_check_vram(nvcuda_handle_t h, int i, mem_info_t *resp) {
|
||||
void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {
|
||||
resp->err = NULL;
|
||||
nvcudaMemory_t memInfo = {0,0};
|
||||
CUresult ret;
|
||||
@@ -106,13 +107,13 @@ void nvcuda_check_vram(nvcuda_handle_t h, int i, mem_info_t *resp) {
|
||||
CUuuid uuid = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
|
||||
|
||||
if (h.handle == NULL) {
|
||||
resp->err = strdup("nvcuda handle isn't initialized");
|
||||
resp->err = strdup("cuda driver library handle isn't initialized");
|
||||
return;
|
||||
}
|
||||
|
||||
ret = (*h.cuDeviceGet)(&device, i);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
snprintf(buf, buflen, "nvcuda device failed to initialize");
|
||||
snprintf(buf, buflen, "cuda driver library device failed to initialize");
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
@@ -168,14 +169,14 @@ void nvcuda_check_vram(nvcuda_handle_t h, int i, mem_info_t *resp) {
|
||||
// To get memory we have to set (and release) a context
|
||||
ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
snprintf(buf, buflen, "nvcuda failed to get primary device context %d", ret);
|
||||
snprintf(buf, buflen, "cuda driver library failed to get device context %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = (*h.cuMemGetInfo_v2)(&memInfo.free, &memInfo.total);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
snprintf(buf, buflen, "nvcuda device memory info lookup failure %d", ret);
|
||||
snprintf(buf, buflen, "cuda driver library device memory info lookup failure %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
// Best effort on failure...
|
||||
(*h.cuCtxDestroy)(ctx);
|
||||
@@ -193,12 +194,47 @@ void nvcuda_check_vram(nvcuda_handle_t h, int i, mem_info_t *resp) {
|
||||
|
||||
ret = (*h.cuCtxDestroy)(ctx);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
LOG(1, "nvcuda failed to release primary device context %d", ret);
|
||||
LOG(1, "cuda driver library failed to release device context %d", ret);
|
||||
}
|
||||
}
|
||||
|
||||
void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) {
|
||||
CUresult ret;
|
||||
CUcontext ctx = NULL;
|
||||
CUdevice device = -1;
|
||||
*free = 0;
|
||||
*total = 0;
|
||||
|
||||
ret = (*h.cuDeviceGet)(&device, i);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
LOG(1, "cuda driver library device failed to initialize");
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
// To get memory we have to set (and release) a context
|
||||
ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
LOG(1, "cuda driver library failed to get device context %d", ret);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = (*h.cuMemGetInfo_v2)(free, total);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
LOG(1, "cuda driver library device memory info lookup failure %d", ret);
|
||||
// Best effort on failure...
|
||||
(*h.cuCtxDestroy)(ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = (*h.cuCtxDestroy)(ctx);
|
||||
if (ret != CUDA_SUCCESS) {
|
||||
LOG(1, "cuda driver library failed to release device context %d", ret);
|
||||
}
|
||||
}
|
||||
|
||||
void nvcuda_release(nvcuda_handle_t h) {
|
||||
LOG(h.verbose, "releasing nvcuda library\n");
|
||||
LOG(h.verbose, "releasing cuda driver library\n");
|
||||
UNLOAD_LIBRARY(h.handle);
|
||||
// TODO and other context release logic?
|
||||
h.handle = NULL;
|
||||
|
@@ -7,9 +7,12 @@
|
||||
typedef enum cudaError_enum {
|
||||
CUDA_SUCCESS = 0,
|
||||
CUDA_ERROR_INVALID_VALUE = 1,
|
||||
CUDA_ERROR_MEMORY_ALLOCATION = 2,
|
||||
CUDA_ERROR_OUT_OF_MEMORY = 2,
|
||||
CUDA_ERROR_NOT_INITIALIZED = 3,
|
||||
CUDA_ERROR_INSUFFICIENT_DRIVER = 35,
|
||||
CUDA_ERROR_NO_DEVICE = 100,
|
||||
CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803,
|
||||
CUDA_ERROR_UNKNOWN = 999,
|
||||
// Other values omitted for now...
|
||||
} CUresult;
|
||||
|
||||
@@ -64,10 +67,12 @@ typedef struct nvcuda_init_resp {
|
||||
char *err; // If err is non-null handle is invalid
|
||||
nvcuda_handle_t ch;
|
||||
int num_devices;
|
||||
CUresult cudaErr;
|
||||
} nvcuda_init_resp_t;
|
||||
|
||||
void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp);
|
||||
void nvcuda_check_vram(nvcuda_handle_t ch, int device_id, mem_info_t *resp);
|
||||
void nvcuda_bootstrap(nvcuda_handle_t ch, int device_id, mem_info_t *resp);
|
||||
void nvcuda_get_free(nvcuda_handle_t ch, int device_id, uint64_t *free, uint64_t *total);
|
||||
void nvcuda_release(nvcuda_handle_t ch);
|
||||
|
||||
#endif // __GPU_INFO_NVCUDA_H__
|
||||
|
gpu/gpu_info_nvml.c (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
#ifndef __APPLE__ // TODO - maybe consider nvidia support on intel macs?
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "gpu_info_nvml.h"
|
||||
|
||||
void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp) {
|
||||
nvmlReturn_t ret;
|
||||
resp->err = NULL;
|
||||
const int buflen = 256;
|
||||
char buf[buflen + 1];
|
||||
int i;
|
||||
|
||||
struct lookup {
|
||||
char *s;
|
||||
void **p;
|
||||
} l[] = {
|
||||
{"nvmlInit_v2", (void *)&resp->ch.nvmlInit_v2},
|
||||
{"nvmlShutdown", (void *)&resp->ch.nvmlShutdown},
|
||||
{"nvmlDeviceGetHandleByIndex", (void *)&resp->ch.nvmlDeviceGetHandleByIndex},
|
||||
{"nvmlDeviceGetMemoryInfo", (void *)&resp->ch.nvmlDeviceGetMemoryInfo},
|
||||
{NULL, NULL},
|
||||
};
|
||||
|
||||
resp->ch.handle = LOAD_LIBRARY(nvml_lib_path, RTLD_LAZY);
|
||||
if (!resp->ch.handle) {
|
||||
char *msg = LOAD_ERR();
|
||||
LOG(resp->ch.verbose, "library %s load err: %s\n", nvml_lib_path, msg);
|
||||
snprintf(buf, buflen,
|
||||
"Unable to load %s library to query for Nvidia GPUs: %s",
|
||||
nvml_lib_path, msg);
|
||||
free(msg);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO once we've squashed the remaining corner cases remove this log
|
||||
// LOG(resp->ch.verbose, "wiring nvidia management library functions in %s\n", nvml_lib_path);
|
||||
|
||||
for (i = 0; l[i].s != NULL; i++) {
|
||||
// TODO once we've squashed the remaining corner cases remove this log
|
||||
// LOG(resp->ch.verbose, "dlsym: %s\n", l[i].s);
|
||||
|
||||
*l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
|
||||
if (!*(l[i].p)) {
|
||||
resp->ch.handle = NULL;
|
||||
char *msg = LOAD_ERR();
|
||||
LOG(resp->ch.verbose, "dlerr: %s\n", msg);
|
||||
UNLOAD_LIBRARY(resp->ch.handle);
|
||||
snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s,
|
||||
msg);
|
||||
free(msg);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
ret = (*resp->ch.nvmlInit_v2)();
|
||||
if (ret != NVML_SUCCESS) {
|
||||
LOG(resp->ch.verbose, "nvmlInit_v2 err: %d\n", ret);
|
||||
UNLOAD_LIBRARY(resp->ch.handle);
|
||||
resp->ch.handle = NULL;
|
||||
snprintf(buf, buflen, "nvml vram init failure: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void nvml_get_free(nvml_handle_t h, int device_id, uint64_t *free, uint64_t *total, uint64_t *used) {
|
||||
nvmlDevice_t device;
|
||||
nvmlMemory_t memInfo = {0};
|
||||
nvmlReturn_t ret;
|
||||
ret = (*h.nvmlDeviceGetHandleByIndex)(device_id, &device);
|
||||
if (ret != NVML_SUCCESS) {
|
||||
LOG(1, "unable to get device handle %d: %d", device_id, ret);
|
||||
*free = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
ret = (*h.nvmlDeviceGetMemoryInfo)(device, &memInfo);
|
||||
if (ret != NVML_SUCCESS) {
|
||||
LOG(1, "device memory info lookup failure %d: %d", device_id, ret);
|
||||
*free = 0;
|
||||
return;
|
||||
}
|
||||
*free = memInfo.free;
|
||||
*total = memInfo.total;
|
||||
*used = memInfo.used;
|
||||
}
|
||||
|
||||
|
||||
void nvml_release(nvml_handle_t h) {
|
||||
LOG(h.verbose, "releasing nvml library\n");
|
||||
nvmlReturn_t ret;
|
||||
ret = (*h.nvmlShutdown)();
|
||||
if (ret != NVML_SUCCESS) {
|
||||
LOG(1, "error during nvmlShutdown %d", ret);
|
||||
}
|
||||
UNLOAD_LIBRARY(h.handle);
|
||||
h.handle = NULL;
|
||||
}
|
||||
|
||||
#endif // __APPLE__
|
gpu/gpu_info_nvml.h (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
#ifndef __APPLE__
|
||||
#ifndef __GPU_INFO_NVML_H__
|
||||
#define __GPU_INFO_NVML_H__
|
||||
#include "gpu_info.h"
|
||||
|
||||
// Just enough typedefs to dlopen/dlsym for memory information
|
||||
typedef enum nvmlReturn_enum {
|
||||
NVML_SUCCESS = 0,
|
||||
// Other values omitted for now...
|
||||
} nvmlReturn_t;
|
||||
typedef void *nvmlDevice_t; // Opaque is sufficient
|
||||
typedef struct nvmlMemory_st {
|
||||
unsigned long long total;
|
||||
unsigned long long free;
|
||||
unsigned long long used;
|
||||
} nvmlMemory_t;
|
||||
|
||||
typedef enum nvmlBrandType_enum
|
||||
{
|
||||
NVML_BRAND_UNKNOWN = 0,
|
||||
} nvmlBrandType_t;
|
||||
|
||||
typedef struct nvml_handle {
|
||||
void *handle;
|
||||
uint16_t verbose;
|
||||
nvmlReturn_t (*nvmlInit_v2)(void);
|
||||
nvmlReturn_t (*nvmlShutdown)(void);
|
||||
nvmlReturn_t (*nvmlDeviceGetHandleByIndex)(unsigned int, nvmlDevice_t *);
|
||||
nvmlReturn_t (*nvmlDeviceGetMemoryInfo)(nvmlDevice_t, nvmlMemory_t *);
|
||||
} nvml_handle_t;
|
||||
|
||||
typedef struct nvml_init_resp {
|
||||
char *err; // If err is non-null handle is invalid
|
||||
nvml_handle_t ch;
|
||||
} nvml_init_resp_t;
|
||||
|
||||
typedef struct nvml_compute_capability {
|
||||
char *err;
|
||||
int major;
|
||||
int minor;
|
||||
} nvml_compute_capability_t;
|
||||
|
||||
void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp);
|
||||
void nvml_get_free(nvml_handle_t ch, int device_id, uint64_t *free, uint64_t *total, uint64_t *used);
|
||||
void nvml_release(nvml_handle_t ch);
|
||||
|
||||
#endif // __GPU_INFO_NVML_H__
|
||||
#endif // __APPLE__
|
@@ -4,15 +4,17 @@
|
||||
|
||||
#include <string.h>
|
||||
|
||||
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp)
|
||||
{
|
||||
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp) {
|
||||
ze_result_t ret;
|
||||
resp->err = NULL;
|
||||
resp->oh.devices = NULL;
|
||||
resp->oh.num_devices = NULL;
|
||||
resp->oh.drivers = NULL;
|
||||
resp->oh.num_drivers = 0;
|
||||
const int buflen = 256;
|
||||
char buf[buflen + 1];
|
||||
int i;
|
||||
struct lookup
|
||||
{
|
||||
int i, d;
|
||||
struct lookup {
|
||||
char *s;
|
||||
void **p;
|
||||
} l[] = {
|
||||
@@ -28,8 +30,7 @@ void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp)
|
||||
};
|
||||
|
||||
resp->oh.handle = LOAD_LIBRARY(oneapi_lib_path, RTLD_LAZY);
|
||||
if (!resp->oh.handle)
|
||||
{
|
||||
if (!resp->oh.handle) {
|
||||
char *msg = LOAD_ERR();
|
||||
snprintf(buf, buflen,
|
||||
"Unable to load %s library to query for Intel GPUs: %s\n",
|
||||
@@ -44,14 +45,12 @@ void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp)
|
||||
"wiring Level-Zero management library functions in %s\n",
|
||||
oneapi_lib_path);
|
||||
|
||||
for (i = 0; l[i].s != NULL; i++)
|
||||
{
|
||||
for (i = 0; l[i].s != NULL; i++) {
|
||||
// TODO once we've squashed the remaining corner cases remove this log
|
||||
LOG(resp->oh.verbose, "dlsym: %s\n", l[i].s);
|
||||
|
||||
*l[i].p = LOAD_SYMBOL(resp->oh.handle, l[i].s);
|
||||
if (!l[i].p)
|
||||
{
|
||||
if (!*(l[i].p)) {
|
||||
resp->oh.handle = NULL;
|
||||
char *msg = LOAD_ERR();
|
||||
LOG(resp->oh.verbose, "dlerr: %s\n", msg);
|
||||
@@ -63,23 +62,70 @@ void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp)
|
||||
}
|
||||
}
|
||||
|
||||
LOG(resp->oh.verbose, "calling zesInit\n");
|
||||
|
||||
ret = (*resp->oh.zesInit)(0);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
LOG(resp->oh.verbose, "zesInit err: %d\n", ret);
|
||||
UNLOAD_LIBRARY(resp->oh.handle);
|
||||
resp->oh.handle = NULL;
|
||||
snprintf(buf, buflen, "oneapi vram init failure: %d", ret);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
LOG(resp->oh.verbose, "zesInit err: %x\n", ret);
|
||||
snprintf(buf, buflen, "oneapi vram init failure: %x", ret);
|
||||
resp->err = strdup(buf);
|
||||
oneapi_release(resp->oh);
|
||||
return;
|
||||
}
|
||||
|
||||
(*resp->oh.zesDriverGet)(&resp->num_devices, NULL);
|
||||
LOG(resp->oh.verbose, "calling zesDriverGet\n");
|
||||
ret = (*resp->oh.zesDriverGet)(&resp->oh.num_drivers, NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
LOG(resp->oh.verbose, "zesDriverGet err: %x\n", ret);
|
||||
snprintf(buf, buflen, "unable to get driver count: %x", ret);
|
||||
resp->err = strdup(buf);
|
||||
oneapi_release(resp->oh);
|
||||
return;
|
||||
}
|
||||
LOG(resp->oh.verbose, "oneapi driver count: %d\n", resp->oh.num_drivers);
|
||||
resp->oh.drivers = malloc(resp->oh.num_drivers * sizeof(zes_driver_handle_t));
|
||||
resp->oh.num_devices = malloc(resp->oh.num_drivers * sizeof(uint32_t));
|
||||
memset(&resp->oh.num_devices[0], 0, resp->oh.num_drivers * sizeof(uint32_t));
|
||||
resp->oh.devices =
|
||||
malloc(resp->oh.num_drivers * sizeof(zes_device_handle_t *));
|
||||
ret = (*resp->oh.zesDriverGet)(&resp->oh.num_drivers, &resp->oh.drivers[0]);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
LOG(resp->oh.verbose, "zesDriverGet err: %x\n", ret);
|
||||
snprintf(buf, buflen, "unable to get driver count: %x", ret);
|
||||
resp->err = strdup(buf);
|
||||
oneapi_release(resp->oh);
|
||||
return;
|
||||
}
|
||||
|
||||
for (d = 0; d < resp->oh.num_drivers; d++) {
|
||||
LOG(resp->oh.verbose, "calling zesDeviceGet count %d: %p\n", d, resp->oh.drivers[d]);
|
||||
ret = (*resp->oh.zesDeviceGet)(resp->oh.drivers[d],
|
||||
&resp->oh.num_devices[d], NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
LOG(resp->oh.verbose, "zesDeviceGet err: %x\n", ret);
|
||||
snprintf(buf, buflen, "unable to get device count: %x", ret);
|
||||
resp->err = strdup(buf);
|
||||
oneapi_release(resp->oh);
|
||||
return;
|
||||
}
|
||||
resp->oh.devices[d] =
|
||||
malloc(resp->oh.num_devices[d] * sizeof(zes_device_handle_t));
|
||||
ret = (*resp->oh.zesDeviceGet)(
|
||||
resp->oh.drivers[d], &resp->oh.num_devices[d], resp->oh.devices[d]);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
LOG(resp->oh.verbose, "zesDeviceGet err: %x\n", ret);
|
||||
snprintf(buf, buflen, "unable to get device count: %x", ret);
|
||||
resp->err = strdup(buf);
|
||||
oneapi_release(resp->oh);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
|
||||
{
|
||||
void oneapi_check_vram(oneapi_handle_t h, int driver, int device,
|
||||
mem_info_t *resp) {
|
||||
ze_result_t ret;
|
||||
resp->err = NULL;
|
||||
uint64_t totalMem = 0;
|
||||
@@ -88,127 +134,126 @@ void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
|
||||
char buf[buflen + 1];
|
||||
int i, d, m;
|
||||
|
||||
if (h.handle == NULL)
|
||||
{
|
||||
if (h.handle == NULL) {
|
||||
resp->err = strdup("Level-Zero handle not initialized");
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t driversCount = 0;
|
||||
ret = (*h.zesDriverGet)(&driversCount, NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get driver count: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
if (driver > h.num_drivers || device > h.num_devices[driver]) {
|
||||
resp->err = strdup("driver of device index out of bounds");
|
||||
return;
|
||||
}
|
||||
LOG(h.verbose, "discovered %d Level-Zero drivers\n", driversCount);
|
||||
|
||||
zes_driver_handle_t *allDrivers =
|
||||
malloc(driversCount * sizeof(zes_driver_handle_t));
|
||||
(*h.zesDriverGet)(&driversCount, allDrivers);
|
||||
|
||||
resp->total = 0;
|
||||
resp->free = 0;
|
||||
|
||||
for (d = 0; d < driversCount; d++)
|
||||
{
|
||||
uint32_t deviceCount = 0;
|
||||
ret = (*h.zesDeviceGet)(allDrivers[d], &deviceCount, NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
||||
zes_device_ext_properties_t ext_props;
|
||||
ext_props.stype = ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES;
|
||||
ext_props.pNext = NULL;
|
||||
|
||||
zes_device_properties_t props;
|
||||
props.stype = ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES;
|
||||
props.pNext = &ext_props;
|
||||
|
||||
ret = (*h.zesDeviceGetProperties)(h.devices[driver][device], &props);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
snprintf(buf, buflen, "unable to get device properties: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
|
||||
snprintf(&resp->gpu_name[0], GPU_NAME_LEN, "%s", props.modelName);
|
||||
|
||||
// TODO this needs to map to ONEAPI_DEVICE_SELECTOR syntax
|
||||
// (this is probably wrong...)
|
||||
// TODO - the driver isn't included - what if there are multiple drivers?
|
||||
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "%d", device);
|
||||
|
||||
if (h.verbose) {
|
||||
// When in verbose mode, report more information about
|
||||
// the card we discover.
|
||||
LOG(h.verbose, "[%d:%d] oneAPI device name: %s\n", driver, device,
|
||||
props.modelName);
|
||||
LOG(h.verbose, "[%d:%d] oneAPI brand: %s\n", driver, device,
|
||||
props.brandName);
|
||||
LOG(h.verbose, "[%d:%d] oneAPI vendor: %s\n", driver, device,
|
||||
props.vendorName);
|
||||
LOG(h.verbose, "[%d:%d] oneAPI S/N: %s\n", driver, device,
|
||||
props.serialNumber);
|
||||
LOG(h.verbose, "[%d:%d] oneAPI board number: %s\n", driver, device,
|
||||
props.boardNumber);
|
||||
}
|
||||
|
||||
// TODO
|
||||
// Compute Capability equivalent in resp->major, resp->minor, resp->patch
|
||||
|
||||
uint32_t memCount = 0;
|
||||
ret = (*h.zesDeviceEnumMemoryModules)(h.devices[driver][device], &memCount,
|
||||
NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
snprintf(buf, buflen, "unable to enumerate Level-Zero memory modules: %x",
|
||||
ret);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG(h.verbose, "discovered %d Level-Zero memory modules\n", memCount);
|
||||
|
||||
zes_mem_handle_t *mems = malloc(memCount * sizeof(zes_mem_handle_t));
|
||||
(*h.zesDeviceEnumMemoryModules)(h.devices[driver][device], &memCount, mems);
|
||||
|
||||
for (m = 0; m < memCount; m++) {
|
||||
zes_mem_state_t state;
|
||||
state.stype = ZES_STRUCTURE_TYPE_MEM_STATE;
|
||||
state.pNext = NULL;
|
||||
ret = (*h.zesMemoryGetState)(mems[m], &state);
|
||||
if (ret != ZE_RESULT_SUCCESS) {
|
||||
snprintf(buf, buflen, "unable to get memory state: %x", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
free(mems);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG(h.verbose, "discovered %d Level-Zero devices\n", deviceCount);
|
||||
|
||||
zes_device_handle_t *devices =
|
||||
malloc(deviceCount * sizeof(zes_device_handle_t));
|
||||
(*h.zesDeviceGet)(allDrivers[d], &deviceCount, devices);
|
||||
|
||||
for (i = 0; i < deviceCount; i++)
|
||||
{
|
||||
zes_device_ext_properties_t ext_props;
|
||||
ext_props.stype = ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES;
|
||||
ext_props.pNext = NULL;
|
||||
|
||||
zes_device_properties_t props;
|
||||
props.stype = ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES;
|
||||
props.pNext = &ext_props;
|
||||
|
||||
ret = (*h.zesDeviceGetProperties)(devices[i], &props);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get device properties: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
free(devices);
|
||||
return;
|
||||
}
|
||||
|
||||
if (h.verbose)
|
||||
{
|
||||
// When in verbose mode, report more information about
|
||||
// the card we discover.
|
||||
LOG(h.verbose, "[%d] oneAPI device name: %s\n", i,
|
||||
props.modelName);
|
||||
LOG(h.verbose, "[%d] oneAPI brand: %s\n", i,
|
||||
props.brandName);
|
||||
LOG(h.verbose, "[%d] oneAPI vendor: %s\n", i,
|
||||
props.vendorName);
|
||||
LOG(h.verbose, "[%d] oneAPI S/N: %s\n", i,
|
||||
props.serialNumber);
|
||||
LOG(h.verbose, "[%d] oneAPI board number: %s\n", i,
|
||||
props.boardNumber);
|
||||
}
|
||||
|
||||
uint32_t memCount = 0;
|
||||
ret = (*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen,
|
||||
"unable to enumerate Level-Zero memory modules: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
free(devices);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG(h.verbose, "discovered %d Level-Zero memory modules\n", memCount);
|
||||
|
||||
zes_mem_handle_t *mems = malloc(memCount * sizeof(zes_mem_handle_t));
|
||||
(*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, mems);
|
||||
|
||||
for (m = 0; m < memCount; m++)
|
||||
{
|
||||
zes_mem_state_t state;
|
||||
state.stype = ZES_STRUCTURE_TYPE_MEM_STATE;
|
||||
state.pNext = NULL;
|
||||
ret = (*h.zesMemoryGetState)(mems[m], &state);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get memory state: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
free(devices);
|
||||
free(mems);
|
||||
return;
|
||||
}
|
||||
|
||||
resp->total += state.size;
|
||||
resp->free += state.free;
|
||||
}
|
||||
|
||||
free(mems);
|
||||
}
|
||||
|
||||
free(devices);
|
||||
resp->total += state.size;
|
||||
resp->free += state.free;
|
||||
}
|
||||
|
||||
free(allDrivers);
|
||||
free(mems);
|
||||
}
|
||||
|
||||
void oneapi_release(oneapi_handle_t h) {
|
||||
int d;
|
||||
LOG(h.verbose, "releasing oneapi library\n");
|
||||
for (d = 0; d < h.num_drivers; d++) {
|
||||
if (h.devices != NULL && h.devices[d] != NULL) {
|
||||
free(h.devices[d]);
|
||||
}
|
||||
}
|
||||
if (h.devices != NULL) {
|
||||
free(h.devices);
|
||||
h.devices = NULL;
|
||||
}
|
||||
if (h.num_devices != NULL) {
|
||||
free(h.num_devices);
|
||||
h.num_devices = NULL;
|
||||
}
|
||||
if (h.drivers != NULL) {
|
||||
free(h.drivers);
|
||||
h.drivers = NULL;
|
||||
}
|
||||
h.num_drivers = 0;
|
||||
UNLOAD_LIBRARY(h.handle);
|
||||
h.handle = NULL;
|
||||
}
|
||||
|
||||
int oneapi_get_device_count(oneapi_handle_t h, int driver) {
|
||||
if (h.handle == NULL || h.num_devices == NULL) {
|
||||
return 0;
|
||||
}
|
||||
if (driver > h.num_drivers) {
|
||||
return 0;
|
||||
}
|
||||
return (int)h.num_devices[driver];
|
||||
}
|
||||
|
||||
#endif // __APPLE__
|
||||
|
@@ -9,8 +9,7 @@
|
||||
#define ZE_BIT(_i) (1 << _i)
|
||||
|
||||
// Just enough typedefs to dlopen/dlsym for memory information
|
||||
typedef enum ze_result_t
|
||||
{
|
||||
typedef enum ze_result_t {
|
||||
ZE_RESULT_SUCCESS = 0,
|
||||
// Other values omitted for now...
|
||||
} ze_result_t;
|
||||
@@ -20,13 +19,11 @@ typedef struct _zes_driver_handle_t *zes_driver_handle_t;
|
||||
typedef struct _zes_device_handle_t *zes_device_handle_t;
|
||||
typedef struct _zes_mem_handle_t *zes_mem_handle_t;
|
||||
|
||||
typedef enum _ze_structure_type_t
|
||||
{
|
||||
typedef enum _ze_structure_type_t {
|
||||
ZE_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} ze_structure_type_t;
|
||||
|
||||
typedef enum _zes_structure_type_t
|
||||
{
|
||||
typedef enum _zes_structure_type_t {
|
||||
ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES = 0x1,
|
||||
ZES_STRUCTURE_TYPE_MEM_PROPERTIES = 0xb,
|
||||
ZES_STRUCTURE_TYPE_MEM_STATE = 0x1e,
|
||||
@@ -34,35 +31,29 @@ typedef enum _zes_structure_type_t
|
||||
ZES_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_structure_type_t;
|
||||
|
||||
typedef enum _zes_mem_type_t
|
||||
{
|
||||
typedef enum _zes_mem_type_t {
|
||||
ZES_MEM_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_mem_type_t;
|
||||
|
||||
typedef enum _zes_mem_loc_t
|
||||
{
|
||||
typedef enum _zes_mem_loc_t {
|
||||
ZES_MEM_LOC_SYSTEM = 0,
|
||||
ZES_MEM_LOC_DEVICE = 1,
|
||||
ZES_MEM_LOC_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_mem_loc_t;
|
||||
|
||||
typedef enum _zes_mem_health_t
|
||||
{
|
||||
typedef enum _zes_mem_health_t {
|
||||
ZES_MEM_HEALTH_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_mem_health_t;
|
||||
|
||||
typedef struct _ze_device_uuid_t
|
||||
{
|
||||
typedef struct _ze_device_uuid_t {
|
||||
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||
} ze_device_uuid_t;
|
||||
|
||||
typedef struct _zes_uuid_t
|
||||
{
|
||||
typedef struct _zes_uuid_t {
|
||||
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||
} zes_uuid_t;
|
||||
|
||||
typedef enum _ze_device_type_t
|
||||
{
|
||||
typedef enum _ze_device_type_t {
|
||||
ZE_DEVICE_TYPE_GPU = 1,
|
||||
ZE_DEVICE_TYPE_CPU = 2,
|
||||
ZE_DEVICE_TYPE_FPGA = 3,
|
||||
@@ -71,8 +62,7 @@ typedef enum _ze_device_type_t
|
||||
ZE_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} ze_device_type_t;
|
||||
|
||||
typedef enum _zes_device_type_t
|
||||
{
|
||||
typedef enum _zes_device_type_t {
|
||||
ZES_DEVICE_TYPE_GPU = 1,
|
||||
ZES_DEVICE_TYPE_CPU = 2,
|
||||
ZES_DEVICE_TYPE_FPGA = 3,
|
||||
@@ -82,8 +72,7 @@ typedef enum _zes_device_type_t
|
||||
} zes_device_type_t;
|
||||
|
||||
typedef uint32_t ze_device_property_flags_t;
|
||||
typedef enum _ze_device_property_flag_t
|
||||
{
|
||||
typedef enum _ze_device_property_flag_t {
|
||||
ZE_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||
ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||
ZE_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||
@@ -92,8 +81,7 @@ typedef enum _ze_device_property_flag_t
|
||||
} ze_device_property_flag_t;
|
||||
|
||||
typedef uint32_t zes_device_property_flags_t;
|
||||
typedef enum _zes_device_property_flag_t
|
||||
{
|
||||
typedef enum _zes_device_property_flag_t {
|
||||
ZES_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||
ZES_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||
ZES_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||
@@ -101,8 +89,7 @@ typedef enum _zes_device_property_flag_t
|
||||
ZES_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_device_property_flag_t;
|
||||
|
||||
typedef struct _ze_device_properties_t
|
||||
{
|
||||
typedef struct _ze_device_properties_t {
|
||||
ze_structure_type_t stype;
|
||||
void *pNext;
|
||||
ze_device_type_t type;
|
||||
@@ -126,8 +113,7 @@ typedef struct _ze_device_properties_t
|
||||
char name[ZE_MAX_DEVICE_NAME];
|
||||
} ze_device_properties_t;
|
||||
|
||||
typedef struct _zes_device_properties_t
|
||||
{
|
||||
typedef struct _zes_device_properties_t {
|
||||
zes_structure_type_t stype;
|
||||
void *pNext;
|
||||
ze_device_properties_t core;
|
||||
@@ -140,8 +126,7 @@ typedef struct _zes_device_properties_t
|
||||
char driverVersion[ZES_STRING_PROPERTY_SIZE];
|
||||
} zes_device_properties_t;
|
||||
|
||||
typedef struct _zes_device_ext_properties_t
|
||||
{
|
||||
typedef struct _zes_device_ext_properties_t {
|
||||
zes_structure_type_t stype;
|
||||
void *pNext;
|
||||
zes_uuid_t uuid;
|
||||
@@ -149,8 +134,7 @@ typedef struct _zes_device_ext_properties_t
|
||||
zes_device_property_flags_t flags;
|
||||
} zes_device_ext_properties_t;
|
||||
|
||||
typedef struct _zes_mem_properties_t
|
||||
{
|
||||
typedef struct _zes_mem_properties_t {
|
||||
zes_structure_type_t stype;
|
||||
void *pNext;
|
||||
zes_mem_type_t type;
|
||||
@@ -162,8 +146,7 @@ typedef struct _zes_mem_properties_t
|
||||
int32_t numChannels;
|
||||
} zes_mem_properties_t;
|
||||
|
||||
typedef struct _zes_mem_state_t
|
||||
{
|
||||
typedef struct _zes_mem_state_t {
|
||||
zes_structure_type_t stype;
|
||||
const void *pNext;
|
||||
zes_mem_health_t health;
|
||||
@@ -171,10 +154,19 @@ typedef struct _zes_mem_state_t
|
||||
uint64_t size;
|
||||
} zes_mem_state_t;
|
||||
|
||||
typedef struct oneapi_handle
|
||||
{
|
||||
typedef struct oneapi_handle {
|
||||
void *handle;
|
||||
uint16_t verbose;
|
||||
|
||||
uint32_t num_drivers;
|
||||
zes_driver_handle_t *drivers;
|
||||
uint32_t *num_devices;
|
||||
zes_device_handle_t **devices;
|
||||
|
||||
// TODO Driver major, minor information
|
||||
// int driver_major;
|
||||
// int driver_minor;
|
||||
|
||||
ze_result_t (*zesInit)(int);
|
||||
ze_result_t (*zesDriverGet)(uint32_t *pCount, zes_driver_handle_t *phDrivers);
|
||||
ze_result_t (*zesDeviceGet)(zes_driver_handle_t hDriver, uint32_t *pCount,
|
||||
@@ -191,21 +183,21 @@ typedef struct oneapi_handle
|
||||
|
||||
} oneapi_handle_t;
|
||||
|
||||
typedef struct oneapi_init_resp
|
||||
{
|
||||
typedef struct oneapi_init_resp {
|
||||
char *err; // If err is non-null handle is invalid
|
||||
int num_devices;
|
||||
oneapi_handle_t oh;
|
||||
} oneapi_init_resp_t;
|
||||
|
||||
typedef struct oneapi_version_resp
|
||||
{
|
||||
typedef struct oneapi_version_resp {
|
||||
ze_result_t status;
|
||||
char *str; // Contains version or error string if status != 0
|
||||
} oneapi_version_resp_t;
|
||||
|
||||
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp);
|
||||
void oneapi_check_vram(oneapi_handle_t rh, mem_info_t *resp);
|
||||
void oneapi_check_vram(oneapi_handle_t h, int driver, int device,
|
||||
mem_info_t *resp);
|
||||
void oneapi_release(oneapi_handle_t h);
|
||||
int oneapi_get_device_count(oneapi_handle_t h, int driver);
|
||||
|
||||
#endif // __GPU_INFO_INTEL_H__
|
||||
#endif // __APPLE__
|
||||
|
gpu/gpu_linux.go (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ollama/ollama/format"
|
||||
)
|
||||
|
||||
var CudartGlobs = []string{
|
||||
"/usr/local/cuda/lib64/libcudart.so*",
|
||||
"/usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so*",
|
||||
"/usr/lib/x86_64-linux-gnu/libcudart.so*",
|
||||
"/usr/lib/wsl/lib/libcudart.so*",
|
||||
"/usr/lib/wsl/drivers/*/libcudart.so*",
|
||||
"/opt/cuda/lib64/libcudart.so*",
|
||||
"/usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so*",
|
||||
"/usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so*",
|
||||
"/usr/lib/aarch64-linux-gnu/libcudart.so*",
|
||||
"/usr/local/cuda/lib*/libcudart.so*",
|
||||
"/usr/lib*/libcudart.so*",
|
||||
"/usr/local/lib*/libcudart.so*",
|
||||
}
|
||||
|
||||
var NvmlGlobs = []string{}
|
||||
|
||||
var NvcudaGlobs = []string{
|
||||
"/usr/local/cuda*/targets/*/lib/libcuda.so*",
|
||||
"/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
|
||||
"/usr/lib/*-linux-gnu/libcuda.so*",
|
||||
"/usr/lib/wsl/lib/libcuda.so*",
|
||||
"/usr/lib/wsl/drivers/*/libcuda.so*",
|
||||
"/opt/cuda/lib*/libcuda.so*",
|
||||
"/usr/local/cuda/lib*/libcuda.so*",
|
||||
"/usr/lib*/libcuda.so*",
|
||||
"/usr/local/lib*/libcuda.so*",
|
||||
}
|
||||
|
||||
var OneapiGlobs = []string{
|
||||
"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
|
||||
"/usr/lib*/libze_intel_gpu.so*",
|
||||
}
|
||||
|
||||
var CudartMgmtName = "libcudart.so*"
|
||||
var NvcudaMgmtName = "libcuda.so*"
|
||||
var NvmlMgmtName = "" // not currently wired on linux
|
||||
var OneapiMgmtName = "libze_intel_gpu.so"
|
||||
|
||||
func GetCPUMem() (memInfo, error) {
|
||||
var mem memInfo
|
||||
var total, available, free, buffers, cached uint64
|
||||
f, err := os.Open("/proc/meminfo")
|
||||
if err != nil {
|
||||
return mem, err
|
||||
}
|
||||
defer f.Close()
|
||||
s := bufio.NewScanner(f)
|
||||
for s.Scan() {
|
||||
line := s.Text()
|
||||
switch {
|
||||
case strings.HasPrefix(line, "MemTotal:"):
|
||||
_, err = fmt.Sscanf(line, "MemTotal:%d", &total)
|
||||
case strings.HasPrefix(line, "MemAvailable:"):
|
||||
_, err = fmt.Sscanf(line, "MemAvailable:%d", &available)
|
||||
case strings.HasPrefix(line, "MemFree:"):
|
||||
_, err = fmt.Sscanf(line, "MemFree:%d", &free)
|
||||
case strings.HasPrefix(line, "Buffers:"):
|
||||
_, err = fmt.Sscanf(line, "Buffers:%d", &buffers)
|
||||
case strings.HasPrefix(line, "Cached:"):
|
||||
_, err = fmt.Sscanf(line, "Cached:%d", &cached)
|
||||
default:
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return mem, err
|
||||
}
|
||||
|
||||
if total > 0 && available > 0 {
|
||||
mem.TotalMemory = total * format.KibiByte
|
||||
mem.FreeMemory = available * format.KibiByte
|
||||
return mem, nil
|
||||
}
|
||||
}
|
||||
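// MemAvailable was not reported (older kernels); approximate free memory as free + buffers + cached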
mem.TotalMemory = total * format.KibiByte
|
||||
mem.FreeMemory = (free + buffers + cached) * format.KibiByte
|
||||
return mem, nil
|
||||
}
|
gpu/gpu_windows.go (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
package gpu
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type MEMORYSTATUSEX struct {
|
||||
length uint32
|
||||
MemoryLoad uint32
|
||||
TotalPhys uint64
|
||||
AvailPhys uint64
|
||||
TotalPageFile uint64
|
||||
AvailPageFile uint64
|
||||
TotalVirtual uint64
|
||||
AvailVirtual uint64
|
||||
AvailExtendedVirtual uint64
|
||||
}
|
||||
|
||||
var (
|
||||
k32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
globalMemoryStatusExProc = k32.NewProc("GlobalMemoryStatusEx")
|
||||
sizeofMemoryStatusEx = uint32(unsafe.Sizeof(MEMORYSTATUSEX{}))
|
||||
)
|
||||
|
||||
var CudartGlobs = []string{
|
||||
"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
|
||||
}
|
||||
|
||||
var NvmlGlobs = []string{
|
||||
"c:\\Windows\\System32\\nvml.dll",
|
||||
}
|
||||
|
||||
var NvcudaGlobs = []string{
|
||||
"c:\\windows\\system*\\nvcuda.dll",
|
||||
}
|
||||
|
||||
var OneapiGlobs = []string{
|
||||
"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
|
||||
}
|
||||
|
||||
var CudartMgmtName = "cudart64_*.dll"
|
||||
var NvcudaMgmtName = "nvcuda.dll"
|
||||
var NvmlMgmtName = "nvml.dll"
|
||||
var OneapiMgmtName = "ze_intel_gpu64.dll"
|
||||
|
||||
func GetCPUMem() (memInfo, error) {
|
||||
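// GlobalMemoryStatusEx requires the length (dwLength) field to be set to the structure size before the call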
memStatus := MEMORYSTATUSEX{length: sizeofMemoryStatusEx}
|
||||
r1, _, err := globalMemoryStatusExProc.Call(uintptr(unsafe.Pointer(&memStatus)))
|
||||
if r1 == 0 {
|
||||
return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err)
|
||||
}
|
||||
return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys}, nil
|
||||
}
|
gpu/types.go (61 lines changed)
@@ -18,7 +18,7 @@ type GpuInfo struct {
|
||||
Library string `json:"library,omitempty"`
|
||||
|
||||
// Optional variant to select (e.g. versions, cpu feature flags)
|
||||
Variant string `json:"variant,omitempty"`
|
||||
Variant CPUCapability `json:"variant"`
|
||||
|
||||
// MinimumMemory represents the minimum memory required to use the GPU
|
||||
MinimumMemory uint64 `json:"-"`
|
||||
@@ -26,6 +26,14 @@ type GpuInfo struct {
|
||||
// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
|
||||
DependencyPath string `json:"lib_path,omitempty"`
|
||||
|
||||
// Extra environment variables specific to the GPU as list of [key,value]
|
||||
EnvWorkarounds [][2]string `json:"envs,omitempty"`
|
||||
|
||||
// Set to true if we can NOT reliably discover FreeMemory. A value of true indicates
|
||||
// the FreeMemory is best effort, and may over or under report actual memory usage
|
||||
// False indicates FreeMemory can generally be trusted on this GPU
|
||||
UnreliableFreeMemory bool
|
||||
|
||||
// GPU information
|
||||
ID string `json:"gpu_id"` // string to use for selection of this specific GPU
|
||||
Name string `json:"name"` // user friendly name if available
|
||||
@@ -38,6 +46,30 @@ type GpuInfo struct {
|
||||
// TODO other performance capability info to help in scheduling decisions
|
||||
}
|
||||
|
||||
type CPUInfo struct {
|
||||
GpuInfo
|
||||
}
|
||||
|
||||
type CudaGPUInfo struct {
|
||||
GpuInfo
|
||||
index int //nolint:unused,nolintlint
|
||||
}
|
||||
type CudaGPUInfoList []CudaGPUInfo
|
||||
|
||||
type RocmGPUInfo struct {
|
||||
GpuInfo
|
||||
usedFilepath string //nolint:unused,nolintlint
|
||||
index int //nolint:unused,nolintlint
|
||||
}
|
||||
type RocmGPUInfoList []RocmGPUInfo
|
||||
|
||||
type OneapiGPUInfo struct {
|
||||
GpuInfo
|
||||
driverIndex int //nolint:unused,nolintlint
|
||||
gpuIndex int //nolint:unused,nolintlint
|
||||
}
|
||||
type OneapiGPUInfoList []OneapiGPUInfo
|
||||
|
||||
type GpuInfoList []GpuInfo
|
||||
|
||||
// Split up the set of gpu info's by Library and variant
|
||||
@@ -47,8 +79,8 @@ func (l GpuInfoList) ByLibrary() []GpuInfoList {
|
||||
for _, info := range l {
|
||||
found := false
|
||||
requested := info.Library
|
||||
if info.Variant != "" {
|
||||
requested += "_" + info.Variant
|
||||
if info.Variant != CPUCapabilityNone {
|
||||
requested += "_" + info.Variant.String()
|
||||
}
|
||||
for i, lib := range libs {
|
||||
if lib == requested {
|
||||
@@ -86,3 +118,26 @@ type ByFreeMemory []GpuInfo
|
||||
func (a ByFreeMemory) Len() int { return len(a) }
|
||||
func (a ByFreeMemory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByFreeMemory) Less(i, j int) bool { return a[i].FreeMemory < a[j].FreeMemory }
|
||||
|
||||
type CPUCapability uint32
|
||||
|
||||
// Override at build time when building base GPU runners
|
||||
var GPURunnerCPUCapability = CPUCapabilityAVX
|
||||
|
||||
const (
|
||||
CPUCapabilityNone CPUCapability = iota
|
||||
CPUCapabilityAVX
|
||||
CPUCapabilityAVX2
|
||||
// TODO AVX512
|
||||
)
|
||||
|
||||
func (c CPUCapability) String() string {
|
||||
switch c {
|
||||
case CPUCapabilityAVX:
|
||||
return "avx"
|
||||
case CPUCapabilityAVX2:
|
||||
return "avx2"
|
||||
default:
|
||||
return "no vector extensions"
|
||||
}
|
||||
}
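Aside: with this change the ByLibrary grouping key is derived from the typed CPUCapability instead of a free-form variant string. A minimal, self-contained Go sketch of how such a key is formed (illustrative values only, not code from this diff):

package main

import "fmt"

// CPUCapability mirrors the type added in gpu/types.go.
type CPUCapability uint32

const (
	CPUCapabilityNone CPUCapability = iota
	CPUCapabilityAVX
	CPUCapabilityAVX2
)

func (c CPUCapability) String() string {
	switch c {
	case CPUCapabilityAVX:
		return "avx"
	case CPUCapabilityAVX2:
		return "avx2"
	default:
		return "no vector extensions"
	}
}

func main() {
	// Grouping key for a CPU runner with AVX2 support.
	library, variant := "cpu", CPUCapabilityAVX2
	key := library
	if variant != CPUCapabilityNone {
		key += "_" + variant.String()
	}
	fmt.Println(key) // cpu_avx2
}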
|
||||
|
@@ -19,17 +19,19 @@ func TestMultiModelConcurrency(t *testing.T) {
|
||||
var (
|
||||
req = [2]api.GenerateRequest{
|
||||
{
|
||||
Model: "orca-mini",
|
||||
Prompt: "why is the ocean blue?",
|
||||
Stream: &stream,
|
||||
Model: "orca-mini",
|
||||
Prompt: "why is the ocean blue?",
|
||||
Stream: &stream,
|
||||
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||
Options: map[string]interface{}{
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
},
|
||||
}, {
|
||||
Model: "tinydolphin",
|
||||
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||
Stream: &stream,
|
||||
Model: "tinydolphin",
|
||||
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||
Stream: &stream,
|
||||
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||
Options: map[string]interface{}{
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
@@ -38,42 +40,64 @@ func TestMultiModelConcurrency(t *testing.T) {
|
||||
}
|
||||
resp = [2][]string{
|
||||
[]string{"sunlight"},
|
||||
[]string{"england", "english", "massachusetts", "pilgrims"},
|
||||
[]string{"england", "english", "massachusetts", "pilgrims", "british"},
|
||||
}
|
||||
)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(req))
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*240)
|
||||
defer cancel()
|
||||
|
||||
client, _, cleanup := InitServerConnection(ctx, t)
|
||||
defer cleanup()
|
||||
|
||||
for i := 0; i < len(req); i++ {
|
||||
require.NoError(t, PullIfMissing(ctx, client, req[i].Model))
|
||||
}
|
||||
|
||||
for i := 0; i < len(req); i++ {
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
GenerateTestHelper(ctx, t, req[i], resp[i])
|
||||
DoGenerate(ctx, t, client, req[i], resp[i], 60*time.Second, 10*time.Second)
|
||||
}(i)
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) // GTX 750 2G card takes ~9 minutes
|
||||
req, resp := GenerateRequests()
|
||||
reqLimit := len(req)
|
||||
iterLimit := 5
|
||||
|
||||
vram := os.Getenv("OLLAMA_MAX_VRAM")
|
||||
if vram != "" {
|
||||
max, err := strconv.ParseUint(vram, 10, 64)
|
||||
require.NoError(t, err)
|
||||
// Don't hammer on small VRAM cards...
|
||||
if max < 4*1024*1024*1024 {
|
||||
reqLimit = min(reqLimit, 2)
|
||||
iterLimit = 2
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 9*time.Minute)
|
||||
defer cancel()
|
||||
client, _, cleanup := InitServerConnection(ctx, t)
|
||||
defer cleanup()
|
||||
|
||||
req, resp := GenerateRequests()
|
||||
// Get the server running (if applicable) and warm the model up with a single initial request
|
||||
DoGenerate(ctx, t, client, req[0], resp[0], 60*time.Second, 5*time.Second)
|
||||
DoGenerate(ctx, t, client, req[0], resp[0], 60*time.Second, 10*time.Second)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(req))
|
||||
for i := 0; i < len(req); i++ {
|
||||
wg.Add(reqLimit)
|
||||
for i := 0; i < reqLimit; i++ {
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
for j := 0; j < 5; j++ {
|
||||
for j := 0; j < iterLimit; j++ {
|
||||
slog.Info("Starting", "req", i, "iter", j)
|
||||
// On slower GPUs it can take a while to process the 4 concurrent requests
|
||||
// On slower GPUs it can take a while to process the concurrent requests
|
||||
// so we allow a much longer initial timeout
|
||||
DoGenerate(ctx, t, client, req[i], resp[i], 90*time.Second, 5*time.Second)
|
||||
DoGenerate(ctx, t, client, req[i], resp[i], 120*time.Second, 20*time.Second)
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
@@ -221,5 +245,23 @@ func TestMultiModelStress(t *testing.T) {
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(2 * time.Second)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
models, err := client.ListRunning(ctx)
|
||||
if err != nil {
|
||||
slog.Warn("failed to list running models", "error", err)
|
||||
continue
|
||||
}
|
||||
for _, m := range models.Models {
|
||||
slog.Info("loaded model snapshot", "model", m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
@@ -11,7 +11,8 @@ import (
|
||||
)
|
||||
|
||||
func TestContextExhaustion(t *testing.T) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) // TODO maybe shorter?
|
||||
// Longer timeout needed for small footprint GPUs
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
|
||||
defer cancel()
|
||||
// Set up the test data
|
||||
req := api.GenerateRequest{
|
||||
|
@@ -32,7 +32,11 @@ func TestIntegrationMultimodal(t *testing.T) {
|
||||
resp := "the ollam"
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
|
||||
defer cancel()
|
||||
GenerateTestHelper(ctx, t, req, []string{resp})
|
||||
client, _, cleanup := InitServerConnection(ctx, t)
|
||||
defer cleanup()
|
||||
require.NoError(t, PullIfMissing(ctx, client, req.Model))
|
||||
// llava models on CPU can be quite slow to start, so allow a generous initial timeout
|
||||
DoGenerate(ctx, t, client, req, []string{resp}, 120*time.Second, 30*time.Second)
|
||||
}
|
||||
|
||||
const imageEncoding = `iVBORw0KGgoAAAANSUhEUgAAANIAAAB4CAYAAACHHqzKAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEb
|
||||
|
@@ -140,7 +140,7 @@ func PullIfMissing(ctx context.Context, client *api.Client, modelName string) er
|
||||
|
||||
showCtx, cancel := context.WithDeadlineCause(
|
||||
ctx,
|
||||
time.Now().Add(5*time.Second),
|
||||
time.Now().Add(10*time.Second),
|
||||
fmt.Errorf("show for existing model %s took too long", modelName),
|
||||
)
|
||||
defer cancel()
|
||||
@@ -287,41 +287,46 @@ func DoGenerate(ctx context.Context, t *testing.T, client *api.Client, genReq ap
|
||||
func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
||||
return []api.GenerateRequest{
|
||||
{
|
||||
Model: "orca-mini",
|
||||
Prompt: "why is the ocean blue?",
|
||||
Stream: &stream,
|
||||
Model: "orca-mini",
|
||||
Prompt: "why is the ocean blue?",
|
||||
Stream: &stream,
|
||||
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||
Options: map[string]interface{}{
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
},
|
||||
}, {
|
||||
Model: "orca-mini",
|
||||
Prompt: "why is the color of dirt brown?",
|
||||
Stream: &stream,
|
||||
Model: "orca-mini",
|
||||
Prompt: "why is the color of dirt brown?",
|
||||
Stream: &stream,
|
||||
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||
Options: map[string]interface{}{
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
},
|
||||
}, {
|
||||
Model: "orca-mini",
|
||||
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||
Stream: &stream,
|
||||
Model: "orca-mini",
|
||||
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||
Stream: &stream,
|
||||
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||
Options: map[string]interface{}{
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
},
|
||||
}, {
|
||||
Model: "orca-mini",
|
||||
Prompt: "what is the origin of independence day?",
|
||||
Stream: &stream,
|
||||
Model: "orca-mini",
|
||||
Prompt: "what is the origin of independence day?",
|
||||
Stream: &stream,
|
||||
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||
Options: map[string]interface{}{
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
},
|
||||
}, {
|
||||
Model: "orca-mini",
|
||||
Prompt: "what is the composition of air?",
|
||||
Stream: &stream,
|
||||
Model: "orca-mini",
|
||||
Prompt: "what is the composition of air?",
|
||||
Stream: &stream,
|
||||
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||
Options: map[string]interface{}{
|
||||
"seed": 42,
|
||||
"temperature": 0.0,
|
||||
@@ -331,7 +336,7 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
||||
[][]string{
|
||||
[]string{"sunlight"},
|
||||
[]string{"soil", "organic", "earth", "black", "tan"},
|
||||
[]string{"england", "english", "massachusetts", "pilgrims"},
|
||||
[]string{"england", "english", "massachusetts", "pilgrims", "british"},
|
||||
[]string{"fourth", "july", "declaration", "independence"},
|
||||
[]string{"nitrogen", "oxygen", "carbon", "dioxide"},
|
||||
}
|
||||
|
llm/ext_server/CMakeLists.txt (vendored, 25 lines changed)
@@ -1,14 +1,13 @@
|
||||
|
||||
set(TARGET ollama_llama_server)
|
||||
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_compile_definitions(${TARGET} PRIVATE
|
||||
SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
|
||||
)
|
||||
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
||||
if (WIN32)
|
||||
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
|
||||
endif()
|
||||
set(TARGET ollama_llama_server)
|
||||
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||
add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_compile_definitions(${TARGET} PRIVATE
|
||||
SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
|
||||
)
|
||||
target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT})
|
||||
if (WIN32)
|
||||
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
|
||||
endif()
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
llm/ext_server/server.cpp (vendored, 90 lines changed)
@@ -56,7 +56,6 @@ struct server_params {
|
||||
std::string hostname = "127.0.0.1";
|
||||
std::vector<std::string> api_keys;
|
||||
std::string public_path = "examples/server/public";
|
||||
std::string chat_template = "";
|
||||
int32_t port = 8080;
|
||||
int32_t read_timeout = 600;
|
||||
int32_t write_timeout = 600;
|
||||
@@ -427,16 +426,6 @@ struct llama_server_context
|
||||
return true;
|
||||
}
|
||||
|
||||
void validate_model_chat_template(server_params & sparams) {
|
||||
llama_chat_message chat[] = {{"user", "test"}};
|
||||
std::vector<char> buf(1);
|
||||
int res = llama_chat_apply_template(model, nullptr, chat, 1, true, buf.data(), buf.size());
|
||||
if (res < 0) {
|
||||
LOG_ERROR("The chat template comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
|
||||
sparams.chat_template = "chatml";
|
||||
}
|
||||
}
|
||||
|
||||
void initialize() {
|
||||
// create slots
|
||||
all_slots_are_idle = true;
|
||||
@@ -1393,12 +1382,50 @@ struct llama_server_context
|
||||
}
|
||||
}
|
||||
|
||||
std::string common_prefix(const std::string& str1, const std::string& str2) {
|
||||
auto mismatch_pair = std::mismatch(str1.begin(), str1.end(), str2.begin());
|
||||
return std::string(str1.begin(), mismatch_pair.first);
|
||||
}
|
||||
|
||||
// Find the slot that has the greatest common prefix
|
||||
server_slot *prefix_slot(const json &prompt) {
|
||||
if (!prompt.is_string()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::string prompt_str = prompt.get<std::string>();
|
||||
server_slot *slot = nullptr;
|
||||
size_t longest = 0;
|
||||
|
||||
for (server_slot &s : slots) {
|
||||
if (s.available() && s.prompt.is_string()) {
|
||||
std::string s_prompt = s.prompt.get<std::string>();
|
||||
std::string prefix = common_prefix(s_prompt, prompt_str);
|
||||
|
||||
if (prefix.size() > longest) {
|
||||
slot = &s;
|
||||
longest = prefix.size();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!slot) {
|
||||
return get_slot(-1);
|
||||
}
|
||||
|
||||
LOG_DEBUG("slot with common prefix found", {{
|
||||
"slot_id", slot->id,
|
||||
"characters", longest
|
||||
}});
|
||||
return slot;
|
||||
}
|
||||
|
||||
void process_single_task(task_server& task)
|
||||
{
|
||||
switch (task.type)
|
||||
{
|
||||
case TASK_TYPE_COMPLETION: {
|
||||
server_slot *slot = get_slot(json_value(task.data, "slot_id", -1));
|
||||
server_slot *slot = prefix_slot(task.data["prompt"]);
|
||||
if (slot == nullptr)
|
||||
{
|
||||
// if no slot is available, we defer this task for processing later
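Aside: prefix_slot changes how completion tasks pick a slot — the server now prefers the available slot whose cached prompt shares the longest common prefix with the incoming prompt, so existing prompt processing can be reused where possible. A small standalone Go sketch of the same longest-common-prefix selection idea (illustrative only, not the server's C++ code):

package main

import "fmt"

// commonPrefixLen returns the length of the shared prefix of a and b.
func commonPrefixLen(a, b string) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

// pickSlot returns the index of the cached prompt sharing the longest
// prefix with the incoming prompt, or -1 if nothing matches.
func pickSlot(cached []string, prompt string) int {
	best, bestLen := -1, 0
	for i, c := range cached {
		if l := commonPrefixLen(c, prompt); l > bestLen {
			best, bestLen = i, l
		}
	}
	return best
}

func main() {
	cached := []string{"why is the sky blue?", "why is the ocean blue?"}
	fmt.Println(pickSlot(cached, "why is the ocean salty?")) // 1
}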
|
||||
@@ -1665,22 +1692,23 @@ struct llama_server_context
|
||||
if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
|
||||
{
|
||||
const int n_left = slot.n_ctx - slot.params.n_keep;
|
||||
const int n_block_size = n_left / 2;
|
||||
const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
|
||||
const int n_shift = n_left / 2;
|
||||
const int n_erase = slot.n_prompt_tokens - slot.params.n_keep - n_shift;
|
||||
|
||||
std::vector<llama_token> new_tokens(
|
||||
prompt_tokens.begin(),
|
||||
prompt_tokens.begin() + slot.params.n_keep);
|
||||
new_tokens.insert(
|
||||
new_tokens.end(),
|
||||
prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
|
||||
prompt_tokens.begin() + slot.params.n_keep + n_erase,
|
||||
prompt_tokens.end());
|
||||
|
||||
LOG_VERBOSE("input truncated", {
|
||||
{"n_ctx", slot.n_ctx},
|
||||
{"n_keep", slot.params.n_keep},
|
||||
{"n_left", n_left},
|
||||
{"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
|
||||
LOG_INFO("input truncated", {
|
||||
{"n_ctx", slot.n_ctx},
|
||||
{"n_keep", slot.params.n_keep},
|
||||
{"n_left", n_left},
|
||||
{"n_shift", n_shift},
|
||||
{"n_erase", n_erase},
|
||||
});
|
||||
slot.truncated = true;
|
||||
prompt_tokens = new_tokens;
|
||||
@@ -1715,7 +1743,7 @@ struct llama_server_context
|
||||
slot.n_past -= 1;
|
||||
}
|
||||
|
||||
slot.n_prompt_tokens_processed = slot.n_prompt_tokens - slot.n_past;
|
||||
slot.n_prompt_tokens_processed = slot.n_prompt_tokens;
|
||||
|
||||
if (slot.ga_n != 1)
|
||||
{
|
||||
@@ -2335,9 +2363,9 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
#ifndef GGML_USE_CUBLAS
|
||||
fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
|
||||
#endif // GGML_USE_CUBLAS
|
||||
#ifndef GGML_USE_CUDA
|
||||
fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
|
||||
#endif // GGML_USE_CUDA
|
||||
}
|
||||
else if (arg == "--tensor-split" || arg == "-ts")
|
||||
{
|
||||
@@ -2346,7 +2374,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
|
||||
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
|
||||
std::string arg_next = argv[i];
|
||||
|
||||
// split string by , and /
|
||||
@@ -2367,8 +2395,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
|
||||
}
|
||||
}
|
||||
#else
|
||||
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
|
||||
#endif // GGML_USE_CUBLAS
|
||||
LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n", {});
|
||||
#endif // GGML_USE_CUDA
|
||||
}
|
||||
else if (arg == "--main-gpu" || arg == "-mg")
|
||||
{
|
||||
@@ -2377,7 +2405,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
|
||||
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
|
||||
params.main_gpu = std::stoi(argv[i]);
|
||||
#else
|
||||
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
|
||||
@@ -2535,7 +2563,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, g
|
||||
invalid_param = true;
|
||||
break;
|
||||
}
|
||||
sparams.chat_template = argv[i];
|
||||
}
|
||||
else if (arg == "--override-kv")
|
||||
{
|
||||
@@ -3008,11 +3035,6 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
const auto model_meta = llama.model_meta();
|
||||
|
||||
if (sparams.chat_template.empty()) { // custom chat template is not supplied
|
||||
// check if the template comes with the model is supported by us
|
||||
llama.validate_model_chat_template(sparams);
|
||||
}
|
||||
|
||||
// Middleware for API key validation
|
||||
auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
|
||||
// If API key is not set, skip validation
|
||||
|
@@ -18,16 +18,16 @@ sign() {
|
||||
fi
|
||||
}
|
||||
|
||||
COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DLLAMA_METAL_EMBED_LIBRARY=on"
|
||||
COMMON_DARWIN_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off"
|
||||
|
||||
case "${GOARCH}" in
|
||||
"amd64")
|
||||
COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_NATIVE=off"
|
||||
COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DGGML_METAL=off -DGGML_NATIVE=off"
|
||||
|
||||
# Static build for linking into the Go binary
|
||||
init_vars
|
||||
CMAKE_TARGETS="--target llama --target ggml"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/darwin/${ARCH}_static"
|
||||
echo "Building static library"
|
||||
build
|
||||
@@ -37,7 +37,7 @@ case "${GOARCH}" in
|
||||
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
||||
#
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/darwin/${ARCH}/cpu"
|
||||
echo "Building LCD CPU"
|
||||
build
|
||||
@@ -49,7 +49,7 @@ case "${GOARCH}" in
|
||||
# Approximately 400% faster than LCD on same CPU
|
||||
#
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx"
|
||||
echo "Building AVX CPU"
|
||||
build
|
||||
@@ -61,7 +61,7 @@ case "${GOARCH}" in
|
||||
# Approximately 10% faster than AVX on same CPU
|
||||
#
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=on -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=on -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2"
|
||||
echo "Building AVX2 CPU"
|
||||
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation"
|
||||
@@ -75,14 +75,14 @@ case "${GOARCH}" in
|
||||
# Static build for linking into the Go binary
|
||||
init_vars
|
||||
CMAKE_TARGETS="--target llama --target ggml"
|
||||
CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/darwin/${ARCH}_static"
|
||||
echo "Building static library"
|
||||
build
|
||||
|
||||
if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DLLAMA_ACCELERATE=on -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=on ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/darwin/${ARCH}/metal"
|
||||
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders"
|
||||
build
|
||||
|
@@ -51,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then
|
||||
export CUDACXX=$(command -v nvcc)
|
||||
fi
|
||||
fi
|
||||
COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
|
||||
COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
|
||||
source $(dirname $0)/gen_common.sh
|
||||
init_vars
|
||||
git_module_setup
|
||||
@@ -64,7 +64,7 @@ if [ -z "${OLLAMA_SKIP_STATIC_GENERATE}" -o "${OLLAMA_CPU_TARGET}" = "static" ];
|
||||
# Static build for linking into the Go binary
|
||||
init_vars
|
||||
CMAKE_TARGETS="--target llama --target ggml"
|
||||
CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DGGML_NATIVE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/linux/${ARCH}_static"
|
||||
echo "Building static library"
|
||||
build
|
||||
@@ -77,29 +77,29 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
|
||||
if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then
|
||||
init_vars
|
||||
echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
|
||||
CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/linux/${ARCH}/cpu"
|
||||
echo "Building custom CPU"
|
||||
build
|
||||
compress
|
||||
else
|
||||
# Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512
|
||||
# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
||||
# -DLLAMA_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
|
||||
# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
||||
# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
||||
# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
||||
# -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
|
||||
# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
||||
# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
||||
# Note: the following seem to yield slower results than AVX2 - ymmv
|
||||
# -DLLAMA_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
|
||||
# -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake
|
||||
# -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
|
||||
# -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
|
||||
# -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
|
||||
# -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
|
||||
|
||||
COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off"
|
||||
COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
|
||||
if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
|
||||
#
|
||||
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
||||
#
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/linux/${ARCH}/cpu"
|
||||
echo "Building LCD CPU"
|
||||
build
|
||||
@@ -116,7 +116,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
|
||||
# Approximately 400% faster than LCD on same CPU
|
||||
#
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/linux/${ARCH}/cpu_avx"
|
||||
echo "Building AVX CPU"
|
||||
build
|
||||
@@ -129,7 +129,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
|
||||
# Approximately 10% faster than AVX on same CPU
|
||||
#
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
|
||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
|
||||
BUILD_DIR="../build/linux/${ARCH}/cpu_avx2"
|
||||
echo "Building AVX2 CPU"
|
||||
build
|
||||
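The LLAMA_* CMake feature flags above are renamed to GGML_*, and the script still produces three CPU libraries: a lowest common denominator build, an AVX build, and an AVX2 build. Which variant actually gets loaded depends on what the host CPU supports; the selection logic is not part of this diff, but as an illustration only, a minimal Go sketch of mapping host features to the variant directories named here (using golang.org/x/sys/cpu):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// pickCPUVariant maps host CPU features to the variant directories built
// above: cpu (no vector extensions), cpu_avx, and cpu_avx2.
func pickCPUVariant() string {
	switch {
	case cpu.X86.HasAVX2 && cpu.X86.HasFMA:
		return "cpu_avx2"
	case cpu.X86.HasAVX:
		return "cpu_avx"
	default:
		return "cpu"
	}
}

func main() {
	fmt.Println("selected CPU variant:", pickCPUVariant())
}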
@@ -170,15 +170,15 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then
|
||||
#
|
||||
# CUDA compute < 6.0 lacks proper FP16 support on ARM.
|
||||
# Disabling has minimal performance effect while maintaining compatibility.
|
||||
ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off"
|
||||
ARM64_DEFS="-DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_CUDA_F16=off"
|
||||
fi
|
||||
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
|
||||
if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then
|
||||
echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\""
|
||||
CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
|
||||
CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
|
||||
echo "Building custom CUDA GPU"
|
||||
else
|
||||
CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}"
|
||||
CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DGGML_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat"
|
||||
fi
|
||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}"
|
||||
BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}"
|
||||
@@ -216,7 +216,7 @@ if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then
|
||||
init_vars
|
||||
source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI
|
||||
CC=icx
|
||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL=ON -DLLAMA_SYCL_F16=OFF"
|
||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DGGML_SYCL_F16=OFF"
|
||||
BUILD_DIR="../build/linux/${ARCH}/oneapi"
|
||||
EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb"
|
||||
DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it
|
||||
@@ -254,7 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
|
||||
ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true)
|
||||
fi
|
||||
init_vars
|
||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
|
||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
|
||||
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
|
||||
if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then
|
||||
echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\""
|
||||
|
@@ -39,7 +39,8 @@ function init_vars {
|
||||
}
|
||||
$script:cmakeDefs = @(
|
||||
"-DBUILD_SHARED_LIBS=on",
|
||||
"-DLLAMA_NATIVE=off"
|
||||
"-DGGML_NATIVE=off",
|
||||
"-DGGML_OPENMP=off"
|
||||
)
|
||||
$script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on")
|
||||
$script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower()
|
||||
@@ -122,8 +123,13 @@ function build {
|
||||
& cmake --version
|
||||
& cmake -S "${script:llamacppDir}" -B $script:buildDir $script:cmakeDefs
|
||||
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
|
||||
write-host "building with: cmake --build $script:buildDir --config $script:config $($script:cmakeTargets | ForEach-Object { `"--target`", $_ })"
|
||||
& cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ })
|
||||
if ($cmakeDefs -contains "-G") {
|
||||
$extra=@("-j8")
|
||||
} else {
|
||||
$extra= @("--", "/p:CL_MPcount=8")
|
||||
}
|
||||
write-host "building with: cmake --build $script:buildDir --config $script:config $($script:cmakeTargets | ForEach-Object { `"--target`", $_ }) $extra"
|
||||
& cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ }) $extra
|
||||
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
|
||||
# Rearrange output to be consistent between different generators
|
||||
if ($null -ne ${script:config} -And (test-path -path "${script:buildDir}/bin/${script:config}" ) ) {
|
||||
@@ -176,9 +182,9 @@ function cleanup {
|
||||
}
|
||||
|
||||
|
||||
# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
||||
# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
||||
# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
||||
# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
||||
# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
||||
# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
||||
|
||||
|
||||
function build_static() {
|
||||
@@ -198,12 +204,13 @@ function build_static() {
|
||||
"-DCMAKE_C_COMPILER=gcc.exe",
|
||||
"-DCMAKE_CXX_COMPILER=g++.exe",
|
||||
"-DBUILD_SHARED_LIBS=off",
|
||||
"-DLLAMA_NATIVE=off",
|
||||
"-DLLAMA_AVX=off",
|
||||
"-DLLAMA_AVX2=off",
|
||||
"-DLLAMA_AVX512=off",
|
||||
"-DLLAMA_F16C=off",
|
||||
"-DLLAMA_FMA=off")
|
||||
"-DGGML_NATIVE=off",
|
||||
"-DGGML_AVX=off",
|
||||
"-DGGML_AVX2=off",
|
||||
"-DGGML_AVX512=off",
|
||||
"-DGGML_F16C=off",
|
||||
"-DGGML_FMA=off",
|
||||
"-DGGML_OPENMP=off")
|
||||
$script:buildDir="../build/windows/${script:ARCH}_static"
|
||||
write-host "Building static library"
|
||||
build
|
||||
@@ -217,7 +224,7 @@ function build_cpu($gen_arch) {
|
||||
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) {
|
||||
# remaining llama.cpp builds use MSVC
|
||||
init_vars
|
||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DLLAMA_AVX=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs
|
||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
|
||||
$script:buildDir="../build/windows/${script:ARCH}/cpu"
|
||||
$script:distDir="$script:DIST_BASE\cpu"
|
||||
write-host "Building LCD CPU"
|
||||
@@ -232,7 +239,7 @@ function build_cpu($gen_arch) {
|
||||
function build_cpu_avx() {
|
||||
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) {
|
||||
init_vars
|
||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs
|
||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
|
||||
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx"
|
||||
$script:distDir="$script:DIST_BASE\cpu_avx"
|
||||
write-host "Building AVX CPU"
|
||||
@@ -247,7 +254,7 @@ function build_cpu_avx() {
|
||||
function build_cpu_avx2() {
|
||||
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) {
|
||||
init_vars
|
||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=on", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=on", "-DLLAMA_F16C=on") + $script:cmakeDefs
|
||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=on", "-DGGML_AVX512=off", "-DGGML_FMA=on", "-DGGML_F16C=on") + $script:cmakeDefs
|
||||
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx2"
|
||||
$script:distDir="$script:DIST_BASE\cpu_avx2"
|
||||
write-host "Building AVX2 CPU"
|
||||
@@ -270,7 +277,15 @@ function build_cuda() {
|
||||
init_vars
|
||||
$script:buildDir="../build/windows/${script:ARCH}/cuda$script:CUDA_VARIANT"
|
||||
$script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT"
|
||||
$script:cmakeDefs += @("-A", "x64", "-DLLAMA_CUDA=ON", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR", "-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}")
|
||||
$script:cmakeDefs += @(
|
||||
"-A", "x64",
|
||||
"-DGGML_CUDA=ON",
|
||||
"-DGGML_AVX=on",
|
||||
"-DGGML_AVX2=off",
|
||||
"-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR",
|
||||
"-DCMAKE_CUDA_FLAGS=-t8",
|
||||
"-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}"
|
||||
)
|
||||
if ($null -ne $env:OLLAMA_CUSTOM_CUDA_DEFS) {
|
||||
write-host "OLLAMA_CUSTOM_CUDA_DEFS=`"${env:OLLAMA_CUSTOM_CUDA_DEFS}`""
|
||||
$script:cmakeDefs +=@("${env:OLLAMA_CUSTOM_CUDA_DEFS}")
|
||||
@@ -280,10 +295,12 @@ function build_cuda() {
|
||||
sign
|
||||
install
|
||||
|
||||
write-host "copying CUDA dependencies to ${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
||||
cp "${script:CUDA_LIB_DIR}\cudart64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
||||
cp "${script:CUDA_LIB_DIR}\cublas64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
||||
cp "${script:CUDA_LIB_DIR}\cublasLt64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
||||
rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||
md "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\" -ea 0 > $null
|
||||
write-host "copying CUDA dependencies to ${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||
cp "${script:CUDA_LIB_DIR}\cudart64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||
cp "${script:CUDA_LIB_DIR}\cublas64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||
cp "${script:CUDA_LIB_DIR}\cublasLt64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||
} else {
|
||||
write-host "Skipping CUDA generation step"
|
||||
}
|
||||
@@ -302,7 +319,7 @@ function build_oneapi() {
|
||||
$script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT"
|
||||
$script:cmakeDefs += @(
|
||||
"-G", "MinGW Makefiles",
|
||||
"-DLLAMA_SYCL=ON",
|
||||
"-DGGML_SYCL=ON",
|
||||
"-DCMAKE_C_COMPILER=icx",
|
||||
"-DCMAKE_CXX_COMPILER=icx",
|
||||
"-DCMAKE_BUILD_TYPE=Release"
|
||||
@@ -317,16 +334,18 @@ function build_oneapi() {
|
||||
sign
|
||||
install
|
||||
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libirngmd.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libmmd.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_level_zero.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_unified_runtime.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_win_proxy_loader.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\svml_dispmd.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\sycl7.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_core.2.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_sycl_blas.4.dll" "${script:distDir}"
|
||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_tbb_thread.2.dll" "${script:distDir}"
|
||||
rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
md "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\" -ea 0 > $null
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libirngmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libmmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_level_zero.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_unified_runtime.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_win_proxy_loader.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\svml_dispmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\sycl7.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_core.2.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_sycl_blas.4.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_tbb_thread.2.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||
} else {
|
||||
Write-Host "Skipping oneAPI generation step"
|
||||
}
|
||||
@@ -346,10 +365,10 @@ function build_rocm() {
|
||||
"-G", "Ninja",
|
||||
"-DCMAKE_C_COMPILER=clang.exe",
|
||||
"-DCMAKE_CXX_COMPILER=clang++.exe",
|
||||
"-DLLAMA_HIPBLAS=on",
|
||||
"-DGGML_HIPBLAS=on",
|
||||
"-DHIP_PLATFORM=amd",
|
||||
"-DLLAMA_AVX=on",
|
||||
"-DLLAMA_AVX2=off",
|
||||
"-DGGML_AVX=on",
|
||||
"-DGGML_AVX2=off",
|
||||
"-DCMAKE_POSITION_INDEPENDENT_CODE=on",
|
||||
"-DAMDGPU_TARGETS=$(amdGPUs)",
|
||||
"-DGPU_TARGETS=$(amdGPUs)"
|
||||
|
llm/ggla.go (13 changed lines)
@@ -53,7 +53,7 @@ func (llm *ggla) Tensors() Tensors {
|
||||
return llm.tensors
|
||||
}
|
||||
|
||||
func (llm *ggla) decode(rs io.ReadSeeker) error {
|
||||
func (llm *ggla) decode(rs io.ReadSeeker) (retErr error) {
|
||||
var r uint32
|
||||
if err := binary.Read(rs, binary.LittleEndian, &r); err != nil {
|
||||
return err
|
||||
@@ -69,9 +69,18 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
|
||||
for {
|
||||
var dims uint32
|
||||
if err := binary.Read(rs, binary.LittleEndian, &dims); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if errors.Is(retErr, io.EOF) {
|
||||
retErr = io.ErrUnexpectedEOF
|
||||
}
|
||||
}()
|
||||
|
||||
var namesize uint32
|
||||
if err := binary.Read(rs, binary.LittleEndian, &namesize); err != nil {
|
||||
return err
|
||||
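The hunk above gives decode a named return value so the deferred function can rewrite an io.EOF seen part-way through a record into io.ErrUnexpectedEOF. A self-contained sketch of that pattern, illustrative rather than the repository code:

package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// decodeHeader reads a fixed-size value; running out of input here means the
// stream is truncated, so the deferred function upgrades io.EOF accordingly.
func decodeHeader(r io.Reader) (retErr error) {
	defer func() {
		if errors.Is(retErr, io.EOF) {
			retErr = io.ErrUnexpectedEOF
		}
	}()

	var magic uint32
	return binary.Read(r, binary.LittleEndian, &magic)
}

func main() {
	fmt.Println(decodeHeader(bytes.NewReader(nil))) // prints: unexpected EOF
}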
@@ -108,7 +117,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := rs.Seek((offset+31)&-32, io.SeekStart); err != nil {
|
||||
if _, err := rs.Seek((offset+31)&-32-offset, io.SeekCurrent); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
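The seek fix above switches from an absolute seek to a relative skip: tensor data in this container is padded to a 32-byte boundary, and (offset+31)&-32 - offset is the number of padding bytes between the current offset and that boundary. A small sketch of the arithmetic, for illustration only:

package main

import "fmt"

// pad32 returns how many bytes to skip so the next read starts on a 32-byte
// boundary; (offset+31) & -32 rounds offset up to the next multiple of 32.
func pad32(offset int64) int64 {
	return (offset+31)&-32 - offset
}

func main() {
	fmt.Println(pad32(100)) // 28, the next aligned offset is 128
	fmt.Println(pad32(128)) // 0, already aligned
}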
|
llm/ggml.go (86 changed lines)
@@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/ollama/ollama/util/bufioutil"
|
||||
)
|
||||
|
||||
type GGML struct {
|
||||
@@ -69,6 +71,30 @@ func (kv KV) HeadCountKV() uint64 {
|
||||
return 1
|
||||
}
|
||||
|
||||
func (kv KV) EmbeddingHeadCount() uint64 {
|
||||
if heads := kv.HeadCount(); heads > 0 {
|
||||
return kv.EmbeddingLength() / kv.HeadCount()
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func (kv KV) EmbeddingHeadCountK() uint64 {
|
||||
if k := kv.u64(fmt.Sprintf("%s.attention.key_length", kv.Architecture())); k > 0 {
|
||||
return k
|
||||
}
|
||||
|
||||
return kv.EmbeddingHeadCount()
|
||||
}
|
||||
|
||||
func (kv KV) EmbeddingHeadCountV() uint64 {
|
||||
if v := kv.u64(fmt.Sprintf("%s.attention.value_length", kv.Architecture())); v > 0 {
|
||||
return v
|
||||
}
|
||||
|
||||
return kv.EmbeddingHeadCount()
|
||||
}
|
||||
|
||||
func (kv KV) GQA() uint64 {
|
||||
return kv.HeadCount() / kv.HeadCountKV()
|
||||
}
|
||||
@@ -254,7 +280,18 @@ func DetectGGMLType(b []byte) string {
|
||||
}
|
||||
}
|
||||
|
||||
func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) {
|
||||
// DecodeGGML decodes a GGML model from the given reader.
|
||||
//
|
||||
// It collects array values for arrays with a size less than or equal to
|
||||
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
|
||||
// maxArraySize is negative, all arrays are collected.
|
||||
func DecodeGGML(rs io.ReadSeeker, maxArraySize int) (*GGML, int64, error) {
|
||||
if maxArraySize == 0 {
|
||||
maxArraySize = 1024
|
||||
}
|
||||
|
||||
rs = bufioutil.NewBufferedSeeker(rs, 32<<10)
|
||||
|
||||
var magic uint32
|
||||
if err := binary.Read(rs, binary.LittleEndian, &magic); err != nil {
|
||||
return nil, 0, err
|
||||
@@ -267,17 +304,15 @@ func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) {
|
||||
case FILE_MAGIC_GGLA:
|
||||
c = &containerGGLA{}
|
||||
case FILE_MAGIC_GGUF_LE:
|
||||
c = &containerGGUF{ByteOrder: binary.LittleEndian}
|
||||
c = &containerGGUF{ByteOrder: binary.LittleEndian, maxArraySize: maxArraySize}
|
||||
case FILE_MAGIC_GGUF_BE:
|
||||
c = &containerGGUF{ByteOrder: binary.BigEndian}
|
||||
c = &containerGGUF{ByteOrder: binary.BigEndian, maxArraySize: maxArraySize}
|
||||
default:
|
||||
return nil, 0, errors.New("invalid file magic")
|
||||
}
|
||||
|
||||
model, err := c.Decode(rs)
|
||||
if errors.Is(err, io.EOF) {
|
||||
// noop
|
||||
} else if err != nil {
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
@@ -297,7 +332,10 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
||||
embedding := llm.KV().EmbeddingLength()
|
||||
heads := llm.KV().HeadCount()
|
||||
headsKV := llm.KV().HeadCountKV()
|
||||
vocab := uint64(len(llm.KV()["tokenizer.ggml.tokens"].([]any)))
|
||||
vocab := uint64(llm.KV()["tokenizer.ggml.tokens"].(*array).size)
|
||||
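// The tokens array can exceed maxArraySize, in which case its values are not
// collected, but its element count is always recorded; reading the vocabulary
// size from array.size therefore works in both cases.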
|
||||
embeddingHeads := llm.KV().EmbeddingHeadCount()
|
||||
embeddingHeadsK := llm.KV().EmbeddingHeadCountK()
|
||||
|
||||
layers := llm.Tensors().Layers()
|
||||
|
||||
@@ -307,7 +345,8 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
||||
|
||||
partialOffload = 4 * batch * embedding
|
||||
partialOffload += max(
|
||||
4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embedding/heads*headsKV),
|
||||
// 4*batch*(4+6*embedding+context*(2*heads)+llm.KV().GQA()),
|
||||
4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embeddingHeads*headsKV),
|
||||
4*batch*(embedding+vocab)+embedding*vocab*105/128,
|
||||
)
|
||||
|
||||
@@ -315,21 +354,30 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
||||
// mixtral 8x22b
|
||||
ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
|
||||
partialOffload = max(
|
||||
3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
|
||||
4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch),
|
||||
3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embeddingHeads*headsKV),
|
||||
4*(context*batch*heads+context*embeddingHeads*headsKV+batch*1024+embeddingHeads*headsKV*batch),
|
||||
)
|
||||
} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
|
||||
// mixtral 8x7b
|
||||
ffnGateWeight1 := ffnGateWeight.Shape[1]
|
||||
fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1)
|
||||
partialOffload = max(
|
||||
4*batch*(3+embedding/heads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16,
|
||||
4*batch*(3+embeddingHeads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16,
|
||||
4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16),
|
||||
)
|
||||
}
|
||||
case "gemma":
|
||||
fullOffload = 4 * batch * (embedding + vocab)
|
||||
partialOffload = 4*batch*(2*embedding+vocab+1) + embedding*vocab*105/128
|
||||
case "gemma", "gemma2":
|
||||
fullOffload = max(
|
||||
4*batch*(embedding+vocab),
|
||||
4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads),
|
||||
)
|
||||
|
||||
partialOffload = max(
|
||||
4*embedding*batch+embedding*vocab*105/128+4*vocab*batch,
|
||||
4*batch*(2*embedding+1+2*embeddingHeadsK*heads+context+context*heads)+
|
||||
4*embeddingHeadsK*context*8+
|
||||
embedding*embeddingHeadsK*heads*9/16,
|
||||
)
|
||||
case "command-r":
|
||||
fullOffload = max(
|
||||
4*batch*(embedding+vocab),
|
||||
@@ -366,6 +414,16 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
||||
4*batch*(vocab+2*embedding),
|
||||
fullOffload,
|
||||
)
|
||||
case "deepseek2":
|
||||
fullOffload = max(
|
||||
4*batch*(3*embedding+vocab),
|
||||
4*batch*(3*embedding+2+context*(1+headsKV)+2*embeddingHeadsK*headsKV),
|
||||
)
|
||||
|
||||
partialOffload = max(
|
||||
4*batch*(3*embedding+vocab)+embedding*vocab*105/128,
|
||||
4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16,
|
||||
)
|
||||
}
|
||||
|
||||
return
|
||||
|
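DecodeGGML now takes a maxArraySize argument, described in the doc comment above. A hedged usage sketch, assuming the package is importable as github.com/ollama/ollama/llm and using model.gguf as a placeholder path:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/ollama/ollama/llm"
)

func main() {
	f, err := os.Open("model.gguf") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// 0 keeps the default cap of 1024 collected array elements;
	// pass -1 to collect every array regardless of size.
	ggml, n, err := llm.DecodeGGML(f, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("architecture:", ggml.KV().Architecture(), "bytes decoded:", n)
}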
llm/ggml_test.go (new file, 1 line)
@@ -0,0 +1 @@
|
||||
package llm
|
llm/gguf.go (130 changed lines)
@@ -3,11 +3,10 @@ package llm
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
type containerGGUF struct {
|
||||
@@ -29,6 +28,12 @@ type containerGGUF struct {
|
||||
NumTensor uint64
|
||||
NumKV uint64
|
||||
}
|
||||
|
||||
maxArraySize int
|
||||
}
|
||||
|
||||
func (c *containerGGUF) canCollectArray(size int) bool {
|
||||
return c.maxArraySize < 0 || size <= c.maxArraySize
|
||||
}
|
||||
|
||||
func (c *containerGGUF) Name() string {
|
||||
@@ -54,7 +59,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) {
|
||||
}
|
||||
|
||||
model := newGGUF(c)
|
||||
slog.Debug(fmt.Sprintf("model = %#v", model))
|
||||
if err := model.Decode(rs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -85,6 +89,8 @@ type gguf struct {
|
||||
tensors []*Tensor
|
||||
|
||||
parameters uint64
|
||||
|
||||
scratch [16 << 10]byte
|
||||
}
|
||||
|
||||
func newGGUF(container *containerGGUF) *gguf {
|
||||
@@ -181,34 +187,34 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
|
||||
}
|
||||
|
||||
// decode tensors
|
||||
for i := 0; uint64(i) < llm.numTensor(); i++ {
|
||||
for range llm.numTensor() {
|
||||
name, err := readGGUFString(llm, rs)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to read tensor name: %w", err)
|
||||
}
|
||||
|
||||
// dims is the number of dimensions in the tensor
|
||||
dims, err := readGGUF[uint32](llm, rs)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to read tensor dimensions: %w", err)
|
||||
}
|
||||
|
||||
shape := [4]uint64{1, 1, 1, 1}
|
||||
for i := 0; uint32(i) < dims; i++ {
|
||||
shape[i], err = readGGUF[uint64](llm, rs)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to read tensor shape: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
kind, err := readGGUF[uint32](llm, rs)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to read tensor kind: %w", err)
|
||||
}
|
||||
|
||||
offset, err := readGGUF[uint64](llm, rs)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to read tensor offset: %w", err)
|
||||
}
|
||||
|
||||
tensor := Tensor{
|
||||
@@ -230,24 +236,19 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
|
||||
alignment = 32
|
||||
}
|
||||
|
||||
offset, err := rs.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
padding := llm.padding(offset, int64(alignment))
|
||||
if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, tensor := range llm.tensors {
|
||||
if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil {
|
||||
return err
|
||||
offset, err := rs.Seek(0, io.SeekCurrent)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get current offset: %w", err)
|
||||
}
|
||||
|
||||
padding := llm.padding(int64(tensor.Size()), int64(alignment))
|
||||
padding := llm.padding(offset, int64(alignment))
|
||||
if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to seek to init padding: %w", err)
|
||||
}
|
||||
|
||||
if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil {
|
||||
return fmt.Errorf("failed to seek to tensor: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -285,22 +286,48 @@ func readGGUFV1String(llm *gguf, r io.Reader) (string, error) {
|
||||
return b.String(), nil
|
||||
}
|
||||
|
||||
func discardGGUFString(llm *gguf, r io.Reader) error {
|
||||
buf := llm.scratch[:8]
|
||||
_, err := io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
size := int(llm.ByteOrder.Uint64(buf))
|
||||
for size > 0 {
|
||||
n, err := r.Read(llm.scratch[:min(size, cap(llm.scratch))])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size -= n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func readGGUFString(llm *gguf, r io.Reader) (string, error) {
|
||||
if llm.Version == 1 {
|
||||
return readGGUFV1String(llm, r)
|
||||
}
|
||||
|
||||
var length uint64
|
||||
if err := binary.Read(r, llm.ByteOrder, &length); err != nil {
|
||||
buf := llm.scratch[:8]
|
||||
_, err := io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
if _, err := io.CopyN(&b, r, int64(length)); err != nil {
|
||||
length := int(llm.ByteOrder.Uint64(buf))
|
||||
if length > len(llm.scratch) {
|
||||
buf = make([]byte, length)
|
||||
} else {
|
||||
buf = llm.scratch[:length]
|
||||
}
|
||||
clear(buf)
|
||||
|
||||
_, err = io.ReadFull(r, buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return b.String(), nil
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func writeGGUFString(llm *gguf, w io.Writer, s string) error {
|
||||
@@ -316,7 +343,16 @@ func writeGGUFString(llm *gguf, w io.Writer, s string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
|
||||
type array struct {
|
||||
size int
|
||||
values []any
|
||||
}
|
||||
|
||||
func (a *array) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(a.values)
|
||||
}
|
||||
|
||||
func readGGUFV1Array(llm *gguf, r io.Reader) (*array, error) {
|
||||
t, err := readGGUF[uint32](llm, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -327,7 +363,12 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; uint32(i) < n; i++ {
|
||||
a := &array{size: int(n)}
|
||||
if llm.canCollectArray(int(n)) {
|
||||
a.values = make([]any, 0, int(n))
|
||||
}
|
||||
|
||||
for i := range n {
|
||||
var e any
|
||||
switch t {
|
||||
case ggufTypeUint8:
|
||||
@@ -361,13 +402,15 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a = append(a, e)
|
||||
if a.values != nil {
|
||||
a.values[i] = e
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
|
||||
func readGGUFArray(llm *gguf, r io.Reader) (*array, error) {
|
||||
if llm.Version == 1 {
|
||||
return readGGUFV1Array(llm, r)
|
||||
}
|
||||
@@ -382,7 +425,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := 0; uint64(i) < n; i++ {
|
||||
a := &array{size: int(n)}
|
||||
if llm.canCollectArray(int(n)) {
|
||||
a.values = make([]any, int(n))
|
||||
}
|
||||
|
||||
for i := range n {
|
||||
var e any
|
||||
switch t {
|
||||
case ggufTypeUint8:
|
||||
@@ -408,7 +456,11 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
|
||||
case ggufTypeBool:
|
||||
e, err = readGGUF[bool](llm, r)
|
||||
case ggufTypeString:
|
||||
e, err = readGGUFString(llm, r)
|
||||
if a.values != nil {
|
||||
e, err = readGGUFString(llm, r)
|
||||
} else {
|
||||
err = discardGGUFString(llm, r)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid array type: %d", t)
|
||||
}
|
||||
@@ -416,10 +468,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a = append(a, e)
|
||||
if a.values != nil {
|
||||
a.values[i] = e
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return a, nil
|
||||
}
|
||||
|
||||
func writeGGUFArray[S ~[]E, E any](llm *gguf, w io.Writer, t uint32, s S) error {
|
||||
|
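The readGGUFString and discardGGUFString changes above replace binary.Read plus io.CopyN with io.ReadFull into a reusable scratch buffer, and skip string data outright when an array is not being collected. For reference, a standalone sketch of the wire format they parse, a uint64 little-endian length followed by that many bytes; this is illustrative, not the repository implementation:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"log"
)

// readString reads one GGUF v2/v3 string: an 8-byte little-endian length
// followed by the string bytes.
func readString(r io.Reader) (string, error) {
	var n uint64
	if err := binary.Read(r, binary.LittleEndian, &n); err != nil {
		return "", err
	}
	buf := make([]byte, n)
	if _, err := io.ReadFull(r, buf); err != nil {
		return "", err
	}
	return string(buf), nil
}

func main() {
	var b bytes.Buffer
	binary.Write(&b, binary.LittleEndian, uint64(5))
	b.WriteString("llama")
	s, err := readString(&b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // llama
}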
Submodule llm/llama.cpp updated: 5921b8f089...a8db2a9ce6
llm/llm.go (15 changed lines)
@@ -1,12 +1,13 @@
|
||||
package llm
|
||||
|
||||
// #cgo CFLAGS: -Illama.cpp
|
||||
// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/libllama.a -lstdc++
|
||||
// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/libllama.a -lstdc++
|
||||
// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/libllama.a -static -lstdc++
|
||||
// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/libllama.a -static -lstdc++
|
||||
// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/libllama.a -lstdc++
|
||||
// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/libllama.a -lstdc++
|
||||
// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include
|
||||
// #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread
|
||||
// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal
|
||||
// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src
|
||||
// #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src
|
||||
// #cgo windows,arm64 LDFLAGS: -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src
|
||||
// #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src
|
||||
// #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
|
||||
// #include <stdlib.h>
|
||||
// #include "llama.h"
|
||||
import "C"
|
||||
|
llm/memory.go (315 changed lines)
@@ -3,9 +3,10 @@ package llm
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/format"
|
||||
"github.com/ollama/ollama/gpu"
|
||||
)
|
||||
@@ -16,7 +17,8 @@ func PredictServerFit(allGpus gpu.GpuInfoList, ggml *GGML, adapters, projectors
|
||||
var estimatedVRAM uint64
|
||||
for _, gpus := range allGpus.ByLibrary() {
|
||||
var layerCount int
|
||||
layerCount, estimatedVRAM, _ = EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
layerCount, estimatedVRAM = estimate.Layers, estimate.VRAMSize
|
||||
if opts.NumGPU < 0 {
|
||||
if layerCount > 0 && layerCount >= int(ggml.KV().BlockCount()+1) {
|
||||
return true, estimatedVRAM
|
||||
@@ -30,24 +32,76 @@ func PredictServerFit(allGpus gpu.GpuInfoList, ggml *GGML, adapters, projectors
|
||||
return false, estimatedVRAM
|
||||
}
|
||||
|
||||
type MemoryEstimate struct {
|
||||
// How many layers we predict we can load
|
||||
Layers int
|
||||
|
||||
// The size of the graph which occupies the main GPU
|
||||
Graph uint64
|
||||
|
||||
// How much VRAM will be allocated given the number of layers we predict
|
||||
VRAMSize uint64
|
||||
|
||||
// The total size of the model if loaded into VRAM. If all layers are loaded, VRAMSize == TotalSize
|
||||
TotalSize uint64
|
||||
|
||||
// For multi-GPU scenarios, this provides the tensor split parameter
|
||||
TensorSplit string
|
||||
|
||||
// For multi-GPU scenarios, this is the size in bytes per GPU
|
||||
GPUSizes []uint64
|
||||
|
||||
// internal fields for logging purposes
|
||||
inferenceLibrary string
|
||||
layersRequested int
|
||||
layersModel int
|
||||
availableList []string
|
||||
kv uint64
|
||||
allocationsList []string
|
||||
memoryWeights uint64
|
||||
memoryLayerOutput uint64
|
||||
graphFullOffload uint64
|
||||
graphPartialOffload uint64
|
||||
}
|
||||
|
||||
// Given a model and one or more GPU targets, predict how many layers and bytes we can load, and the total size
|
||||
// The GPUs provided must all be the same Library
|
||||
func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts api.Options) (int, uint64, uint64) {
|
||||
var memoryAvailable uint64
|
||||
for _, info := range gpus {
|
||||
memoryAvailable += info.FreeMemory
|
||||
}
|
||||
if envconfig.MaxVRAM > 0 {
|
||||
memoryAvailable = envconfig.MaxVRAM
|
||||
}
|
||||
func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts api.Options) MemoryEstimate {
|
||||
// Graph size for a partial offload, applies to all GPUs
|
||||
var graphPartialOffload uint64
|
||||
|
||||
slog.Debug("evaluating", "library", gpus[0].Library, "gpu_count", len(gpus), "available", format.HumanBytes2(memoryAvailable))
|
||||
// Graph size when all layers are offloaded, applies to all GPUs
|
||||
var graphFullOffload uint64
|
||||
|
||||
// TODO - this is probably wrong, first GPU vs secondaries will have different overheads
|
||||
memoryMinimum := gpus[0].MinimumMemory
|
||||
// Final graph offload once we know full or partial
|
||||
var graphOffload uint64
|
||||
|
||||
// Projectors loaded into GPU0 only
|
||||
var projectorSize uint64
|
||||
|
||||
// Conditional output size on GPU 0
|
||||
var memoryLayerOutput uint64
|
||||
|
||||
// The sizes of a layer
|
||||
var layerSize uint64
|
||||
|
||||
// The sum of all the layer sizes (just for logging)
|
||||
var memoryWeights uint64
|
||||
|
||||
// True if all the layers are loaded
|
||||
var fullyLoaded bool
|
||||
|
||||
// Overflow that didn't fit into the GPU
|
||||
var overflow uint64
|
||||
|
||||
availableList := make([]string, len(gpus))
|
||||
for i, gpu := range gpus {
|
||||
availableList[i] = format.HumanBytes2(gpu.FreeMemory)
|
||||
}
|
||||
slog.Debug("evaluating", "library", gpus[0].Library, "gpu_count", len(gpus), "available", availableList)
|
||||
|
||||
for _, projector := range projectors {
|
||||
memoryMinimum += projectorMemoryRequirements(projector)
|
||||
projectorSize += projectorMemoryRequirements(projector)
|
||||
|
||||
// multimodal models require at least 2048 context
|
||||
opts.NumCtx = max(opts.NumCtx, 2048)
|
||||
@@ -56,127 +110,246 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
|
||||
layers := ggml.Tensors().Layers()
|
||||
// add one layer worth of memory as a buffer
|
||||
if blk0, ok := layers["blk.0"]; ok {
|
||||
memoryMinimum += blk0.size()
|
||||
layerSize = blk0.size()
|
||||
} else {
|
||||
slog.Warn("model missing blk.0 layer size")
|
||||
}
|
||||
|
||||
// fp16 k,v = (1 (k) + 1 (v)) * sizeof(float16) * n_ctx * n_layer * n_embd / n_head * n_head_kv
|
||||
var kv uint64 = 2 * 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * ggml.KV().EmbeddingLength() / ggml.KV().HeadCount() * ggml.KV().HeadCountKV()
|
||||
// fp16 k,v = sizeof(float16) * n_ctx * n_layer * (n_embd_head_k + n_embd_head_v) * n_head_kv
|
||||
var kv uint64 = 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * (ggml.KV().EmbeddingHeadCountK() + ggml.KV().EmbeddingHeadCountV()) * ggml.KV().HeadCountKV()
|
||||
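// As a worked example of the fp16 KV cache formula above, with assumed
// Llama-7B-shaped values rather than anything taken from this diff:
// n_ctx=2048, n_layer=32, n_embd_head_k = n_embd_head_v = 128, n_head_kv=32
// gives 2 * 2048 * 32 * (128+128) * 32 = 1,073,741,824 bytes, i.e. 1 GiB of
// KV cache; grouped-query attention with a smaller n_head_kv shrinks this
// proportionally.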
|
||||
graphPartialOffload, graphFullOffload := ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))
|
||||
// KV is proportional to the number of layers
|
||||
layerSize += kv / ggml.KV().BlockCount()
|
||||
|
||||
graphPartialOffload, graphFullOffload = ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))
|
||||
if graphPartialOffload == 0 {
|
||||
graphPartialOffload = ggml.KV().GQA() * kv / 6
|
||||
}
|
||||
|
||||
if graphFullOffload == 0 {
|
||||
graphFullOffload = graphPartialOffload
|
||||
}
|
||||
|
||||
graphFullOffload *= uint64(len(gpus))
|
||||
graphPartialOffload *= uint64(len(gpus))
|
||||
|
||||
// on metal there's no partial offload overhead
|
||||
if gpus[0].Library == "metal" {
|
||||
graphPartialOffload = graphFullOffload
|
||||
} else if len(gpus) > 1 {
|
||||
// multigpu should always use the partial graph size
|
||||
graphFullOffload = graphPartialOffload
|
||||
}
|
||||
|
||||
// memoryRequiredTotal represents the memory required for full GPU offloading (all layers)
|
||||
memoryRequiredTotal := memoryMinimum + graphFullOffload
|
||||
|
||||
// memoryRequiredPartial represents the memory required for partial GPU offloading (n > 0, n < layers)
|
||||
memoryRequiredPartial := memoryMinimum + graphPartialOffload
|
||||
|
||||
var memoryLayerOutput uint64
|
||||
if layer, ok := layers["output_norm"]; ok {
|
||||
memoryLayerOutput += layer.size()
|
||||
}
|
||||
|
||||
if layer, ok := layers["output"]; ok {
|
||||
memoryLayerOutput += layer.size()
|
||||
} else if layer, ok := layers["token_embd"]; ok {
|
||||
memoryLayerOutput += layer.size()
|
||||
}
|
||||
|
||||
if gpus[0].Library == "metal" && opts.UseMMap {
|
||||
// memory is preallocated for output tensors
|
||||
memoryRequiredTotal += memoryLayerOutput
|
||||
memoryRequiredPartial += memoryLayerOutput
|
||||
// Output layer handled at the end if we have space
|
||||
gpuZeroOverhead := projectorSize
|
||||
|
||||
// Reduce set of GPUs to only those that have sufficient space to fit overhead and at least one layer
|
||||
var layerCount int
|
||||
layerCounts := make([]int, len(gpus))
|
||||
gpuAllocations := make([]uint64, len(gpus))
|
||||
type gs struct {
|
||||
i int
|
||||
g *gpu.GpuInfo
|
||||
}
|
||||
gpusWithSpace := []gs{}
|
||||
for i := range gpus {
|
||||
var gzo uint64
|
||||
if len(gpusWithSpace) == 0 {
|
||||
gzo = gpuZeroOverhead
|
||||
}
|
||||
// Only include GPUs that can fit the graph, gpu minimum, the layer buffer and at least one more layer
|
||||
if gpus[i].FreeMemory < gzo+max(graphPartialOffload, graphFullOffload)+gpus[i].MinimumMemory+2*layerSize {
|
||||
slog.Debug("gpu has too little memory to allocate any layers", "gpu", gpus[i])
|
||||
continue
|
||||
}
|
||||
gpusWithSpace = append(gpusWithSpace, gs{i, &gpus[i]})
|
||||
gpuAllocations[i] += gpus[i].MinimumMemory + layerSize // We hold off on graph until we know partial vs. full
|
||||
}
|
||||
|
||||
var layerCount int
|
||||
var gpuZeroID int
|
||||
if len(gpusWithSpace) > 0 {
|
||||
gpuZeroID = gpusWithSpace[0].i
|
||||
gpuAllocations[gpuZeroID] += gpuZeroOverhead
|
||||
}
|
||||
|
||||
// For all the layers, find where they can fit on the GPU(s)
|
||||
for i := range int(ggml.KV().BlockCount()) {
|
||||
// Some models have inconsistent layer sizes
|
||||
if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
|
||||
memoryLayer := blk.size()
|
||||
layerSize = blk.size()
|
||||
layerSize += kv / ggml.KV().BlockCount()
|
||||
}
|
||||
memoryWeights += layerSize
|
||||
|
||||
// KV is proportional to the number of layers
|
||||
memoryLayer += kv / ggml.KV().BlockCount()
|
||||
if opts.NumGPU >= 0 && layerCount >= opts.NumGPU {
|
||||
// Stop allocating on GPU(s) once we hit the users target NumGPU
|
||||
continue
|
||||
}
|
||||
|
||||
memoryRequiredTotal += memoryLayer
|
||||
if (opts.NumGPU >= 0 && layerCount+1 <= opts.NumGPU) || (opts.NumGPU < 0 && memoryAvailable > memoryRequiredPartial+memoryLayer) {
|
||||
memoryRequiredPartial += memoryLayer
|
||||
// distribute the layers across the GPU(s) that have space
|
||||
for j := len(gpusWithSpace); j > 0; j-- {
|
||||
g := gpusWithSpace[i%j]
|
||||
used := gpuAllocations[g.i] + max(graphPartialOffload, graphFullOffload)
|
||||
if g.g.FreeMemory > used+layerSize {
|
||||
gpuAllocations[g.i] += layerSize
|
||||
layerCounts[g.i]++
|
||||
layerCount++
|
||||
break
|
||||
} else {
|
||||
gpusWithSpace = append(gpusWithSpace[:i%j], gpusWithSpace[i%j+1:]...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if gpus[0].Library != "metal" || !opts.UseMMap {
|
||||
// memory was not preallocated for output tensors
|
||||
memoryRequiredTotal += memoryLayerOutput
|
||||
if layerCount >= int(ggml.KV().BlockCount()) {
|
||||
fullyLoaded = true
|
||||
} else {
|
||||
for i := layerCount; i < int(ggml.KV().BlockCount()); i++ {
|
||||
overflow += layerSize
|
||||
}
|
||||
}
|
||||
|
||||
if (opts.NumGPU >= 0 && layerCount+1 <= opts.NumGPU) || (opts.NumGPU < 0 && memoryAvailable > memoryRequiredTotal) {
|
||||
layerCount = int(ggml.KV().BlockCount()) + 1
|
||||
memoryRequiredPartial = memoryRequiredTotal
|
||||
// Determine if we need to consider output then find where it fits
|
||||
if memoryLayerOutput > 0 && (opts.NumGPU < 0 || layerCount < opts.NumGPU) {
|
||||
for j := len(gpusWithSpace); j > 0; j-- {
|
||||
g := gpusWithSpace[layerCount%j]
|
||||
used := gpuAllocations[g.i] + max(graphPartialOffload, graphFullOffload)
|
||||
if g.g.FreeMemory > used+memoryLayerOutput {
|
||||
gpuAllocations[g.i] += memoryLayerOutput
|
||||
layerCounts[g.i]++
|
||||
layerCount++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if layerCount < int(ggml.KV().BlockCount())+1 {
|
||||
fullyLoaded = false
|
||||
overflow += memoryLayerOutput
|
||||
}
|
||||
}
|
||||
|
||||
memoryWeights := memoryRequiredTotal - memoryMinimum - graphFullOffload - kv
|
||||
// Add the applicable (full or partial) graph allocations
|
||||
for i := range gpus {
|
||||
if layerCounts[i] <= 0 {
|
||||
continue
|
||||
}
|
||||
if fullyLoaded {
|
||||
gpuAllocations[i] += graphFullOffload
|
||||
} else {
|
||||
gpuAllocations[i] += graphPartialOffload
|
||||
}
|
||||
}
|
||||
if fullyLoaded {
|
||||
graphOffload = graphFullOffload
|
||||
} else {
|
||||
graphOffload = graphPartialOffload
|
||||
}
|
||||
|
||||
// Summaries for the log
|
||||
var memoryRequiredPartial, memoryRequiredTotal uint64
|
||||
for i := range gpuAllocations {
|
||||
memoryRequiredPartial += gpuAllocations[i]
|
||||
}
|
||||
memoryRequiredTotal = memoryRequiredPartial + overflow
|
||||
|
||||
tensorSplit := ""
|
||||
if len(gpus) > 1 {
|
||||
splits := make([]string, len(gpus))
|
||||
for i, count := range layerCounts {
|
||||
splits[i] = strconv.Itoa(count)
|
||||
}
|
||||
tensorSplit = strings.Join(splits, ",")
|
||||
}
|
||||
allocationsList := []string{}
|
||||
for _, a := range gpuAllocations {
|
||||
allocationsList = append(allocationsList, format.HumanBytes2(a))
|
||||
}
|
||||
|
||||
estimate := MemoryEstimate{
|
||||
TotalSize: memoryRequiredTotal,
|
||||
Layers: 0,
|
||||
Graph: 0,
|
||||
VRAMSize: 0,
|
||||
GPUSizes: []uint64{},
|
||||
|
||||
inferenceLibrary: gpus[0].Library,
|
||||
layersRequested: opts.NumGPU,
|
||||
layersModel: int(ggml.KV().BlockCount()) + 1,
|
||||
availableList: availableList,
|
||||
kv: kv,
|
||||
allocationsList: allocationsList,
|
||||
memoryWeights: memoryWeights,
|
||||
memoryLayerOutput: memoryLayerOutput,
|
||||
graphFullOffload: graphFullOffload,
|
||||
graphPartialOffload: graphPartialOffload,
|
||||
}
|
||||
|
||||
if gpus[0].Library == "cpu" {
|
||||
return estimate
|
||||
}
|
||||
if layerCount == 0 {
|
||||
slog.Debug("insufficient VRAM to load any model layers")
|
||||
return estimate
|
||||
}
|
||||
estimate.Layers = layerCount
|
||||
estimate.Graph = graphOffload
|
||||
estimate.VRAMSize = memoryRequiredPartial
|
||||
estimate.TotalSize = memoryRequiredTotal
|
||||
estimate.TensorSplit = tensorSplit
|
||||
estimate.GPUSizes = gpuAllocations
|
||||
return estimate
|
||||
}
|
||||
|
||||
func (m MemoryEstimate) log() {
|
||||
slog.Info(
|
||||
"offload to gpu",
|
||||
"offload to "+m.inferenceLibrary,
|
||||
slog.Group(
|
||||
"layers",
|
||||
// requested number of layers to offload
|
||||
"requested", opts.NumGPU,
|
||||
"requested", m.layersRequested,
|
||||
// The number of layers the model has (including output)
|
||||
"model", m.layersModel,
|
||||
// estimated number of layers that can be offloaded
|
||||
"real", layerCount,
|
||||
"offload", m.Layers,
|
||||
// multi-gpu split for tensors
|
||||
"split", m.TensorSplit,
|
||||
),
|
||||
slog.Group(
|
||||
"memory",
|
||||
// memory available for offloading
|
||||
"available", format.HumanBytes2(memoryAvailable),
|
||||
// memory available by GPU for offloading
|
||||
"available", m.availableList,
|
||||
slog.Group(
|
||||
"required",
|
||||
// memory required for full offloading
|
||||
"full", format.HumanBytes2(memoryRequiredTotal),
|
||||
"full", format.HumanBytes2(m.TotalSize),
|
||||
// memory required to offload layers.estimate layers
|
||||
"partial", format.HumanBytes2(memoryRequiredPartial),
|
||||
"partial", format.HumanBytes2(m.VRAMSize),
|
||||
// memory of KV cache
|
||||
"kv", format.HumanBytes2(kv),
|
||||
"kv", format.HumanBytes2(m.kv),
|
||||
// Allocations across the GPUs
|
||||
"allocations", m.allocationsList,
|
||||
),
|
||||
slog.Group(
|
||||
"weights",
|
||||
// memory of the weights
|
||||
"total", format.HumanBytes2(memoryWeights),
|
||||
"total", format.HumanBytes2(m.memoryWeights),
|
||||
// memory of repeating layers
|
||||
"repeating", format.HumanBytes2(memoryWeights-memoryLayerOutput),
|
||||
"repeating", format.HumanBytes2(m.memoryWeights-m.memoryLayerOutput),
|
||||
// memory of non-repeating layers
|
||||
"nonrepeating", format.HumanBytes2(memoryLayerOutput),
|
||||
"nonrepeating", format.HumanBytes2(m.memoryLayerOutput),
|
||||
),
|
||||
slog.Group(
|
||||
"graph",
|
||||
// memory of graph when fully offloaded
|
||||
"full", format.HumanBytes2(graphFullOffload),
|
||||
"full", format.HumanBytes2(m.graphFullOffload),
|
||||
// memory of graph when not fully offloaded
|
||||
"partial", format.HumanBytes2(graphPartialOffload),
|
||||
"partial", format.HumanBytes2(m.graphPartialOffload),
|
||||
),
|
||||
),
|
||||
)
|
||||
if gpus[0].Library == "cpu" {
|
||||
return 0, 0, memoryRequiredTotal
|
||||
}
|
||||
if memoryRequiredPartial > memoryAvailable {
|
||||
slog.Debug("insufficient VRAM to load any model layers")
|
||||
return 0, 0, memoryRequiredTotal
|
||||
}
|
||||
|
||||
return layerCount, memoryRequiredPartial, memoryRequiredTotal
|
||||
}
|
||||
|
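EstimateGPULayers now returns a MemoryEstimate rather than the old (layers, VRAM, total) triple, and the log() method above prints the detailed breakdown. A hedged sketch of a caller inside the llm package, reusing the imports already present in memory.go; logFit is a hypothetical helper, not repository code:

// logFit asks for an estimate and reports whether the model fully fits in VRAM.
func logFit(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts api.Options) {
	estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
	if estimate.Layers == 0 {
		slog.Info("no layers fit on GPU, running on CPU")
		return
	}
	slog.Info("planned offload",
		"layers", estimate.Layers,
		"vram", format.HumanBytes2(estimate.VRAMSize),
		"total", format.HumanBytes2(estimate.TotalSize),
		"split", estimate.TensorSplit,
	)
}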
llm/memory_test.go (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
package llm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/gpu"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestEstimateGPULayers(t *testing.T) {
|
||||
envconfig.Debug = true
|
||||
modelName := "dummy"
|
||||
f, err := os.CreateTemp(t.TempDir(), modelName)
|
||||
require.NoError(t, err)
|
||||
defer f.Close()
|
||||
gguf := NewGGUFV3(binary.LittleEndian)
|
||||
inputLayerCount := 5
|
||||
|
||||
tensors := []Tensor{
|
||||
{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||
{Name: "blk.1.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||
{Name: "blk.2.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||
{Name: "blk.3.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||
{Name: "blk.4.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||
{Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||
}
|
||||
assert.Len(t, tensors, inputLayerCount+1)
|
||||
err = gguf.Encode(f, KV{
|
||||
"general.architecture": "llama",
|
||||
"general.name": "name",
|
||||
"llama.context_length": uint32(32),
|
||||
"llama.embedding_length": uint32(4096),
|
||||
"llama.block_count": uint32(inputLayerCount),
|
||||
"llama.attention.head_count": uint32(32),
|
||||
"llama.attention.head_count_kv": uint32(32),
|
||||
"tokenizer.ggml.tokens": []string{" "},
|
||||
"tokenizer.ggml.scores": []float32{0},
|
||||
"tokenizer.ggml.token_type": []int32{0},
|
||||
}, tensors)
|
||||
require.NoError(t, err)
|
||||
|
||||
ggml, err := LoadModel(f.Name(), 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Simple CPU scenario
|
||||
gpus := []gpu.GpuInfo{
|
||||
{
|
||||
Library: "cpu",
|
||||
},
|
||||
}
|
||||
projectors := []string{}
|
||||
opts := api.DefaultOptions()
|
||||
t.Run("cpu", func(t *testing.T) {
|
||||
estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
assert.Equal(t, 0, estimate.Layers)
|
||||
assert.Equal(t, uint64(0), estimate.Graph)
|
||||
})
|
||||
|
||||
// derived from the dummy ggml file above
|
||||
graphPartialOffload := uint64(202377216)
|
||||
graphFullOffload := uint64(171968512)
|
||||
layerSize := uint64(33554436)
|
||||
projectorSize := uint64(0)
|
||||
memoryLayerOutput := uint64(4)
|
||||
|
||||
// Dual CUDA scenario with asymmetry
|
||||
gpuMinimumMemory := uint64(2048)
|
||||
gpus = []gpu.GpuInfo{
|
||||
{
|
||||
Library: "cuda",
|
||||
MinimumMemory: gpuMinimumMemory,
|
||||
},
|
||||
{
|
||||
Library: "cuda",
|
||||
MinimumMemory: gpuMinimumMemory,
|
||||
},
|
||||
}
|
||||
// Nested array: GPU0 layer space, GPU1 layer space, expected gpu0, expected gpu1
|
||||
for i, s := range []struct {
|
||||
layer0, layer1 uint64
|
||||
expect0, expect1 uint64
|
||||
}{
|
||||
{1, 1, 1, 1},
|
||||
{2, 1, 2, 1},
|
||||
{2, 2, 2, 2},
|
||||
{1, 2, 1, 2},
|
||||
{3, 3, 3, 3},
|
||||
{4, 4, 3, 3},
|
||||
{6, 6, 3, 3},
|
||||
{0, 3, 0, 3},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("%v", s), func(t *testing.T) {
|
||||
gpus[0].FreeMemory = 0
|
||||
gpus[1].FreeMemory = 0
|
||||
gpus[0].FreeMemory += projectorSize
|
||||
if s.layer0 > 0 {
|
||||
gpus[0].FreeMemory += memoryLayerOutput
|
||||
} else {
|
||||
gpus[1].FreeMemory += memoryLayerOutput
|
||||
}
|
||||
gpus[0].FreeMemory += gpuMinimumMemory + layerSize + s.layer0*layerSize + 1
|
||||
gpus[1].FreeMemory += gpuMinimumMemory + layerSize + s.layer1*layerSize + 1
|
||||
gpus[0].FreeMemory += max(graphFullOffload, graphPartialOffload)
|
||||
gpus[1].FreeMemory += max(graphFullOffload, graphPartialOffload)
|
||||
estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
assert.Equal(t, int(s.expect0+s.expect1), estimate.Layers, "scenario %d: %v", i, s)
|
||||
assert.Equal(t, fmt.Sprintf("%d,%d", s.expect0, s.expect1), estimate.TensorSplit, "scenario %d: %v", i, s)
|
||||
var layerSums uint64
|
||||
for _, b := range estimate.GPUSizes {
|
||||
layerSums += b
|
||||
}
|
||||
if estimate.Layers < inputLayerCount+1 {
|
||||
assert.Less(t, estimate.VRAMSize, estimate.TotalSize, "scenario %d: %v %+v", i, s, estimate)
|
||||
assert.Equal(t, estimate.VRAMSize, layerSums, "scenario %d: %v %+v", i, s, estimate)
|
||||
} else {
|
||||
assert.Equal(t, estimate.VRAMSize, estimate.TotalSize, "scenario %d: %v %+v", i, s, estimate)
|
||||
assert.Equal(t, estimate.TotalSize, layerSums, "scenario %d: %v %+v", i, s, estimate)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -1,8 +1,8 @@
|
||||
diff --git a/common/common.cpp b/common/common.cpp
|
||||
index ba1ecf0e..cead57cc 100644
|
||||
index 2c05a4d4..927f0e3d 100644
|
||||
--- a/common/common.cpp
|
||||
+++ b/common/common.cpp
|
||||
@@ -1836,6 +1836,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
|
||||
@@ -2093,6 +2093,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
|
||||
mparams.use_mmap = params.use_mmap;
|
||||
mparams.use_mlock = params.use_mlock;
|
||||
mparams.check_tensors = params.check_tensors;
|
||||
@@ -12,20 +12,20 @@ index ba1ecf0e..cead57cc 100644
|
||||
mparams.kv_overrides = NULL;
|
||||
} else {
|
||||
diff --git a/common/common.h b/common/common.h
|
||||
index d80344f2..71e84834 100644
|
||||
index 65c0ef81..ebca2c77 100644
|
||||
--- a/common/common.h
|
||||
+++ b/common/common.h
|
||||
@@ -174,6 +174,13 @@ struct gpt_params {
|
||||
// multimodal models (see examples/llava)
|
||||
@@ -184,6 +184,13 @@ struct gpt_params {
|
||||
std::string mmproj = ""; // path to multimodal projector
|
||||
std::vector<std::string> image; // path to image file(s)
|
||||
+
|
||||
|
||||
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
|
||||
+ // If the provided progress_callback returns true, model loading continues.
|
||||
+ // If it returns false, model loading is immediately aborted.
|
||||
+ llama_progress_callback progress_callback = NULL;
|
||||
+ // context pointer passed to the progress callback
|
||||
+ void * progress_callback_user_data;
|
||||
};
|
||||
|
||||
void gpt_params_handle_model_default(gpt_params & params);
|
||||
+
|
||||
// embedding
|
||||
bool embedding = false; // get only sentence embedding
|
||||
int32_t embd_normalize = 2; // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
|
||||
|
@@ -1,17 +1,8 @@
|
||||
From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001
|
||||
From: Michael Yang <mxyng@pm.me>
|
||||
Date: Thu, 23 May 2024 11:18:45 -0700
|
||||
Subject: [PATCH] throw exception on load errors
|
||||
|
||||
---
|
||||
llama.cpp | 25 ++++++++++++++++---------
|
||||
1 file changed, 16 insertions(+), 9 deletions(-)
|
||||
|
||||
diff --git a/llama.cpp b/llama.cpp
|
||||
index 15c66077..8ba90b6a 100644
|
||||
--- a/llama.cpp
|
||||
+++ b/llama.cpp
|
||||
@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
|
||||
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||
index 73f52435..58a00fb1 100644
|
||||
--- a/src/llama.cpp
|
||||
+++ b/src/llama.cpp
|
||||
@@ -7241,7 +7241,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
|
||||
}
|
||||
} catch (const std::exception & err) {
|
||||
LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
|
||||
@@ -20,7 +11,7 @@ index 15c66077..8ba90b6a 100644
|
||||
}
|
||||
|
||||
return 0;
|
||||
@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file(
|
||||
@@ -17564,16 +17564,23 @@ struct llama_model * llama_load_model_from_file(
|
||||
}
|
||||
model->rpc_servers.push_back(servers);
|
||||
}
|
||||
@@ -52,6 +43,3 @@ index 15c66077..8ba90b6a 100644
|
||||
}
|
||||
|
||||
return model;
|
||||
--
|
||||
2.45.1
|
||||
|
||||
|
@@ -1,7 +1,7 @@
|
||||
diff --git a/ggml-metal.m b/ggml-metal.m
|
||||
diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
|
||||
index 0207b787..b5e9884b 100644
|
||||
--- a/ggml-metal.m
|
||||
+++ b/ggml-metal.m
|
||||
--- a/ggml/src/ggml-metal.m
|
||||
+++ b/ggml/src/ggml-metal.m
|
||||
@@ -1396,27 +1396,23 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||
// to the matrix-vector kernel
|
||||
int ne11_mm_min = 1;
|
||||
|
@@ -1,11 +1,11 @@
|
||||
diff --git a/llama.cpp b/llama.cpp
|
||||
index 40d2ec2c..74f3ee9c 100644
|
||||
--- a/llama.cpp
|
||||
+++ b/llama.cpp
|
||||
@@ -4642,16 +4642,7 @@ static void llm_load_vocab(
|
||||
|
||||
// for now, only BPE models have pre-tokenizers
|
||||
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||
index 2b9ace28..172640e2 100644
|
||||
--- a/src/llama.cpp
|
||||
+++ b/src/llama.cpp
|
||||
@@ -5357,16 +5357,7 @@ static void llm_load_vocab(
|
||||
if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
|
||||
vocab.tokenizer_add_space_prefix = false;
|
||||
vocab.tokenizer_clean_spaces = true;
|
||||
- if (tokenizer_pre.empty()) {
|
||||
- LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
|
||||
- LLAMA_LOG_WARN("%s: \n", __func__);
|
||||
@@ -15,18 +15,18 @@ index 40d2ec2c..74f3ee9c 100644
|
||||
- LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
|
||||
- LLAMA_LOG_WARN("%s: \n", __func__);
|
||||
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||
- } else if (
|
||||
+ if (
|
||||
tokenizer_pre == "default") {
|
||||
- } else if (tokenizer_pre == "default") {
|
||||
+ if (tokenizer_pre == "default") {
|
||||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||
} else if (
|
||||
@@ -4703,7 +4694,8 @@ static void llm_load_vocab(
|
||||
tokenizer_pre == "smaug-bpe") {
|
||||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
|
||||
tokenizer_pre == "llama3" ||
|
||||
@@ -5439,7 +5430,8 @@ static void llm_load_vocab(
|
||||
tokenizer_pre == "jais") {
|
||||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
|
||||
} else {
|
||||
- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
|
||||
+ LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__);
|
||||
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||
}
|
||||
} else {
|
||||
} else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
|
||||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||
|
@@ -1,7 +1,7 @@
|
||||
diff --git a/llama.cpp b/llama.cpp
|
||||
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||
index 40d2ec2c..f34eb79a 100644
|
||||
--- a/llama.cpp
|
||||
+++ b/llama.cpp
|
||||
--- a/src/llama.cpp
|
||||
+++ b/src/llama.cpp
|
||||
@@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv(
|
||||
struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
|
||||
cb(kq, "kq", il);
|
||||
|
llm/patches/07-embeddings.diff (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||
index 1fe2b9f7..a43312a7 100644
|
||||
--- a/src/llama.cpp
|
||||
+++ b/src/llama.cpp
|
||||
@@ -13689,7 +13689,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
|
||||
const auto n_embd = hparams.n_embd;
|
||||
|
||||
// TODO: use a per-batch flag for logits presence instead
|
||||
- const bool has_logits = !cparams.embeddings;
|
||||
+ const bool has_logits = cparams.causal_attn;
|
||||
const bool has_embd = lctx.is_encoding || (cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE));
|
||||
|
||||
const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
|
||||
@@ -13959,17 +13959,25 @@ static int llama_decode_internal(
|
||||
// no output
|
||||
res = nullptr;
|
||||
embd = nullptr;
|
||||
- } else if (cparams.embeddings) {
|
||||
- res = nullptr; // do not extract logits for embedding case
|
||||
- embd = gf->nodes[gf->n_nodes - 1];
|
||||
- if (strcmp(embd->name, "result_embd_pooled") != 0) {
|
||||
- embd = gf->nodes[gf->n_nodes - 2];
|
||||
+ }
|
||||
+
|
||||
+ if (cparams.embeddings) {
|
||||
+ for (int i = gf->n_nodes - 1; i >= 0; --i) {
|
||||
+ embd = gf->nodes[i];
|
||||
+ if (strcmp(embd->name, "result_embd_pooled") == 0) {
|
||||
+ break;
|
||||
+ }
|
||||
}
|
||||
GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor");
|
||||
- } else {
|
||||
+ } else {
|
||||
embd = nullptr; // do not extract embeddings when not needed
|
||||
GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
|
||||
}
|
||||
+
|
||||
+ if (!cparams.causal_attn) {
|
||||
+ res = nullptr; // do not extract logits when not needed
|
||||
+ }
|
||||
+
|
||||
// LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
|
||||
|
||||
ggml_backend_sched_alloc_graph(lctx.sched, gf);
|
llm/patches/08-clip-unicode.diff (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
|
||||
index 95fbe3d0..5a02a6ec 100644
|
||||
--- a/examples/llava/clip.cpp
|
||||
+++ b/examples/llava/clip.cpp
|
||||
@@ -32,6 +33,14 @@
|
||||
#include <cinttypes>
|
||||
#include <limits>
|
||||
|
||||
+#if defined(_WIN32)
|
||||
+#define WIN32_LEAN_AND_MEAN
|
||||
+#ifndef NOMINMAX
|
||||
+ #define NOMINMAX
|
||||
+#endif
|
||||
+#include <windows.h>
|
||||
+#endif
|
||||
+
|
||||
//#define CLIP_DEBUG_FUNCTIONS
|
||||
|
||||
// RGB uint8 image
|
||||
@@ -1055,7 +1064,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
+#ifdef _WIN32
|
||||
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0);
|
||||
+ if (!wlen) {
|
||||
+ return NULL;
|
||||
+ }
|
||||
+ wchar_t * wbuf = (wchar_t *) malloc(wlen * sizeof(wchar_t));
|
||||
+ wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, wbuf, wlen);
|
||||
+ if (!wlen) {
|
||||
+ free(wbuf);
|
||||
+ return NULL;
|
||||
+ }
|
||||
+ auto fin = std::ifstream(wbuf, std::ios::binary);
|
||||
+ free(wbuf);
|
||||
+#else
|
||||
auto fin = std::ifstream(fname, std::ios::binary);
|
||||
+#endif
|
||||
if (!fin) {
|
||||
LOG_TEE("cannot open model file for loading tensors\n");
|
||||
clip_free(new_clip);
|
llm/patches/09-pooling.diff (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||
index 721b8f4e..cfe7ac40 100644
|
||||
--- a/src/llama.cpp
|
||||
+++ b/src/llama.cpp
|
||||
@@ -8420,14 +8420,14 @@ struct llm_build_context {
|
||||
}
|
||||
|
||||
struct ggml_tensor * build_inp_mean() {
|
||||
- lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
|
||||
+ lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, cparams.n_seq_max);
|
||||
cb(lctx.inp_mean, "inp_mean", -1);
|
||||
ggml_set_input(lctx.inp_mean);
|
||||
return lctx.inp_mean;
|
||||
}
|
||||
|
||||
struct ggml_tensor * build_inp_cls() {
|
||||
- lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
|
||||
+ lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, cparams.n_seq_max);
|
||||
cb(lctx.inp_cls, "inp_cls", -1);
|
||||
ggml_set_input(lctx.inp_cls);
|
||||
return lctx.inp_cls;
|
||||
@@ -13847,19 +13847,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
|
||||
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
|
||||
|
||||
float * data = (float *) lctx.inp_mean->data;
|
||||
- memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
|
||||
+ memset(lctx.inp_mean->data, 0, n_tokens * cparams.n_seq_max * ggml_element_size(lctx.inp_mean));
|
||||
|
||||
std::vector<uint64_t> sum(n_tokens, 0);
|
||||
for (int i = 0; i < n_tokens; ++i) {
|
||||
const llama_seq_id seq_id = batch.seq_id[i][0];
|
||||
-
|
||||
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
|
||||
-
|
||||
sum[seq_id] += 1;
|
||||
}
|
||||
|
||||
- std::vector<float> div(n_tokens, 0.0f);
|
||||
- for (int i = 0; i < n_tokens; ++i) {
|
||||
+ std::vector<float> div(cparams.n_seq_max, 0.0f);
|
||||
+ for (uint32_t i = 0; i < cparams.n_seq_max; ++i) {
|
||||
const uint64_t s = sum[i];
|
||||
if (s > 0) {
|
||||
div[i] = 1.0f/float(s);
|
||||
@@ -13879,14 +13876,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
|
||||
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
|
||||
|
||||
uint32_t * data = (uint32_t *) lctx.inp_cls->data;
|
||||
- memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
|
||||
+ memset(lctx.inp_cls->data, 0, cparams.n_seq_max * ggml_element_size(lctx.inp_cls));
|
||||
|
||||
for (int i = 0; i < n_tokens; ++i) {
|
||||
const llama_seq_id seq_id = batch.seq_id[i][0];
|
||||
const llama_pos pos = batch.pos[i];
|
||||
-
|
||||
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS");
|
||||
-
|
||||
if (pos == 0) {
|
||||
data[seq_id] = i;
|
||||
}
|
@@ -38,7 +38,7 @@ func Init() error {
|
||||
}
|
||||
|
||||
var variants []string
|
||||
for v := range availableServers() {
|
||||
for v := range getAvailableServers() {
|
||||
variants = append(variants, v)
|
||||
}
|
||||
slog.Info(fmt.Sprintf("Dynamic LLM libraries %v", variants))
|
||||
@@ -50,7 +50,7 @@ func Init() error {
|
||||
// binary names may contain an optional variant separated by '_'
|
||||
// For example, "ollama_rocm_v6" and "ollama_rocm_v5" or "ollama_cpu" and "ollama_cpu_avx2"
|
||||
// Any library without a variant is the lowest common denominator
|
||||
func availableServers() map[string]string {
|
||||
func getAvailableServers() map[string]string {
|
||||
payloadsDir, err := gpu.PayloadsDir()
|
||||
if err != nil {
|
||||
slog.Error("payload lookup error", "error", err)
|
||||
@@ -58,7 +58,7 @@ func availableServers() map[string]string {
|
||||
}
|
||||
|
||||
// glob payloadsDir for files that start with ollama_
|
||||
pattern := filepath.Join(payloadsDir, "*")
|
||||
pattern := filepath.Join(payloadsDir, "*", "ollama_*")
|
||||
|
||||
files, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
@@ -69,7 +69,7 @@ func availableServers() map[string]string {
|
||||
servers := make(map[string]string)
|
||||
for _, file := range files {
|
||||
slog.Debug("availableServers : found", "file", file)
|
||||
servers[filepath.Base(file)] = file
|
||||
servers[filepath.Base(filepath.Dir(file))] = filepath.Dir(file)
|
||||
}
|
||||
|
||||
return servers
|
||||
@@ -80,10 +80,10 @@ func availableServers() map[string]string {
|
||||
// TODO - switch to metadata based mapping
|
||||
func serversForGpu(info gpu.GpuInfo) []string {
|
||||
// glob workDir for files that start with ollama_
|
||||
availableServers := availableServers()
|
||||
availableServers := getAvailableServers()
|
||||
requested := info.Library
|
||||
if info.Variant != "" {
|
||||
requested += "_" + info.Variant
|
||||
if info.Variant != gpu.CPUCapabilityNone {
|
||||
requested += "_" + info.Variant.String()
|
||||
}
|
||||
|
||||
servers := []string{}
|
||||
@@ -115,27 +115,29 @@ func serversForGpu(info gpu.GpuInfo) []string {
|
||||
servers = append(servers, alt...)
|
||||
}
|
||||
|
||||
// Load up the best CPU variant if not primary requested
|
||||
if info.Library != "cpu" {
|
||||
variant := gpu.GetCPUVariant()
|
||||
// If no variant, then we fall back to default
|
||||
// If we have a variant, try that if we find an exact match
|
||||
// Attempting to run the wrong CPU instructions will panic the
|
||||
// process
|
||||
if variant != "" {
|
||||
for cmp := range availableServers {
|
||||
if cmp == "cpu_"+variant {
|
||||
servers = append(servers, cmp)
|
||||
break
|
||||
if !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") {
|
||||
// Load up the best CPU variant if not primary requested
|
||||
if info.Library != "cpu" {
|
||||
variant := gpu.GetCPUCapability()
|
||||
// If no variant, then we fall back to default
|
||||
// If we have a variant, try that if we find an exact match
|
||||
// Attempting to run the wrong CPU instructions will panic the
|
||||
// process
|
||||
if variant != gpu.CPUCapabilityNone {
|
||||
for cmp := range availableServers {
|
||||
if cmp == "cpu_"+variant.String() {
|
||||
servers = append(servers, cmp)
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
servers = append(servers, "cpu")
|
||||
}
|
||||
} else {
|
||||
servers = append(servers, "cpu")
|
||||
}
|
||||
}
|
||||
|
||||
if len(servers) == 0 {
|
||||
servers = []string{"cpu"}
|
||||
if len(servers) == 0 {
|
||||
servers = []string{"cpu"}
|
||||
}
|
||||
}
|
||||
|
||||
return servers
|
||||
@@ -146,11 +148,11 @@ func serverForCpu() string {
|
||||
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
|
||||
return "metal"
|
||||
}
|
||||
variant := gpu.GetCPUVariant()
|
||||
availableServers := availableServers()
|
||||
if variant != "" {
|
||||
variant := gpu.GetCPUCapability()
|
||||
availableServers := getAvailableServers()
|
||||
if variant != gpu.CPUCapabilityNone {
|
||||
for cmp := range availableServers {
|
||||
if cmp == "cpu_"+variant {
|
||||
if cmp == "cpu_"+variant.String() {
|
||||
return cmp
|
||||
}
|
||||
}
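The hunks above rename availableServers to getAvailableServers and move the CPU variant from a plain string to gpu.CPUCapability, but the selection order is unchanged: prefer an exact "cpu_<variant>" runner when one exists, otherwise fall back to the plain "cpu" runner. A hedged restatement of that rule (helper name hypothetical, assumes the llm package):

// Illustrative only; mirrors the fallback implemented in serverForCpu above.
func pickCPURunner(available map[string]string, variant string) string {
	if variant != "" {
		if _, ok := available["cpu_"+variant]; ok {
			return "cpu_" + variant // exact variant match, e.g. "cpu_avx2"
		}
	}
	return "cpu" // lowest common denominator
}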
|
||||
|
llm/server.go (175 changed lines)
@@ -37,8 +37,9 @@ type LlamaServer interface {
|
||||
Tokenize(ctx context.Context, content string) ([]int, error)
|
||||
Detokenize(ctx context.Context, tokens []int) (string, error)
|
||||
Close() error
|
||||
EstimatedVRAM() uint64
|
||||
EstimatedVRAM() uint64 // Total VRAM across all GPUs
|
||||
EstimatedTotal() uint64
|
||||
EstimatedVRAMByGPU(gpuID string) uint64
|
||||
}
|
||||
|
||||
// llmServer is an instance of the llama.cpp server
|
||||
@@ -49,18 +50,22 @@ type llmServer struct {
|
||||
status *StatusWriter
|
||||
options api.Options
|
||||
|
||||
// TODO - this should be broken down by GPU
|
||||
estimatedVRAM uint64 // Estimated usage of VRAM by the loaded model
|
||||
estimatedTotal uint64 // Total size of model
|
||||
totalLayers uint64
|
||||
gpuCount int
|
||||
loadDuration time.Duration // Record how long it took the model to load
|
||||
loadProgress float32
|
||||
estimate MemoryEstimate
|
||||
totalLayers uint64
|
||||
// gpuCount int
|
||||
gpus gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
|
||||
loadDuration time.Duration // Record how long it took the model to load
|
||||
loadProgress float32
|
||||
|
||||
sem *semaphore.Weighted
|
||||
}
|
||||
|
||||
func LoadModel(model string) (*GGML, error) {
|
||||
// LoadModel will load a model from disk. The model must be in the GGML format.
|
||||
//
|
||||
// It collects array values for arrays with a size less than or equal to
|
||||
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
|
||||
// the maxArraySize is negative, all arrays are collected.
|
||||
func LoadModel(model string, maxArraySize int) (*GGML, error) {
|
||||
if _, err := os.Stat(model); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -71,52 +76,54 @@ func LoadModel(model string) (*GGML, error) {
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
ggml, _, err := DecodeGGML(f)
|
||||
ggml, _, err := DecodeGGML(f, maxArraySize)
|
||||
return ggml, err
|
||||
}
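The new maxArraySize argument controls how much array metadata DecodeGGML keeps when reading the file: 0 means the default cap of 1024 entries and a negative value collects every array. A usage sketch under those assumptions (the path and wrapper function are placeholders, not from the diff):

// Sketch: typical call sites for the new LoadModel signature.
func loadBoth(path string) error {
	// 0 keeps arrays up to the default cap of 1024 entries.
	if _, err := LoadModel(path, 0); err != nil {
		return err
	}
	// A negative cap collects all arrays, at the cost of more memory.
	_, err := LoadModel(path, -1)
	return err
}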
|
||||
|
||||
// NewLlamaServer will run a server for the given GPUs
|
||||
// The gpu list must be a single family.
|
||||
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
|
||||
func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
|
||||
var err error
|
||||
var cpuRunner string
|
||||
var estimatedVRAM uint64
|
||||
var estimatedTotal uint64
|
||||
var systemMemory uint64
|
||||
gpuCount := len(gpus)
|
||||
if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
|
||||
// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
|
||||
var estimate MemoryEstimate
|
||||
var systemTotalMemory uint64
|
||||
var systemFreeMemory uint64
|
||||
|
||||
cpuRunner = serverForCpu()
|
||||
gpuCount = 0
|
||||
_, _, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
systemMemInfo, err := gpu.GetCPUMem()
|
||||
if err != nil {
|
||||
slog.Error("failed to lookup system memory", "error", err)
|
||||
} else {
|
||||
if gpus[0].Library == "metal" {
|
||||
memInfo, err := gpu.GetCPUMem()
|
||||
if err != nil {
|
||||
slog.Error("failed to lookup system memory", "error", err)
|
||||
} else {
|
||||
systemMemory = memInfo.TotalMemory
|
||||
slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
|
||||
}
|
||||
}
|
||||
var layers int
|
||||
layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
systemTotalMemory = systemMemInfo.TotalMemory
|
||||
systemFreeMemory = systemMemInfo.FreeMemory
|
||||
slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", systemFreeMemory)
|
||||
}
|
||||
|
||||
// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
|
||||
if opts.NumGPU == 0 {
|
||||
gpus = gpu.GetCPUInfo()
|
||||
}
|
||||
if len(gpus) == 1 && gpus[0].Library == "cpu" {
|
||||
cpuRunner = serverForCpu()
|
||||
estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
} else {
|
||||
estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||
|
||||
switch {
|
||||
case gpus[0].Library == "metal" && estimatedVRAM > systemMemory:
|
||||
case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
|
||||
// disable partial offloading when model is greater than total system memory as this
|
||||
// can lead to locking up the system
|
||||
opts.NumGPU = 0
|
||||
case gpus[0].Library != "metal" && layers == 0:
|
||||
case gpus[0].Library != "metal" && estimate.Layers == 0:
|
||||
// Don't bother loading into the GPU if no layers can fit
|
||||
cpuRunner = serverForCpu()
|
||||
gpuCount = 0
|
||||
case opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu":
|
||||
opts.NumGPU = layers
|
||||
gpus = gpu.GetCPUInfo()
|
||||
case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
|
||||
opts.NumGPU = estimate.Layers
|
||||
}
|
||||
}
|
||||
|
||||
estimate.log()
|
||||
|
||||
// Loop through potential servers
|
||||
finalErr := errors.New("no suitable llama servers found")
|
||||
|
||||
@@ -124,7 +131,20 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
|
||||
}
|
||||
|
||||
availableServers := availableServers()
|
||||
availableServers := getAvailableServers()
|
||||
if len(availableServers) == 0 {
|
||||
if runtime.GOOS != "windows" {
|
||||
slog.Warn("llama server binary disappeared, reinitializing payloads")
|
||||
err = Init()
|
||||
if err != nil {
|
||||
slog.Warn("failed to reinitialize payloads", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
availableServers = getAvailableServers()
|
||||
} else {
|
||||
return nil, finalErr
|
||||
}
|
||||
}
|
||||
var servers []string
|
||||
if cpuRunner != "" {
|
||||
servers = []string{cpuRunner}
|
||||
@@ -201,7 +221,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
if g.Library == "metal" &&
|
||||
uint64(opts.NumGPU) > 0 &&
|
||||
uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
|
||||
opts.UseMMap = false
|
||||
opts.UseMMap = new(bool)
|
||||
*opts.UseMMap = false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,7 +230,13 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
params = append(params, "--flash-attn")
|
||||
}
|
||||
|
||||
if !opts.UseMMap {
|
||||
// Windows CUDA should not use mmap for best performance
|
||||
// Linux with a model larger than free space, mmap leads to thrashing
|
||||
// For CPU loads we want the memory to be allocated, not FS cache
|
||||
if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
|
||||
(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
|
||||
(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
|
||||
(opts.UseMMap != nil && !*opts.UseMMap) {
|
||||
params = append(params, "--no-mmap")
|
||||
}
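Restated as a single predicate, the new logic passes --no-mmap when the user explicitly disables mmap, or when mmap is left unset on Windows with CUDA, on a CPU-only load, or on Linux with less free memory than the estimated total size. A sketch of that rule (function name hypothetical, conditions copied from the block above):

// Illustrative only; mirrors the --no-mmap conditions in NewLlamaServer.
func shouldDisableMMap(goos, library string, useMMap *bool, freeMem, totalSize uint64) bool {
	explicitOff := useMMap != nil && !*useMMap
	unset := useMMap == nil
	return explicitOff ||
		(unset && goos == "windows" && library == "cuda") ||
		(unset && goos == "linux" && freeMem < totalSize) ||
		(unset && library == "cpu")
}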
|
||||
|
||||
@@ -221,16 +248,15 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
params = append(params, "--numa")
|
||||
}
|
||||
|
||||
numParallel := envconfig.NumParallel
|
||||
params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))
|
||||
|
||||
// TODO (jmorganca): multimodal models don't support parallel yet
|
||||
// see https://github.com/ollama/ollama/issues/4165
|
||||
if len(projectors) > 0 {
|
||||
numParallel = 1
|
||||
slog.Warn("multimodal models don't support parallel requests yet")
|
||||
if estimate.TensorSplit != "" {
|
||||
params = append(params, "--tensor-split", estimate.TensorSplit)
|
||||
}
|
||||
|
||||
params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))
|
||||
if estimate.TensorSplit != "" {
|
||||
params = append(params, "--tensor-split", estimate.TensorSplit)
|
||||
}
|
||||
|
||||
for i := range len(servers) {
|
||||
dir := availableServers[servers[i]]
|
||||
@@ -242,8 +268,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
}
|
||||
|
||||
if strings.HasPrefix(servers[i], "cpu") {
|
||||
// TODO if we tried a gpu runner first, and it failed, record the error and bubble that back up
|
||||
gpuCount = 0
|
||||
gpus = gpu.GetCPUInfo()
|
||||
}
|
||||
|
||||
// Find an availableServers port, retry on each iteration in case the failure was a port conflict race
|
||||
@@ -265,8 +290,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
if runtime.GOOS == "windows" {
|
||||
pathEnv = "PATH"
|
||||
}
|
||||
// prepend the server directory to LD_LIBRARY_PATH/PATH
|
||||
libraryPaths := []string{dir}
|
||||
// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
|
||||
libraryPaths := []string{dir, filepath.Dir(dir)}
|
||||
|
||||
if libraryPath, ok := os.LookupEnv(pathEnv); ok {
|
||||
// Append our runner directory to the path
|
||||
@@ -299,22 +324,25 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
}
|
||||
|
||||
s := &llmServer{
|
||||
port: port,
|
||||
cmd: exec.Command(server, finalParams...),
|
||||
status: NewStatusWriter(os.Stderr),
|
||||
options: opts,
|
||||
estimatedVRAM: estimatedVRAM,
|
||||
estimatedTotal: estimatedTotal,
|
||||
sem: semaphore.NewWeighted(int64(numParallel)),
|
||||
totalLayers: ggml.KV().BlockCount() + 1,
|
||||
gpuCount: gpuCount,
|
||||
done: make(chan error, 1),
|
||||
port: port,
|
||||
cmd: exec.Command(server, finalParams...),
|
||||
status: NewStatusWriter(os.Stderr),
|
||||
options: opts,
|
||||
estimate: estimate,
|
||||
sem: semaphore.NewWeighted(int64(numParallel)),
|
||||
totalLayers: ggml.KV().BlockCount() + 1,
|
||||
gpus: gpus,
|
||||
done: make(chan error, 1),
|
||||
}
|
||||
|
||||
s.cmd.Env = os.Environ()
|
||||
s.cmd.Stdout = os.Stdout
|
||||
s.cmd.Stderr = s.status
|
||||
|
||||
envWorkarounds := [][2]string{}
|
||||
for _, gpu := range gpus {
|
||||
envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
|
||||
}
|
||||
visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
|
||||
pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))
|
||||
|
||||
@@ -329,6 +357,12 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
|
||||
} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
|
||||
s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
|
||||
devicesNeeded = false
|
||||
} else if len(envWorkarounds) != 0 {
|
||||
for _, kv := range envWorkarounds {
|
||||
if strings.EqualFold(cmp[0], kv[0]) {
|
||||
s.cmd.Env[i] = kv[0] + "=" + kv[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if pathNeeded {
|
||||
@@ -390,7 +424,7 @@ func projectorMemoryRequirements(filename string) uint64 {
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
ggml, _, err := DecodeGGML(file)
|
||||
ggml, _, err := DecodeGGML(file, 0)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
@@ -540,6 +574,9 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
|
||||
if s.status != nil && s.status.LastErrMsg != "" {
|
||||
msg = s.status.LastErrMsg
|
||||
}
|
||||
if strings.Contains(msg, "unknown model") {
|
||||
return fmt.Errorf("this model is not supported by your version of Ollama. You may need to upgrade")
|
||||
}
|
||||
return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
|
||||
default:
|
||||
}
|
||||
@@ -662,10 +699,9 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
|
||||
}
|
||||
defer s.sem.Release(1)
|
||||
|
||||
// only allow maximum 10 "context shifts" to avoid infinite generation
|
||||
// put an upper limit on num_predict to avoid the model running on forever
|
||||
if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
|
||||
req.Options.NumPredict = 10 * s.options.NumCtx
|
||||
slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
|
||||
}
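With the default num_ctx of 2048, for example, this caps a single request at 10 * 2048 = 20480 predicted tokens even if the client asks for an unbounded num_predict.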
|
||||
|
||||
request := map[string]any{
|
||||
@@ -1004,11 +1040,20 @@ func (s *llmServer) Close() error {
|
||||
}
|
||||
|
||||
func (s *llmServer) EstimatedVRAM() uint64 {
|
||||
return s.estimatedVRAM
|
||||
return s.estimate.VRAMSize
|
||||
}
|
||||
|
||||
func (s *llmServer) EstimatedTotal() uint64 {
|
||||
return s.estimatedTotal
|
||||
return s.estimate.TotalSize
|
||||
}
|
||||
|
||||
func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
|
||||
for i, gpu := range s.gpus {
|
||||
if gpu.ID == gpuID {
|
||||
return s.estimate.GPUSizes[i]
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func parseDurationMs(ms float64) time.Duration {
|
||||
|
@@ -25,6 +25,7 @@ var errorPrefixes = []string{
|
||||
"CUDA error",
|
||||
"cudaMalloc failed",
|
||||
"\"ERR\"",
|
||||
"error loading model",
|
||||
}
|
||||
|
||||
func (w *StatusWriter) Write(b []byte) (int, error) {
|
||||
|
openai/openai.go (389 changed lines)
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
)
|
||||
|
||||
type Error struct {
|
||||
@@ -42,6 +43,12 @@ type ChunkChoice struct {
|
||||
FinishReason *string `json:"finish_reason"`
|
||||
}
|
||||
|
||||
type CompleteChunkChoice struct {
|
||||
Text string `json:"text"`
|
||||
Index int `json:"index"`
|
||||
FinishReason *string `json:"finish_reason"`
|
||||
}
|
||||
|
||||
type Usage struct {
|
||||
PromptTokens int `json:"prompt_tokens"`
|
||||
CompletionTokens int `json:"completion_tokens"`
|
||||
@@ -85,6 +92,51 @@ type ChatCompletionChunk struct {
|
||||
Choices []ChunkChoice `json:"choices"`
|
||||
}
|
||||
|
||||
// TODO (https://github.com/ollama/ollama/issues/5259): support []string, []int and [][]int
|
||||
type CompletionRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
FrequencyPenalty float32 `json:"frequency_penalty"`
|
||||
MaxTokens *int `json:"max_tokens"`
|
||||
PresencePenalty float32 `json:"presence_penalty"`
|
||||
Seed *int `json:"seed"`
|
||||
Stop any `json:"stop"`
|
||||
Stream bool `json:"stream"`
|
||||
Temperature *float32 `json:"temperature"`
|
||||
TopP float32 `json:"top_p"`
|
||||
}
|
||||
|
||||
type Completion struct {
|
||||
Id string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Created int64 `json:"created"`
|
||||
Model string `json:"model"`
|
||||
SystemFingerprint string `json:"system_fingerprint"`
|
||||
Choices []CompleteChunkChoice `json:"choices"`
|
||||
Usage Usage `json:"usage,omitempty"`
|
||||
}
|
||||
|
||||
type CompletionChunk struct {
|
||||
Id string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Created int64 `json:"created"`
|
||||
Choices []CompleteChunkChoice `json:"choices"`
|
||||
Model string `json:"model"`
|
||||
SystemFingerprint string `json:"system_fingerprint"`
|
||||
}
|
||||
|
||||
type Model struct {
|
||||
Id string `json:"id"`
|
||||
Object string `json:"object"`
|
||||
Created int64 `json:"created"`
|
||||
OwnedBy string `json:"owned_by"`
|
||||
}
|
||||
|
||||
type ListCompletion struct {
|
||||
Object string `json:"object"`
|
||||
Data []Model `json:"data"`
|
||||
}
|
||||
|
||||
func NewError(code int, message string) ErrorResponse {
|
||||
var etype string
|
||||
switch code {
|
||||
@@ -145,7 +197,79 @@ func toChunk(id string, r api.ChatResponse) ChatCompletionChunk {
|
||||
}
|
||||
}
|
||||
|
||||
func fromRequest(r ChatCompletionRequest) api.ChatRequest {
|
||||
func toCompletion(id string, r api.GenerateResponse) Completion {
|
||||
return Completion{
|
||||
Id: id,
|
||||
Object: "text_completion",
|
||||
Created: r.CreatedAt.Unix(),
|
||||
Model: r.Model,
|
||||
SystemFingerprint: "fp_ollama",
|
||||
Choices: []CompleteChunkChoice{{
|
||||
Text: r.Response,
|
||||
Index: 0,
|
||||
FinishReason: func(reason string) *string {
|
||||
if len(reason) > 0 {
|
||||
return &reason
|
||||
}
|
||||
return nil
|
||||
}(r.DoneReason),
|
||||
}},
|
||||
Usage: Usage{
|
||||
// TODO: ollama returns 0 for prompt eval if the prompt was cached, but openai returns the actual count
|
||||
PromptTokens: r.PromptEvalCount,
|
||||
CompletionTokens: r.EvalCount,
|
||||
TotalTokens: r.PromptEvalCount + r.EvalCount,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func toCompleteChunk(id string, r api.GenerateResponse) CompletionChunk {
|
||||
return CompletionChunk{
|
||||
Id: id,
|
||||
Object: "text_completion",
|
||||
Created: time.Now().Unix(),
|
||||
Model: r.Model,
|
||||
SystemFingerprint: "fp_ollama",
|
||||
Choices: []CompleteChunkChoice{{
|
||||
Text: r.Response,
|
||||
Index: 0,
|
||||
FinishReason: func(reason string) *string {
|
||||
if len(reason) > 0 {
|
||||
return &reason
|
||||
}
|
||||
return nil
|
||||
}(r.DoneReason),
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
func toListCompletion(r api.ListResponse) ListCompletion {
|
||||
var data []Model
|
||||
for _, m := range r.Models {
|
||||
data = append(data, Model{
|
||||
Id: m.Name,
|
||||
Object: "model",
|
||||
Created: m.ModifiedAt.Unix(),
|
||||
OwnedBy: model.ParseName(m.Name).Namespace,
|
||||
})
|
||||
}
|
||||
|
||||
return ListCompletion{
|
||||
Object: "list",
|
||||
Data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func toModel(r api.ShowResponse, m string) Model {
|
||||
return Model{
|
||||
Id: m,
|
||||
Object: "model",
|
||||
Created: r.ModifiedAt.Unix(),
|
||||
OwnedBy: model.ParseName(m).Namespace,
|
||||
}
|
||||
}
|
||||
|
||||
func fromChatRequest(r ChatCompletionRequest) api.ChatRequest {
|
||||
var messages []api.Message
|
||||
for _, msg := range r.Messages {
|
||||
messages = append(messages, api.Message{Role: msg.Role, Content: msg.Content})
|
||||
@@ -156,7 +280,7 @@ func fromRequest(r ChatCompletionRequest) api.ChatRequest {
|
||||
switch stop := r.Stop.(type) {
|
||||
case string:
|
||||
options["stop"] = []string{stop}
|
||||
case []interface{}:
|
||||
case []any:
|
||||
var stops []string
|
||||
for _, s := range stop {
|
||||
if str, ok := s.(string); ok {
|
||||
@@ -178,9 +302,6 @@ func fromRequest(r ChatCompletionRequest) api.ChatRequest {
|
||||
|
||||
if r.Seed != nil {
|
||||
options["seed"] = *r.Seed
|
||||
|
||||
// temperature=0 is required for reproducible outputs
|
||||
options["temperature"] = 0.0
|
||||
}
|
||||
|
||||
if r.FrequencyPenalty != nil {
|
||||
@@ -211,13 +332,78 @@ func fromRequest(r ChatCompletionRequest) api.ChatRequest {
|
||||
}
|
||||
}
|
||||
|
||||
type writer struct {
|
||||
stream bool
|
||||
id string
|
||||
func fromCompleteRequest(r CompletionRequest) (api.GenerateRequest, error) {
|
||||
options := make(map[string]any)
|
||||
|
||||
switch stop := r.Stop.(type) {
|
||||
case string:
|
||||
options["stop"] = []string{stop}
|
||||
case []string:
|
||||
options["stop"] = stop
|
||||
default:
|
||||
if r.Stop != nil {
|
||||
return api.GenerateRequest{}, fmt.Errorf("invalid type for 'stop' field: %T", r.Stop)
|
||||
}
|
||||
}
|
||||
|
||||
if r.MaxTokens != nil {
|
||||
options["num_predict"] = *r.MaxTokens
|
||||
}
|
||||
|
||||
if r.Temperature != nil {
|
||||
options["temperature"] = *r.Temperature * 2.0
|
||||
} else {
|
||||
options["temperature"] = 1.0
|
||||
}
|
||||
|
||||
if r.Seed != nil {
|
||||
options["seed"] = *r.Seed
|
||||
}
|
||||
|
||||
options["frequency_penalty"] = r.FrequencyPenalty * 2.0
|
||||
|
||||
options["presence_penalty"] = r.PresencePenalty * 2.0
|
||||
|
||||
if r.TopP != 0.0 {
|
||||
options["top_p"] = r.TopP
|
||||
} else {
|
||||
options["top_p"] = 1.0
|
||||
}
|
||||
|
||||
return api.GenerateRequest{
|
||||
Model: r.Model,
|
||||
Prompt: r.Prompt,
|
||||
Options: options,
|
||||
Stream: &r.Stream,
|
||||
}, nil
|
||||
}
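fromCompleteRequest doubles the OpenAI-style temperature, frequency_penalty, and presence_penalty values before handing them to the native generate endpoint, and substitutes 1.0 when temperature or top_p is omitted; the middleware test further down exercises exactly this mapping (a request temperature of 0.8 arrives at the endpoint as 1.6). A worked example of the conversion:

// Given this request body ...
//   {"model": "m", "prompt": "Hello", "temperature": 0.8, "frequency_penalty": 0.5}
// ... the options forwarded to /api/generate become:
//   temperature       = 0.8 * 2.0 = 1.6
//   frequency_penalty = 0.5 * 2.0 = 1.0
//   presence_penalty  = 0.0 * 2.0 = 0.0
//   top_p             = 1.0 (default when the request omits it)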
|
||||
|
||||
type BaseWriter struct {
|
||||
gin.ResponseWriter
|
||||
}
|
||||
|
||||
func (w *writer) writeError(code int, data []byte) (int, error) {
|
||||
type ChatWriter struct {
|
||||
stream bool
|
||||
id string
|
||||
BaseWriter
|
||||
}
|
||||
|
||||
type CompleteWriter struct {
|
||||
stream bool
|
||||
id string
|
||||
BaseWriter
|
||||
}
|
||||
|
||||
type ListWriter struct {
|
||||
BaseWriter
|
||||
}
|
||||
|
||||
type RetrieveWriter struct {
|
||||
BaseWriter
|
||||
model string
|
||||
}
|
||||
|
||||
func (w *BaseWriter) writeError(code int, data []byte) (int, error) {
|
||||
var serr api.StatusError
|
||||
err := json.Unmarshal(data, &serr)
|
||||
if err != nil {
|
||||
@@ -233,7 +419,7 @@ func (w *writer) writeError(code int, data []byte) (int, error) {
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (w *writer) writeResponse(data []byte) (int, error) {
|
||||
func (w *ChatWriter) writeResponse(data []byte) (int, error) {
|
||||
var chatResponse api.ChatResponse
|
||||
err := json.Unmarshal(data, &chatResponse)
|
||||
if err != nil {
|
||||
@@ -273,7 +459,7 @@ func (w *writer) writeResponse(data []byte) (int, error) {
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (w *writer) Write(data []byte) (int, error) {
|
||||
func (w *ChatWriter) Write(data []byte) (int, error) {
|
||||
code := w.ResponseWriter.Status()
|
||||
if code != http.StatusOK {
|
||||
return w.writeError(code, data)
|
||||
@@ -282,7 +468,176 @@ func (w *writer) Write(data []byte) (int, error) {
|
||||
return w.writeResponse(data)
|
||||
}
|
||||
|
||||
func Middleware() gin.HandlerFunc {
|
||||
func (w *CompleteWriter) writeResponse(data []byte) (int, error) {
|
||||
var generateResponse api.GenerateResponse
|
||||
err := json.Unmarshal(data, &generateResponse)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// completion chunk
|
||||
if w.stream {
|
||||
d, err := json.Marshal(toCompleteChunk(w.id, generateResponse))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
w.ResponseWriter.Header().Set("Content-Type", "text/event-stream")
|
||||
_, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d)))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if generateResponse.Done {
|
||||
_, err = w.ResponseWriter.Write([]byte("data: [DONE]\n\n"))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
// completion
|
||||
w.ResponseWriter.Header().Set("Content-Type", "application/json")
|
||||
err = json.NewEncoder(w.ResponseWriter).Encode(toCompletion(w.id, generateResponse))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (w *CompleteWriter) Write(data []byte) (int, error) {
|
||||
code := w.ResponseWriter.Status()
|
||||
if code != http.StatusOK {
|
||||
return w.writeError(code, data)
|
||||
}
|
||||
|
||||
return w.writeResponse(data)
|
||||
}
|
||||
|
||||
func (w *ListWriter) writeResponse(data []byte) (int, error) {
|
||||
var listResponse api.ListResponse
|
||||
err := json.Unmarshal(data, &listResponse)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
w.ResponseWriter.Header().Set("Content-Type", "application/json")
|
||||
err = json.NewEncoder(w.ResponseWriter).Encode(toListCompletion(listResponse))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (w *ListWriter) Write(data []byte) (int, error) {
|
||||
code := w.ResponseWriter.Status()
|
||||
if code != http.StatusOK {
|
||||
return w.writeError(code, data)
|
||||
}
|
||||
|
||||
return w.writeResponse(data)
|
||||
}
|
||||
|
||||
func (w *RetrieveWriter) writeResponse(data []byte) (int, error) {
|
||||
var showResponse api.ShowResponse
|
||||
err := json.Unmarshal(data, &showResponse)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// retrieve completion
|
||||
w.ResponseWriter.Header().Set("Content-Type", "application/json")
|
||||
err = json.NewEncoder(w.ResponseWriter).Encode(toModel(showResponse, w.model))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (w *RetrieveWriter) Write(data []byte) (int, error) {
|
||||
code := w.ResponseWriter.Status()
|
||||
if code != http.StatusOK {
|
||||
return w.writeError(code, data)
|
||||
}
|
||||
|
||||
return w.writeResponse(data)
|
||||
}
|
||||
|
||||
func ListMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
w := &ListWriter{
|
||||
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||
}
|
||||
|
||||
c.Writer = w
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func RetrieveMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
var b bytes.Buffer
|
||||
if err := json.NewEncoder(&b).Encode(api.ShowRequest{Name: c.Param("model")}); err != nil {
|
||||
c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
c.Request.Body = io.NopCloser(&b)
|
||||
|
||||
// response writer
|
||||
w := &RetrieveWriter{
|
||||
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||
model: c.Param("model"),
|
||||
}
|
||||
|
||||
c.Writer = w
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func CompletionsMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
var req CompletionRequest
|
||||
err := c.ShouldBindJSON(&req)
|
||||
if err != nil {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
genReq, err := fromCompleteRequest(req)
|
||||
if err != nil {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
if err := json.NewEncoder(&b).Encode(genReq); err != nil {
|
||||
c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
c.Request.Body = io.NopCloser(&b)
|
||||
|
||||
w := &CompleteWriter{
|
||||
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||
stream: req.Stream,
|
||||
id: fmt.Sprintf("cmpl-%d", rand.Intn(999)),
|
||||
}
|
||||
|
||||
c.Writer = w
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func ChatMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
var req ChatCompletionRequest
|
||||
err := c.ShouldBindJSON(&req)
|
||||
@@ -297,17 +652,17 @@ func Middleware() gin.HandlerFunc {
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
if err := json.NewEncoder(&b).Encode(fromRequest(req)); err != nil {
|
||||
if err := json.NewEncoder(&b).Encode(fromChatRequest(req)); err != nil {
|
||||
c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
c.Request.Body = io.NopCloser(&b)
|
||||
|
||||
w := &writer{
|
||||
ResponseWriter: c.Writer,
|
||||
stream: req.Stream,
|
||||
id: fmt.Sprintf("chatcmpl-%d", rand.Intn(999)),
|
||||
w := &ChatWriter{
|
||||
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||
stream: req.Stream,
|
||||
id: fmt.Sprintf("chatcmpl-%d", rand.Intn(999)),
|
||||
}
|
||||
|
||||
c.Writer = w
|
||||
|
openai/openai_test.go (new file, 298 lines)
@@ -0,0 +1,298 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMiddleware(t *testing.T) {
|
||||
type testCase struct {
|
||||
Name string
|
||||
Method string
|
||||
Path string
|
||||
TestPath string
|
||||
Handler func() gin.HandlerFunc
|
||||
Endpoint func(c *gin.Context)
|
||||
Setup func(t *testing.T, req *http.Request)
|
||||
Expected func(t *testing.T, resp *httptest.ResponseRecorder)
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
Name: "chat handler",
|
||||
Method: http.MethodPost,
|
||||
Path: "/api/chat",
|
||||
TestPath: "/api/chat",
|
||||
Handler: ChatMiddleware,
|
||||
Endpoint: func(c *gin.Context) {
|
||||
var chatReq api.ChatRequest
|
||||
if err := c.ShouldBindJSON(&chatReq); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
return
|
||||
}
|
||||
|
||||
userMessage := chatReq.Messages[0].Content
|
||||
var assistantMessage string
|
||||
|
||||
switch userMessage {
|
||||
case "Hello":
|
||||
assistantMessage = "Hello!"
|
||||
default:
|
||||
assistantMessage = "I'm not sure how to respond to that."
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, api.ChatResponse{
|
||||
Message: api.Message{
|
||||
Role: "assistant",
|
||||
Content: assistantMessage,
|
||||
},
|
||||
})
|
||||
},
|
||||
Setup: func(t *testing.T, req *http.Request) {
|
||||
body := ChatCompletionRequest{
|
||||
Model: "test-model",
|
||||
Messages: []Message{{Role: "user", Content: "Hello"}},
|
||||
}
|
||||
|
||||
bodyBytes, _ := json.Marshal(body)
|
||||
|
||||
req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
},
|
||||
Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
|
||||
assert.Equal(t, http.StatusOK, resp.Code)
|
||||
|
||||
var chatResp ChatCompletion
|
||||
if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if chatResp.Object != "chat.completion" {
|
||||
t.Fatalf("expected chat.completion, got %s", chatResp.Object)
|
||||
}
|
||||
|
||||
if chatResp.Choices[0].Message.Content != "Hello!" {
|
||||
t.Fatalf("expected Hello!, got %s", chatResp.Choices[0].Message.Content)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "completions handler",
|
||||
Method: http.MethodPost,
|
||||
Path: "/api/generate",
|
||||
TestPath: "/api/generate",
|
||||
Handler: CompletionsMiddleware,
|
||||
Endpoint: func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, api.GenerateResponse{
|
||||
Response: "Hello!",
|
||||
})
|
||||
},
|
||||
Setup: func(t *testing.T, req *http.Request) {
|
||||
body := CompletionRequest{
|
||||
Model: "test-model",
|
||||
Prompt: "Hello",
|
||||
}
|
||||
|
||||
bodyBytes, _ := json.Marshal(body)
|
||||
|
||||
req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
},
|
||||
Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
|
||||
assert.Equal(t, http.StatusOK, resp.Code)
|
||||
var completionResp Completion
|
||||
if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if completionResp.Object != "text_completion" {
|
||||
t.Fatalf("expected text_completion, got %s", completionResp.Object)
|
||||
}
|
||||
|
||||
if completionResp.Choices[0].Text != "Hello!" {
|
||||
t.Fatalf("expected Hello!, got %s", completionResp.Choices[0].Text)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "completions handler with params",
|
||||
Method: http.MethodPost,
|
||||
Path: "/api/generate",
|
||||
TestPath: "/api/generate",
|
||||
Handler: CompletionsMiddleware,
|
||||
Endpoint: func(c *gin.Context) {
|
||||
var generateReq api.GenerateRequest
|
||||
if err := c.ShouldBindJSON(&generateReq); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
return
|
||||
}
|
||||
|
||||
temperature := generateReq.Options["temperature"].(float64)
|
||||
var assistantMessage string
|
||||
|
||||
switch temperature {
|
||||
case 1.6:
|
||||
assistantMessage = "Received temperature of 1.6"
|
||||
default:
|
||||
assistantMessage = fmt.Sprintf("Received temperature of %f", temperature)
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, api.GenerateResponse{
|
||||
Response: assistantMessage,
|
||||
})
|
||||
},
|
||||
Setup: func(t *testing.T, req *http.Request) {
|
||||
temp := float32(0.8)
|
||||
body := CompletionRequest{
|
||||
Model: "test-model",
|
||||
Prompt: "Hello",
|
||||
Temperature: &temp,
|
||||
}
|
||||
|
||||
bodyBytes, _ := json.Marshal(body)
|
||||
|
||||
req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
},
|
||||
Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
|
||||
assert.Equal(t, http.StatusOK, resp.Code)
|
||||
var completionResp Completion
|
||||
if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if completionResp.Object != "text_completion" {
|
||||
t.Fatalf("expected text_completion, got %s", completionResp.Object)
|
||||
}
|
||||
|
||||
if completionResp.Choices[0].Text != "Received temperature of 1.6" {
|
||||
t.Fatalf("expected Received temperature of 1.6, got %s", completionResp.Choices[0].Text)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "completions handler with error",
|
||||
Method: http.MethodPost,
|
||||
Path: "/api/generate",
|
||||
TestPath: "/api/generate",
|
||||
Handler: CompletionsMiddleware,
|
||||
Endpoint: func(c *gin.Context) {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
},
|
||||
Setup: func(t *testing.T, req *http.Request) {
|
||||
body := CompletionRequest{
|
||||
Model: "test-model",
|
||||
Prompt: "Hello",
|
||||
}
|
||||
|
||||
bodyBytes, _ := json.Marshal(body)
|
||||
|
||||
req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
},
|
||||
Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
|
||||
if resp.Code != http.StatusBadRequest {
|
||||
t.Fatalf("expected 400, got %d", resp.Code)
|
||||
}
|
||||
|
||||
if !strings.Contains(resp.Body.String(), `"invalid request"`) {
|
||||
t.Fatalf("error was not forwarded")
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "list handler",
|
||||
Method: http.MethodGet,
|
||||
Path: "/api/tags",
|
||||
TestPath: "/api/tags",
|
||||
Handler: ListMiddleware,
|
||||
Endpoint: func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, api.ListResponse{
|
||||
Models: []api.ListModelResponse{
|
||||
{
|
||||
Name: "Test Model",
|
||||
},
|
||||
},
|
||||
})
|
||||
},
|
||||
Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
|
||||
assert.Equal(t, http.StatusOK, resp.Code)
|
||||
|
||||
var listResp ListCompletion
|
||||
if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if listResp.Object != "list" {
|
||||
t.Fatalf("expected list, got %s", listResp.Object)
|
||||
}
|
||||
|
||||
if len(listResp.Data) != 1 {
|
||||
t.Fatalf("expected 1, got %d", len(listResp.Data))
|
||||
}
|
||||
|
||||
if listResp.Data[0].Id != "Test Model" {
|
||||
t.Fatalf("expected Test Model, got %s", listResp.Data[0].Id)
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "retrieve model",
|
||||
Method: http.MethodGet,
|
||||
Path: "/api/show/:model",
|
||||
TestPath: "/api/show/test-model",
|
||||
Handler: RetrieveMiddleware,
|
||||
Endpoint: func(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, api.ShowResponse{
|
||||
ModifiedAt: time.Date(2024, 6, 17, 13, 45, 0, 0, time.UTC),
|
||||
})
|
||||
},
|
||||
Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
|
||||
var retrieveResp Model
|
||||
if err := json.NewDecoder(resp.Body).Decode(&retrieveResp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if retrieveResp.Object != "model" {
|
||||
t.Fatalf("Expected object to be model, got %s", retrieveResp.Object)
|
||||
}
|
||||
|
||||
if retrieveResp.Id != "test-model" {
|
||||
t.Fatalf("Expected id to be test-model, got %s", retrieveResp.Id)
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
gin.SetMode(gin.TestMode)
|
||||
router := gin.New()
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
router = gin.New()
|
||||
router.Use(tc.Handler())
|
||||
router.Handle(tc.Method, tc.Path, tc.Endpoint)
|
||||
req, _ := http.NewRequest(tc.Method, tc.TestPath, nil)
|
||||
|
||||
if tc.Setup != nil {
|
||||
tc.Setup(t, req)
|
||||
}
|
||||
|
||||
resp := httptest.NewRecorder()
|
||||
router.ServeHTTP(resp, req)
|
||||
|
||||
tc.Expected(t, resp)
|
||||
})
|
||||
}
|
||||
}
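The test mounts each middleware ahead of the native handler with router.Use, which is also how a real router would expose the OpenAI-compatible paths on top of the existing endpoints. A minimal wiring sketch (route paths and handler names are illustrative, not taken from the diff):

// Sketch: attach the new middlewares to OpenAI-style routes.
func newOpenAIRouter(chat, generate, list, show gin.HandlerFunc) *gin.Engine {
	r := gin.New()
	r.POST("/v1/chat/completions", ChatMiddleware(), chat)
	r.POST("/v1/completions", CompletionsMiddleware(), generate)
	r.GET("/v1/models", ListMiddleware(), list)
	r.GET("/v1/models/:model", RetrieveMiddleware(), show)
	return r
}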
|
@@ -124,7 +124,7 @@ func ParseFile(r io.Reader) (*File, error) {
|
||||
case stateComment, stateNil:
|
||||
// pass
|
||||
case stateValue:
|
||||
s, ok := unquote(b.String())
|
||||
s, ok := unquote(strings.TrimSpace(b.String()))
|
||||
if !ok || isSpace(r) {
|
||||
if _, err := b.WriteRune(r); err != nil {
|
||||
return nil, err
|
||||
@@ -158,7 +158,7 @@ func ParseFile(r io.Reader) (*File, error) {
|
||||
case stateComment, stateNil:
|
||||
// pass; nothing to flush
|
||||
case stateValue:
|
||||
s, ok := unquote(b.String())
|
||||
s, ok := unquote(strings.TrimSpace(b.String()))
|
||||
if !ok {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
@@ -22,7 +22,13 @@ ADAPTER adapter1
|
||||
LICENSE MIT
|
||||
PARAMETER param1 value1
|
||||
PARAMETER param2 value2
|
||||
TEMPLATE template1
|
||||
TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
|
||||
|
||||
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
|
||||
|
||||
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
|
||||
|
||||
{{ .Response }}<|eot_id|>"""
|
||||
`
|
||||
|
||||
reader := strings.NewReader(input)
|
||||
@@ -36,7 +42,40 @@ TEMPLATE template1
|
||||
{Name: "license", Args: "MIT"},
|
||||
{Name: "param1", Args: "value1"},
|
||||
{Name: "param2", Args: "value2"},
|
||||
{Name: "template", Args: "template1"},
|
||||
{Name: "template", Args: "{{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>"},
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedCommands, modelfile.Commands)
|
||||
}
|
||||
|
||||
func TestParseFileTrimSpace(t *testing.T) {
|
||||
input := `
|
||||
FROM " model 1"
|
||||
ADAPTER adapter3
|
||||
LICENSE "MIT "
|
||||
PARAMETER param1 value1
|
||||
PARAMETER param2 value2
|
||||
TEMPLATE """ {{ if .System }}<|start_header_id|>system<|end_header_id|>
|
||||
|
||||
{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
|
||||
|
||||
{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
|
||||
|
||||
{{ .Response }}<|eot_id|> """
|
||||
`
|
||||
|
||||
reader := strings.NewReader(input)
|
||||
|
||||
modelfile, err := ParseFile(reader)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedCommands := []Command{
|
||||
{Name: "model", Args: " model 1"},
|
||||
{Name: "adapter", Args: "adapter3"},
|
||||
{Name: "license", Args: "MIT "},
|
||||
{Name: "param1", Args: "value1"},
|
||||
{Name: "param2", Args: "value2"},
|
||||
{Name: "template", Args: " {{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|> "},
|
||||
}
|
||||
|
||||
assert.Equal(t, expectedCommands, modelfile.Commands)
|
||||
@@ -48,6 +87,26 @@ func TestParseFileFrom(t *testing.T) {
|
||||
expected []Command
|
||||
err error
|
||||
}{
|
||||
{
|
||||
"FROM \"FOO BAR \"",
|
||||
[]Command{{Name: "model", Args: "FOO BAR "}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"FROM \"FOO BAR\"\nPARAMETER param1 value1",
|
||||
[]Command{{Name: "model", Args: "FOO BAR"}, {Name: "param1", Args: "value1"}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"FROM FOOO BAR ",
|
||||
[]Command{{Name: "model", Args: "FOOO BAR"}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"FROM /what/is/the path ",
|
||||
[]Command{{Name: "model", Args: "/what/is/the path"}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"FROM foo",
|
||||
[]Command{{Name: "model", Args: "foo"}},
|
||||
@@ -86,6 +145,11 @@ func TestParseFileFrom(t *testing.T) {
|
||||
[]Command{{Name: "param1", Args: "value1"}, {Name: "model", Args: "foo"}},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"PARAMETER what the \nFROM lemons make lemonade ",
|
||||
[]Command{{Name: "what", Args: "the"}, {Name: "model", Args: "lemons make lemonade"}},
|
||||
nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
@@ -399,7 +463,7 @@ func TestParseFileParameters(t *testing.T) {
|
||||
"mirostat_eta 1.0": {"mirostat_eta", "1.0"},
|
||||
"penalize_newline true": {"penalize_newline", "true"},
|
||||
"stop ### User:": {"stop", "### User:"},
|
||||
"stop ### User: ": {"stop", "### User: "},
|
||||
"stop ### User: ": {"stop", "### User:"},
|
||||
"stop \"### User:\"": {"stop", "### User:"},
|
||||
"stop \"### User: \"": {"stop", "### User: "},
|
||||
"stop \"\"\"### User:\"\"\"": {"stop", "### User:"},
|
||||
|
@@ -103,19 +103,19 @@ function buildApp() {
|
||||
function gatherDependencies() {
|
||||
write-host "Gathering runtime dependencies"
|
||||
cd "${script:SRC_DIR}"
|
||||
md "${script:DEPS_DIR}" -ea 0 > $null
|
||||
md "${script:DEPS_DIR}\ollama_runners" -ea 0 > $null
|
||||
|
||||
# TODO - this varies based on host build system and MSVC version - drive from dumpbin output
|
||||
# currently works for Win11 + MSVC 2019 + Cuda V11
|
||||
cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140.dll" "${script:DEPS_DIR}\"
|
||||
cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DEPS_DIR}\"
|
||||
cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DEPS_DIR}\"
|
||||
cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140.dll" "${script:DEPS_DIR}\ollama_runners\"
|
||||
cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DEPS_DIR}\ollama_runners\"
|
||||
cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DEPS_DIR}\ollama_runners\"
|
||||
|
||||
|
||||
cp "${script:SRC_DIR}\app\ollama_welcome.ps1" "${script:SRC_DIR}\dist\"
|
||||
if ("${env:KEY_CONTAINER}") {
|
||||
write-host "about to sign"
|
||||
foreach ($file in (get-childitem "${script:DEPS_DIR}/cu*.dll") + @("${script:SRC_DIR}\dist\ollama_welcome.ps1")){
|
||||
foreach ($file in (get-childitem "${script:DEPS_DIR}\cuda\cu*.dll") + @("${script:SRC_DIR}\dist\ollama_welcome.ps1")){
|
||||
write-host "signing $file"
|
||||
& "${script:SignTool}" sign /v /fd sha256 /t http://timestamp.digicert.com /f "${script:OLLAMA_CERT}" `
|
||||
/csp "Google Cloud KMS Provider" /kc ${env:KEY_CONTAINER} $file
|
||||
|
@@ -159,8 +159,8 @@ check_gpu() {
|
||||
esac ;;
|
||||
lshw)
|
||||
case $2 in
|
||||
nvidia) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
|
||||
amdgpu) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[1002\]' || return 1 ;;
|
||||
nvidia) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
|
||||
amdgpu) available lshw && $SUDO lshw -c display -numeric -disable network | grep -q 'vendor: .* \[1002\]' || return 1 ;;
|
||||
esac ;;
|
||||
nvidia-smi) available nvidia-smi || return 1 ;;
|
||||
esac
|
||||
@@ -279,7 +279,7 @@ if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\
|
||||
case $OS_NAME in
|
||||
centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' -f 1) ;;
|
||||
rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;;
|
||||
fedora) [ $OS_VERSION -lt '37' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '37';;
|
||||
fedora) [ $OS_VERSION -lt '39' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '39';;
|
||||
amzn) install_cuda_driver_yum 'fedora' '37' ;;
|
||||
debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;;
|
||||
ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;;
|
||||
|
@@ -6,10 +6,21 @@ set -ex
|
||||
MACHINE=$(uname -m)
|
||||
|
||||
if grep -i "centos" /etc/system-release >/dev/null; then
|
||||
# As of 7/1/2024 mirrorlist.centos.org has been taken offline, so adjust accordingly
|
||||
sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
|
||||
sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
|
||||
sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
|
||||
|
||||
# Centos 7 derivatives have too old of a git version to run our generate script
|
||||
# uninstall and ignore failures
|
||||
yum remove -y git
|
||||
yum -y install epel-release centos-release-scl
|
||||
|
||||
# The release packages reinstate the mirrors, undo that again
|
||||
sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
|
||||
sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
|
||||
sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
|
||||
|
||||
yum -y install dnf
|
||||
if [ "${MACHINE}" = "x86_64" ]; then
|
||||
yum -y install https://repo.ius.io/ius-release-el7.rpm
|
||||
|
@@ -28,11 +28,16 @@ import (
|
||||
"github.com/ollama/ollama/format"
|
||||
"github.com/ollama/ollama/llm"
|
||||
"github.com/ollama/ollama/parser"
|
||||
"github.com/ollama/ollama/template"
|
||||
"github.com/ollama/ollama/types/errtypes"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
"github.com/ollama/ollama/version"
|
||||
)
|
||||
|
||||
type Capability string
|
||||
|
||||
const CapabilityCompletion = Capability("completion")
|
||||
|
||||
type registryOptions struct {
|
||||
Insecure bool
|
||||
Username string
|
||||
@@ -48,16 +53,43 @@ type Model struct {
|
||||
ParentModel string
|
||||
AdapterPaths []string
|
||||
ProjectorPaths []string
|
||||
Template string
|
||||
System string
|
||||
License []string
|
||||
Digest string
|
||||
Options map[string]interface{}
|
||||
Messages []Message
|
||||
|
||||
Template *template.Template
|
||||
}
|
||||
|
||||
func (m *Model) IsEmbedding() bool {
|
||||
return slices.Contains(m.Config.ModelFamilies, "bert") || slices.Contains(m.Config.ModelFamilies, "nomic-bert")
|
||||
func (m *Model) Has(caps ...Capability) bool {
|
||||
for _, cap := range caps {
|
||||
switch cap {
|
||||
case CapabilityCompletion:
|
||||
f, err := os.Open(m.ModelPath)
|
||||
if err != nil {
|
||||
slog.Error("couldn't open model file", "error", err)
|
||||
continue
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// TODO(mxyng): decode the GGML into model to avoid doing this multiple times
|
||||
ggml, _, err := llm.DecodeGGML(f, 0)
|
||||
if err != nil {
|
||||
slog.Error("couldn't decode ggml", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := ggml.KV()[fmt.Sprintf("%s.pooling_type", ggml.KV().Architecture())]; ok {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
slog.Error("unknown capability", "capability", cap)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Model) String() string {
|
||||
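As a side note on the `IsEmbedding` → `Has(CapabilityCompletion)` change above: a minimal, self-contained sketch of the same idea is shown below. The helper name and map literals are illustrative only; the real method decodes the GGML header from disk, but the decision still hinges on whether a `<architecture>.pooling_type` key is present.

```go
package main

import "fmt"

// Capability mirrors the string-based capability type introduced above.
type Capability string

const CapabilityCompletion = Capability("completion")

// hasCompletion is a simplified stand-in for Model.Has: embedding-only models
// expose a "<arch>.pooling_type" key in their GGML metadata, so its presence
// means the model cannot do free-form completion.
func hasCompletion(kv map[string]any) bool {
	arch, _ := kv["general.architecture"].(string)
	_, pooled := kv[fmt.Sprintf("%s.pooling_type", arch)]
	return !pooled
}

func main() {
	generative := map[string]any{"general.architecture": "llama"}
	embedding := map[string]any{
		"general.architecture": "bert",
		"bert.pooling_type":    uint32(1),
	}
	fmt.Println(hasCompletion(generative)) // true
	fmt.Println(hasCompletion(embedding))  // false
}
```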
@@ -82,10 +114,10 @@ func (m *Model) String() string {
|
||||
})
|
||||
}
|
||||
|
||||
if m.Template != "" {
|
||||
if m.Template != nil {
|
||||
modelfile.Commands = append(modelfile.Commands, parser.Command{
|
||||
Name: "template",
|
||||
Args: m.Template,
|
||||
Args: m.Template.String(),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -135,13 +167,6 @@ type Message struct {
|
||||
Content string `json:"content"`
|
||||
}
|
||||
|
||||
type ManifestV2 struct {
|
||||
SchemaVersion int `json:"schemaVersion"`
|
||||
MediaType string `json:"mediaType"`
|
||||
Config *Layer `json:"config"`
|
||||
Layers []*Layer `json:"layers"`
|
||||
}
|
||||
|
||||
type ConfigV2 struct {
|
||||
ModelFormat string `json:"model_format"`
|
||||
ModelFamily string `json:"model_family"`
|
||||
@@ -160,7 +185,7 @@ type RootFS struct {
|
||||
DiffIDs []string `json:"diff_ids"`
|
||||
}
|
||||
|
||||
func GetManifest(mp ModelPath) (*ManifestV2, string, error) {
|
||||
func GetManifest(mp ModelPath) (*Manifest, string, error) {
|
||||
fp, err := mp.GetManifestPath()
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
@@ -170,7 +195,7 @@ func GetManifest(mp ModelPath) (*ManifestV2, string, error) {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
var manifest *ManifestV2
|
||||
var manifest *Manifest
|
||||
|
||||
bts, err := os.ReadFile(fp)
|
||||
if err != nil {
|
||||
@@ -198,8 +223,7 @@ func GetModel(name string) (*Model, error) {
|
||||
Name: mp.GetFullTagname(),
|
||||
ShortName: mp.GetShortTagname(),
|
||||
Digest: digest,
|
||||
Template: "{{ .Prompt }}",
|
||||
License: []string{},
|
||||
Template: template.DefaultTemplate,
|
||||
}
|
||||
|
||||
filename, err := GetBlobsPath(manifest.Config.Digest)
|
||||
@@ -235,13 +259,17 @@ func GetModel(name string) (*Model, error) {
|
||||
model.AdapterPaths = append(model.AdapterPaths, filename)
|
||||
case "application/vnd.ollama.image.projector":
|
||||
model.ProjectorPaths = append(model.ProjectorPaths, filename)
|
||||
case "application/vnd.ollama.image.template":
|
||||
case "application/vnd.ollama.image.prompt",
|
||||
"application/vnd.ollama.image.template":
|
||||
bts, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
model.Template = string(bts)
|
||||
model.Template, err = template.Parse(string(bts))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "application/vnd.ollama.image.system":
|
||||
bts, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
@@ -249,13 +277,6 @@ func GetModel(name string) (*Model, error) {
|
||||
}
|
||||
|
||||
model.System = string(bts)
|
||||
case "application/vnd.ollama.image.prompt":
|
||||
bts, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
model.Template = string(bts)
|
||||
case "application/vnd.ollama.image.params":
|
||||
params, err := os.Open(filename)
|
||||
if err != nil {
|
||||
@@ -414,17 +435,22 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
|
||||
return err
|
||||
}
|
||||
|
||||
layers, err := parseFromFile(ctx, temp, "", fn)
|
||||
layer, err := NewLayer(temp, baseLayer.MediaType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(layers) != 1 {
|
||||
return errors.New("quantization failed")
|
||||
if _, err := temp.Seek(0, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
baseLayer.Layer = layers[0].Layer
|
||||
baseLayer.GGML = layers[0].GGML
|
||||
ggml, _, err := llm.DecodeGGML(temp, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
baseLayer.Layer = layer
|
||||
baseLayer.GGML = ggml
|
||||
}
|
||||
}
|
||||
|
||||
@@ -817,7 +843,7 @@ func PushModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
|
||||
func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn func(api.ProgressResponse)) error {
|
||||
mp := ParseModelPath(name)
|
||||
|
||||
var manifest *ManifestV2
|
||||
var manifest *Manifest
|
||||
var err error
|
||||
var noprune string
|
||||
|
||||
@@ -924,7 +950,7 @@ func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
|
||||
return nil
|
||||
}
|
||||
|
||||
func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptions) (*ManifestV2, error) {
|
||||
func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptions) (*Manifest, error) {
|
||||
requestURL := mp.BaseURL().JoinPath("v2", mp.GetNamespaceRepository(), "manifests", mp.Tag)
|
||||
|
||||
headers := make(http.Header)
|
||||
@@ -935,7 +961,7 @@ func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptio
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
var m *ManifestV2
|
||||
var m *Manifest
|
||||
if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -14,7 +14,10 @@ import (
|
||||
)
|
||||
|
||||
type Manifest struct {
|
||||
ManifestV2
|
||||
SchemaVersion int `json:"schemaVersion"`
|
||||
MediaType string `json:"mediaType"`
|
||||
Config *Layer `json:"config"`
|
||||
Layers []*Layer `json:"layers"`
|
||||
|
||||
filepath string
|
||||
fi os.FileInfo
|
||||
@@ -66,7 +69,7 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) {
|
||||
|
||||
p := filepath.Join(manifests, n.Filepath())
|
||||
|
||||
var m ManifestV2
|
||||
var m Manifest
|
||||
f, err := os.Open(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -83,12 +86,11 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Manifest{
|
||||
ManifestV2: m,
|
||||
filepath: p,
|
||||
fi: fi,
|
||||
digest: fmt.Sprintf("%x", sha256sum.Sum(nil)),
|
||||
}, nil
|
||||
m.filepath = p
|
||||
m.fi = fi
|
||||
m.digest = fmt.Sprintf("%x", sha256sum.Sum(nil))
|
||||
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
func WriteManifest(name model.Name, config *Layer, layers []*Layer) error {
|
||||
@@ -108,7 +110,7 @@ func WriteManifest(name model.Name, config *Layer, layers []*Layer) error {
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
m := ManifestV2{
|
||||
m := Manifest{
|
||||
SchemaVersion: 2,
|
||||
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
|
||||
Config: config,
|
||||
|
@@ -25,7 +25,7 @@ func createManifest(t *testing.T, path, name string) {
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if err := json.NewEncoder(f).Encode(ManifestV2{}); err != nil {
|
||||
if err := json.NewEncoder(f).Encode(Manifest{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
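For reference, the flattened manifest shape used after this change (the `ManifestV2` fields now declared directly on `Manifest`) can be exercised on its own with `encoding/json`. The struct below is a trimmed stand-in and the digests are placeholder values, not data from the patch.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Layer is a trimmed-down stand-in for the server.Layer type.
type Layer struct {
	MediaType string `json:"mediaType"`
	Digest    string `json:"digest"`
	Size      int64  `json:"size"`
}

// Manifest mirrors the flattened shape: schemaVersion, mediaType, config and
// layers live directly on the struct instead of an embedded ManifestV2.
type Manifest struct {
	SchemaVersion int      `json:"schemaVersion"`
	MediaType     string   `json:"mediaType"`
	Config        *Layer   `json:"config"`
	Layers        []*Layer `json:"layers"`
}

func main() {
	raw := []byte(`{
		"schemaVersion": 2,
		"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
		"config": {"mediaType": "application/vnd.docker.container.image.v1+json", "digest": "sha256:abc", "size": 10},
		"layers": [{"mediaType": "application/vnd.ollama.image.model", "digest": "sha256:def", "size": 42}]
	}`)

	var m Manifest
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.SchemaVersion, len(m.Layers)) // 2 1
}
```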
@@ -15,7 +15,7 @@ import (
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/convert"
|
||||
"github.com/ollama/ollama/llm"
|
||||
"github.com/ollama/ollama/templates"
|
||||
"github.com/ollama/ollama/template"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
)
|
||||
|
||||
@@ -63,7 +63,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
|
||||
}
|
||||
defer blob.Close()
|
||||
|
||||
ggml, _, err := llm.DecodeGGML(blob)
|
||||
ggml, _, err := llm.DecodeGGML(blob, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -77,62 +77,79 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
|
||||
func extractFromZipFile(p string, file *os.File, fn func(api.ProgressResponse)) error {
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := zip.NewReader(file, stat.Size())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
tempdir, err := os.MkdirTemp(filepath.Dir(file.Name()), "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.RemoveAll(tempdir)
|
||||
|
||||
fn(api.ProgressResponse{Status: "unpacking model metadata"})
|
||||
for _, f := range r.File {
|
||||
if !filepath.IsLocal(f.Name) {
|
||||
return fmt.Errorf("%w: %s", zip.ErrInsecurePath, f.Name)
|
||||
}
|
||||
|
||||
n := filepath.Join(p, f.Name)
|
||||
if err := os.MkdirAll(filepath.Dir(n), 0o750); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO(mxyng): this should not write out all files to disk
|
||||
outfile, err := os.Create(filepath.Join(tempdir, f.Name))
|
||||
outfile, err := os.Create(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
defer outfile.Close()
|
||||
|
||||
infile, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
defer infile.Close()
|
||||
|
||||
if _, err = io.Copy(outfile, infile); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
if err := outfile.Close(); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
if err := infile.Close(); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
mf, err := convert.GetModelFormat(tempdir)
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
|
||||
tempDir, err := os.MkdirTemp(filepath.Dir(file.Name()), "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
if err := extractFromZipFile(tempDir, file, fn); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mf, err := convert.GetModelFormat(tempDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
params, err := mf.GetParams(tempdir)
|
||||
params, err := mf.GetParams(tempDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mArch, err := mf.GetModelArch("", tempdir, params)
|
||||
mArch, err := mf.GetModelArch("", tempDir, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
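The `filepath.IsLocal` guard in `extractFromZipFile` above is the core of the path-traversal protection. Here is that check in isolation; the `safeJoin` helper name and the example entry names are invented for illustration.

```go
package main

import (
	"archive/zip"
	"fmt"
	"path/filepath"
)

// safeJoin mirrors the guard used during extraction: reject any archive entry
// whose name escapes the destination directory, then join it under dest.
func safeJoin(dest, name string) (string, error) {
	if !filepath.IsLocal(name) {
		return "", fmt.Errorf("%w: %s", zip.ErrInsecurePath, name)
	}
	return filepath.Join(dest, name), nil
}

func main() {
	for _, name := range []string{"model.safetensors", "../../etc/passwd"} {
		p, err := safeJoin("/tmp/extract", name)
		fmt.Println(p, err)
	}
}
```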
@@ -150,7 +167,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a
|
||||
|
||||
// TODO(mxyng): this should write directly into a layer
|
||||
// e.g. NewLayer(arch.Reader(), "application/vnd.ollama.image.model")
|
||||
temp, err := os.CreateTemp(tempdir, "fp16")
|
||||
temp, err := os.CreateTemp(tempDir, "fp16")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -176,7 +193,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a
|
||||
}
|
||||
defer bin.Close()
|
||||
|
||||
ggml, _, err := llm.DecodeGGML(bin)
|
||||
ggml, _, err := llm.DecodeGGML(bin, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -210,7 +227,7 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap
|
||||
|
||||
var offset int64
|
||||
for offset < stat.Size() {
|
||||
ggml, n, err := llm.DecodeGGML(file)
|
||||
ggml, n, err := llm.DecodeGGML(file, 0)
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
} else if err != nil {
|
||||
@@ -239,7 +256,7 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap
|
||||
func detectChatTemplate(layers []*layerGGML) ([]*layerGGML, error) {
|
||||
for _, layer := range layers {
|
||||
if s := layer.GGML.KV().ChatTemplate(); s != "" {
|
||||
if t, err := templates.NamedTemplate(s); err != nil {
|
||||
if t, err := template.Named(s); err != nil {
|
||||
slog.Debug("template detection", "error", err)
|
||||
} else {
|
||||
tmpl, err := NewLayer(t.Reader(), "application/vnd.ollama.image.template")

server/model_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
)
|
||||
|
||||
func createZipFile(t *testing.T, name string) *os.File {
|
||||
t.Helper()
|
||||
|
||||
f, err := os.CreateTemp(t.TempDir(), "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
zf := zip.NewWriter(f)
|
||||
defer zf.Close()
|
||||
|
||||
zh, err := zf.CreateHeader(&zip.FileHeader{Name: name})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(zh, bytes.NewReader([]byte(""))); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func TestExtractFromZipFile(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
expect []string
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "good",
|
||||
expect: []string{"good"},
|
||||
},
|
||||
{
|
||||
name: strings.Join([]string{"path", "..", "to", "good"}, string(os.PathSeparator)),
|
||||
expect: []string{filepath.Join("to", "good")},
|
||||
},
|
||||
{
|
||||
name: strings.Join([]string{"path", "..", "to", "..", "good"}, string(os.PathSeparator)),
|
||||
expect: []string{"good"},
|
||||
},
|
||||
{
|
||||
name: strings.Join([]string{"path", "to", "..", "..", "good"}, string(os.PathSeparator)),
|
||||
expect: []string{"good"},
|
||||
},
|
||||
{
|
||||
name: strings.Join([]string{"..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "bad"}, string(os.PathSeparator)),
|
||||
err: zip.ErrInsecurePath,
|
||||
},
|
||||
{
|
||||
name: strings.Join([]string{"path", "..", "..", "to", "bad"}, string(os.PathSeparator)),
|
||||
err: zip.ErrInsecurePath,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range cases {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
f := createZipFile(t, tt.name)
|
||||
defer f.Close()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
if err := extractFromZipFile(tempDir, f, func(api.ProgressResponse) {}); !errors.Is(err, tt.err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var matches []string
|
||||
if err := filepath.Walk(tempDir, func(p string, fi os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
matches = append(matches, p)
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var actual []string
|
||||
for _, match := range matches {
|
||||
rel, err := filepath.Rel(tempDir, match)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
actual = append(actual, rel)
|
||||
}
|
||||
|
||||
if !slices.Equal(actual, tt.expect) {
|
||||
t.Fatalf("expected %d files, got %d", len(tt.expect), len(matches))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@@ -103,18 +103,9 @@ func (mp ModelPath) GetShortTagname() string {
|
||||
return fmt.Sprintf("%s/%s/%s:%s", mp.Registry, mp.Namespace, mp.Repository, mp.Tag)
|
||||
}
|
||||
|
||||
// modelsDir returns the value of the OLLAMA_MODELS environment variable or the user's home directory if OLLAMA_MODELS is not set.
|
||||
// The models directory is where Ollama stores its model files and manifests.
|
||||
func modelsDir() (string, error) {
|
||||
return envconfig.ModelsDir, nil
|
||||
}
|
||||
|
||||
// GetManifestPath returns the path to the manifest file for the given model path, it is up to the caller to create the directory if it does not exist.
|
||||
func (mp ModelPath) GetManifestPath() (string, error) {
|
||||
dir, err := modelsDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dir := envconfig.ModelsDir
|
||||
|
||||
return filepath.Join(dir, "manifests", mp.Registry, mp.Namespace, mp.Repository, mp.Tag), nil
|
||||
}
|
||||
@@ -127,10 +118,7 @@ func (mp ModelPath) BaseURL() *url.URL {
|
||||
}
|
||||
|
||||
func GetManifestPath() (string, error) {
|
||||
dir, err := modelsDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dir := envconfig.ModelsDir
|
||||
|
||||
path := filepath.Join(dir, "manifests")
|
||||
if err := os.MkdirAll(path, 0o755); err != nil {
|
||||
@@ -141,10 +129,7 @@ func GetManifestPath() (string, error) {
|
||||
}
|
||||
|
||||
func GetBlobsPath(digest string) (string, error) {
|
||||
dir, err := modelsDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dir := envconfig.ModelsDir
|
||||
|
||||
// only accept actual sha256 digests
|
||||
pattern := "^sha256[:-][0-9a-fA-F]{64}$"
|
||||
|
@@ -4,10 +4,11 @@ import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"text/template/parse"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/template"
|
||||
)
|
||||
|
||||
// isResponseNode checks if the node contains .Response
|
||||
@@ -53,13 +54,8 @@ func formatTemplateForResponse(tmpl *template.Template, generate bool) {
|
||||
|
||||
// Prompt renders a prompt from a template. If generate is set to true,
|
||||
// the response and parts of the template following it are not rendered
|
||||
func Prompt(tmpl, system, prompt, response string, generate bool) (string, error) {
|
||||
parsed, err := template.New("").Option("missingkey=zero").Parse(tmpl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
formatTemplateForResponse(parsed, generate)
|
||||
func Prompt(tmpl *template.Template, system, prompt, response string, generate bool) (string, error) {
|
||||
formatTemplateForResponse(tmpl, generate)
|
||||
|
||||
vars := map[string]any{
|
||||
"System": system,
|
||||
@@ -68,14 +64,14 @@ func Prompt(tmpl, system, prompt, response string, generate bool) (string, error
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
if err := parsed.Execute(&sb, vars); err != nil {
|
||||
if err := tmpl.Execute(&sb, vars); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return sb.String(), nil
|
||||
}
|
||||
|
||||
func countTokens(tmpl string, system string, prompt string, response string, encode func(string) ([]int, error)) (int, error) {
|
||||
func countTokens(tmpl *template.Template, system string, prompt string, response string, encode func(string) ([]int, error)) (int, error) {
|
||||
rendered, err := Prompt(tmpl, system, prompt, response, false)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@@ -91,7 +87,7 @@ func countTokens(tmpl string, system string, prompt string, response string, enc
|
||||
}
|
||||
|
||||
// ChatPrompt builds up a prompt from a series of messages, truncating based on context window size
|
||||
func ChatPrompt(tmpl string, messages []api.Message, window int, encode func(string) ([]int, error)) (string, error) {
|
||||
func ChatPrompt(tmpl *template.Template, messages []api.Message, window int, encode func(string) ([]int, error)) (string, error) {
|
||||
type prompt struct {
|
||||
System string
|
||||
Prompt string
|
||||
|
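Since `Prompt` now takes a parsed `*template.Template`, rendering boils down to plain `text/template` execution with the `missingkey=zero` option. A standalone sketch follows; the template text and variable values are made up for illustration and are not taken from the patch.

```go
package main

import (
	"fmt"
	"strings"
	"text/template"
)

func main() {
	// A toy prompt template in the spirit of a Modelfile TEMPLATE.
	tmpl, err := template.New("").Option("missingkey=zero").Parse(
		"{{ if .System }}<<SYS>>{{ .System }}<</SYS>>\n{{ end }}[INST] {{ .Prompt }} [/INST] {{ .Response }}")
	if err != nil {
		panic(err)
	}

	vars := map[string]any{"System": "You are terse.", "Prompt": "Why is the sky blue?", "Response": ""}

	var sb strings.Builder
	if err := tmpl.Execute(&sb, vars); err != nil {
		panic(err)
	}
	fmt.Println(sb.String())
}
```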
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/template"
|
||||
)
|
||||
|
||||
func TestPrompt(t *testing.T) {
|
||||
@@ -61,7 +62,12 @@ func TestPrompt(t *testing.T) {
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := Prompt(tc.template, tc.system, tc.prompt, tc.response, tc.generate)
|
||||
tmpl, err := template.Parse(tc.template)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
got, err := Prompt(tmpl, tc.system, tc.prompt, tc.response, tc.generate)
|
||||
if err != nil {
|
||||
t.Errorf("error = %v", err)
|
||||
}
|
||||
@@ -192,7 +198,12 @@ func TestChatPrompt(t *testing.T) {
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := ChatPrompt(tc.template, tc.messages, tc.window, encode)
|
||||
tmpl, err := template.Parse(tc.template)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
got, err := ChatPrompt(tmpl, tc.messages, tc.window, encode)
|
||||
if err != nil {
|
||||
t.Errorf("error = %v", err)
|
||||
}
|
server/routes.go (201 lines changed)
@@ -9,7 +9,6 @@ import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
@@ -31,6 +29,7 @@ import (
|
||||
"github.com/ollama/ollama/llm"
|
||||
"github.com/ollama/ollama/openai"
|
||||
"github.com/ollama/ollama/parser"
|
||||
"github.com/ollama/ollama/template"
|
||||
"github.com/ollama/ollama/types/errtypes"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
"github.com/ollama/ollama/version"
|
||||
@@ -55,8 +54,6 @@ func init() {
|
||||
gin.SetMode(mode)
|
||||
}
|
||||
|
||||
var defaultSessionDuration = 5 * time.Minute
|
||||
|
||||
func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options, error) {
|
||||
opts := api.DefaultOptions()
|
||||
if err := opts.FromMap(model.Options); err != nil {
|
||||
@@ -121,8 +118,8 @@ func (s *Server) GenerateHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if model.IsEmbedding() {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "embedding models do not support generate"})
|
||||
if !model.Has(CapabilityCompletion) {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support generate", req.Model)})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -132,14 +129,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var sessionDuration time.Duration
|
||||
if req.KeepAlive == nil {
|
||||
sessionDuration = getDefaultSessionDuration()
|
||||
} else {
|
||||
sessionDuration = req.KeepAlive.Duration
|
||||
}
|
||||
|
||||
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration)
|
||||
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive)
|
||||
var runner *runnerRef
|
||||
select {
|
||||
case runner = <-rCh:
|
||||
@@ -161,6 +151,12 @@ func (s *Server) GenerateHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
tmpl, err := template.Parse(req.Template)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
checkpointLoaded := time.Now()
|
||||
|
||||
var prompt string
|
||||
@@ -169,7 +165,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
|
||||
prompt = req.Prompt
|
||||
case req.Prompt != "":
|
||||
if req.Template == "" {
|
||||
req.Template = model.Template
|
||||
tmpl = model.Template
|
||||
}
|
||||
|
||||
if req.System == "" {
|
||||
@@ -187,7 +183,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
|
||||
|
||||
sb.WriteString(req.Prompt)
|
||||
|
||||
p, err := Prompt(req.Template, req.System, sb.String(), "", true)
|
||||
p, err := Prompt(tmpl, req.System, sb.String(), "", true)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
@@ -242,7 +238,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
|
||||
resp.LoadDuration = checkpointLoaded.Sub(checkpointStart)
|
||||
|
||||
if !req.Raw {
|
||||
p, err := Prompt(req.Template, req.System, req.Prompt, generated.String(), false)
|
||||
p, err := Prompt(tmpl, req.System, req.Prompt, generated.String(), false)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
@@ -313,32 +309,6 @@ func (s *Server) GenerateHandler(c *gin.Context) {
|
||||
streamResponse(c, ch)
|
||||
}
|
||||
|
||||
func getDefaultSessionDuration() time.Duration {
|
||||
if envconfig.KeepAlive != "" {
|
||||
v, err := strconv.Atoi(envconfig.KeepAlive)
|
||||
if err != nil {
|
||||
d, err := time.ParseDuration(envconfig.KeepAlive)
|
||||
if err != nil {
|
||||
return defaultSessionDuration
|
||||
}
|
||||
|
||||
if d < 0 {
|
||||
return time.Duration(math.MaxInt64)
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
d := time.Duration(v) * time.Second
|
||||
if d < 0 {
|
||||
return time.Duration(math.MaxInt64)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
return defaultSessionDuration
|
||||
}
|
||||
|
||||
func (s *Server) EmbeddingsHandler(c *gin.Context) {
|
||||
var req api.EmbeddingRequest
|
||||
err := c.ShouldBindJSON(&req)
|
||||
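The handlers now pass `req.KeepAlive` straight to the scheduler instead of computing a per-request session duration, and the removed `getDefaultSessionDuration` fallback shown above handled the environment default. For context, here is a sketch of that fallback parsing (whole seconds or a Go duration string, negative meaning never expire); the `parseKeepAlive` helper name is invented for the example.

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

// parseKeepAlive sketches the removed fallback: accept either whole seconds
// or a Go duration string, and treat negative values as "keep loaded forever".
func parseKeepAlive(s string, fallback time.Duration) time.Duration {
	if s == "" {
		return fallback
	}
	var d time.Duration
	if v, err := strconv.Atoi(s); err == nil {
		d = time.Duration(v) * time.Second
	} else if parsed, err := time.ParseDuration(s); err == nil {
		d = parsed
	} else {
		return fallback
	}
	if d < 0 {
		return time.Duration(math.MaxInt64)
	}
	return d
}

func main() {
	fmt.Println(parseKeepAlive("300", 5*time.Minute)) // 5m0s
	fmt.Println(parseKeepAlive("24h", 5*time.Minute)) // 24h0m0s
	fmt.Println(parseKeepAlive("-1", 5*time.Minute))  // effectively never
}
```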
@@ -373,14 +343,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var sessionDuration time.Duration
|
||||
if req.KeepAlive == nil {
|
||||
sessionDuration = getDefaultSessionDuration()
|
||||
} else {
|
||||
sessionDuration = req.KeepAlive.Duration
|
||||
}
|
||||
|
||||
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration)
|
||||
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive)
|
||||
var runner *runnerRef
|
||||
select {
|
||||
case runner = <-rCh:
|
||||
@@ -646,9 +609,12 @@ func (s *Server) ShowModelHandler(c *gin.Context) {
|
||||
|
||||
resp, err := GetModelInfo(req)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model '%s' not found", req.Model)})
|
||||
} else {
|
||||
case err.Error() == "invalid model name":
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
default:
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
}
|
||||
return
|
||||
@@ -658,44 +624,58 @@ func (s *Server) ShowModelHandler(c *gin.Context) {
|
||||
}
|
||||
|
||||
func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) {
|
||||
model, err := GetModel(req.Model)
|
||||
m, err := GetModel(req.Model)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
modelDetails := api.ModelDetails{
|
||||
ParentModel: model.ParentModel,
|
||||
Format: model.Config.ModelFormat,
|
||||
Family: model.Config.ModelFamily,
|
||||
Families: model.Config.ModelFamilies,
|
||||
ParameterSize: model.Config.ModelType,
|
||||
QuantizationLevel: model.Config.FileType,
|
||||
ParentModel: m.ParentModel,
|
||||
Format: m.Config.ModelFormat,
|
||||
Family: m.Config.ModelFamily,
|
||||
Families: m.Config.ModelFamilies,
|
||||
ParameterSize: m.Config.ModelType,
|
||||
QuantizationLevel: m.Config.FileType,
|
||||
}
|
||||
|
||||
if req.System != "" {
|
||||
model.System = req.System
|
||||
m.System = req.System
|
||||
}
|
||||
|
||||
if req.Template != "" {
|
||||
model.Template = req.Template
|
||||
m.Template, err = template.Parse(req.Template)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
msgs := make([]api.Message, 0)
|
||||
for _, msg := range model.Messages {
|
||||
for _, msg := range m.Messages {
|
||||
msgs = append(msgs, api.Message{Role: msg.Role, Content: msg.Content})
|
||||
}
|
||||
|
||||
n := model.ParseName(req.Model)
|
||||
if !n.IsValid() {
|
||||
return nil, fmt.Errorf("invalid model name")
|
||||
}
|
||||
|
||||
manifest, err := ParseNamedManifest(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := &api.ShowResponse{
|
||||
License: strings.Join(model.License, "\n"),
|
||||
System: model.System,
|
||||
Template: model.Template,
|
||||
Details: modelDetails,
|
||||
Messages: msgs,
|
||||
License: strings.Join(m.License, "\n"),
|
||||
System: m.System,
|
||||
Template: m.Template.String(),
|
||||
Details: modelDetails,
|
||||
Messages: msgs,
|
||||
ModifiedAt: manifest.fi.ModTime(),
|
||||
}
|
||||
|
||||
var params []string
|
||||
cs := 30
|
||||
for k, v := range model.Options {
|
||||
for k, v := range m.Options {
|
||||
switch val := v.(type) {
|
||||
case []interface{}:
|
||||
for _, nv := range val {
|
||||
@@ -709,20 +689,59 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) {
|
||||
|
||||
for k, v := range req.Options {
|
||||
if _, ok := req.Options[k]; ok {
|
||||
model.Options[k] = v
|
||||
m.Options[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
var sb strings.Builder
|
||||
fmt.Fprintln(&sb, "# Modelfile generated by \"ollama show\"")
|
||||
fmt.Fprintln(&sb, "# To build a new Modelfile based on this, replace FROM with:")
|
||||
fmt.Fprintf(&sb, "# FROM %s\n\n", model.ShortName)
|
||||
fmt.Fprint(&sb, model.String())
|
||||
fmt.Fprintf(&sb, "# FROM %s\n\n", m.ShortName)
|
||||
fmt.Fprint(&sb, m.String())
|
||||
resp.Modelfile = sb.String()
|
||||
|
||||
kvData, err := getKVData(m.ModelPath, req.Verbose)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
delete(kvData, "general.name")
|
||||
delete(kvData, "tokenizer.chat_template")
|
||||
resp.ModelInfo = kvData
|
||||
|
||||
if len(m.ProjectorPaths) > 0 {
|
||||
projectorData, err := getKVData(m.ProjectorPaths[0], req.Verbose)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.ProjectorInfo = projectorData
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func getKVData(digest string, verbose bool) (llm.KV, error) {
|
||||
maxArraySize := 0
|
||||
if verbose {
|
||||
maxArraySize = -1
|
||||
}
|
||||
kvData, err := llm.LoadModel(digest, maxArraySize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kv := kvData.KV()
|
||||
|
||||
if !verbose {
|
||||
for k := range kv {
|
||||
if t, ok := kv[k].([]any); len(t) > 5 && ok {
|
||||
kv[k] = []any{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
func (s *Server) ListModelsHandler(c *gin.Context) {
|
||||
ms, err := Manifests()
|
||||
if err != nil {
|
||||
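`getKVData` keeps show responses small by emptying any metadata array longer than five entries when verbose output is not requested. A stand-alone sketch of that trimming over a plain map; the keys and values are illustrative only.

```go
package main

import "fmt"

// trimLargeArrays mirrors the non-verbose branch of getKVData: any slice value
// with more than five elements is replaced by an empty slice.
func trimLargeArrays(kv map[string]any) {
	for k, v := range kv {
		if arr, ok := v.([]any); ok && len(arr) > 5 {
			kv[k] = []any{}
		}
	}
}

func main() {
	kv := map[string]any{
		"general.architecture":  "llama",
		"tokenizer.ggml.tokens": []any{"a", "b", "c", "d", "e", "f", "g"},
		"llama.block_count":     uint32(32),
	}
	trimLargeArrays(kv)
	fmt.Println(len(kv["tokenizer.ggml.tokens"].([]any))) // 0
}
```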
@@ -986,7 +1005,10 @@ func (s *Server) GenerateRoutes() http.Handler {
|
||||
r.GET("/api/ps", s.ProcessHandler)
|
||||
|
||||
// Compatibility endpoints
|
||||
r.POST("/v1/chat/completions", openai.Middleware(), s.ChatHandler)
|
||||
r.POST("/v1/chat/completions", openai.ChatMiddleware(), s.ChatHandler)
|
||||
r.POST("/v1/completions", openai.CompletionsMiddleware(), s.GenerateHandler)
|
||||
r.GET("/v1/models", openai.ListMiddleware(), s.ListModelsHandler)
|
||||
r.GET("/v1/models/:model", openai.RetrieveMiddleware(), s.ShowModelHandler)
|
||||
|
||||
for _, method := range []string{http.MethodGet, http.MethodHead} {
|
||||
r.Handle(method, "/", func(c *gin.Context) {
|
||||
@@ -1052,11 +1074,20 @@ func Serve(ln net.Listener) error {
|
||||
schedCtx, schedDone := context.WithCancel(ctx)
|
||||
sched := InitScheduler(schedCtx)
|
||||
s := &Server{addr: ln.Addr(), sched: sched}
|
||||
r := s.GenerateRoutes()
|
||||
|
||||
http.Handle("/", s.GenerateRoutes())
|
||||
|
||||
slog.Info(fmt.Sprintf("Listening on %s (version %s)", ln.Addr(), version.Version))
|
||||
srvr := &http.Server{
|
||||
Handler: r,
|
||||
// Use http.DefaultServeMux so we get net/http/pprof for
|
||||
// free.
|
||||
//
|
||||
// TODO(bmizerany): Decide if we want to make this
|
||||
// configurable so it is not exposed by default, or allow
|
||||
// users to bind it to a different port. This was a quick
|
||||
// and easy way to get pprof, but it may not be the best
|
||||
// way.
|
||||
Handler: nil,
|
||||
}
|
||||
|
||||
// listen for a ctrl+c and stop any loaded llm
|
||||
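The `Serve` change mounts the routes on `http.DefaultServeMux` and leaves `http.Server.Handler` nil, so the `net/http/pprof` registrations are exposed automatically. A minimal sketch of the same pattern; the address and handler body are arbitrary and not part of the patch.

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux
)

func main() {
	// Application routes go onto the default mux...
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("Ollama is running"))
	})

	// ...and leaving Handler nil makes the server fall back to
	// http.DefaultServeMux, so the pprof endpoints come along for free.
	srv := &http.Server{Addr: "127.0.0.1:8080", Handler: nil}
	log.Fatal(srv.ListenAndServe())
}
```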
@@ -1175,11 +1206,16 @@ func (s *Server) ProcessHandler(c *gin.Context) {
|
||||
models = append(models, mr)
|
||||
}
|
||||
|
||||
slices.SortStableFunc(models, func(i, j api.ProcessModelResponse) int {
|
||||
// longest duration remaining listed first
|
||||
return cmp.Compare(j.ExpiresAt.Unix(), i.ExpiresAt.Unix())
|
||||
})
|
||||
|
||||
c.JSON(http.StatusOK, api.ProcessResponse{Models: models})
|
||||
}
|
||||
|
||||
// ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model
|
||||
func chatPrompt(ctx context.Context, runner *runnerRef, template string, messages []api.Message, numCtx int) (string, error) {
|
||||
func chatPrompt(ctx context.Context, runner *runnerRef, template *template.Template, messages []api.Message, numCtx int) (string, error) {
|
||||
encode := func(s string) ([]int, error) {
|
||||
return runner.llama.Tokenize(ctx, s)
|
||||
}
|
||||
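The `/api/ps` handler above sorts running models so the one with the most time remaining comes first, by comparing `ExpiresAt` timestamps with the arguments swapped. A reduced sketch with a hypothetical struct and model names:

```go
package main

import (
	"cmp"
	"fmt"
	"slices"
	"time"
)

type proc struct {
	Name      string
	ExpiresAt time.Time
}

func main() {
	now := time.Now()
	models := []proc{
		{"llama3", now.Add(2 * time.Minute)},
		{"phi3", now.Add(30 * time.Minute)},
	}

	// Same ordering as the handler: compare j against i so the model with the
	// longest duration remaining is listed first.
	slices.SortStableFunc(models, func(i, j proc) int {
		return cmp.Compare(j.ExpiresAt.Unix(), i.ExpiresAt.Unix())
	})
	fmt.Println(models[0].Name) // phi3
}
```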
@@ -1227,8 +1263,8 @@ func (s *Server) ChatHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if model.IsEmbedding() {
|
||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "embedding models do not support chat"})
|
||||
if !model.Has(CapabilityCompletion) {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support chat", req.Model)})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1238,14 +1274,7 @@ func (s *Server) ChatHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var sessionDuration time.Duration
|
||||
if req.KeepAlive == nil {
|
||||
sessionDuration = getDefaultSessionDuration()
|
||||
} else {
|
||||
sessionDuration = req.KeepAlive.Duration
|
||||
}
|
||||
|
||||
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration)
|
||||
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive)
|
||||
var runner *runnerRef
|
||||
select {
|
||||
case runner = <-rCh:
|
||||
|
@@ -19,6 +19,8 @@ import (
|
||||
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/llm"
|
||||
"github.com/ollama/ollama/openai"
|
||||
"github.com/ollama/ollama/parser"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
"github.com/ollama/ollama/version"
|
||||
@@ -104,6 +106,24 @@ func Test_Routes(t *testing.T) {
|
||||
assert.Empty(t, len(modelList.Models))
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "openai empty list",
|
||||
Method: http.MethodGet,
|
||||
Path: "/v1/models",
|
||||
Expected: func(t *testing.T, resp *http.Response) {
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
assert.Equal(t, "application/json", contentType)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
var modelList openai.ListCompletion
|
||||
err = json.Unmarshal(body, &modelList)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "list", modelList.Object)
|
||||
assert.Empty(t, modelList.Data)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Tags Handler (yes tags)",
|
||||
Method: http.MethodGet,
|
||||
@@ -127,6 +147,25 @@ func Test_Routes(t *testing.T) {
|
||||
assert.Equal(t, "test-model:latest", modelList.Models[0].Name)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "openai list models with tags",
|
||||
Method: http.MethodGet,
|
||||
Path: "/v1/models",
|
||||
Expected: func(t *testing.T, resp *http.Response) {
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
assert.Equal(t, "application/json", contentType)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
var modelList openai.ListCompletion
|
||||
err = json.Unmarshal(body, &modelList)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Len(t, modelList.Data, 1)
|
||||
assert.Equal(t, "test-model:latest", modelList.Data[0].Id)
|
||||
assert.Equal(t, "library", modelList.Data[0].OwnedBy)
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Create Model Handler",
|
||||
Method: http.MethodPost,
|
||||
@@ -212,6 +251,25 @@ func Test_Routes(t *testing.T) {
|
||||
"top_p 0.9",
|
||||
}
|
||||
assert.Equal(t, expectedParams, params)
|
||||
assert.InDelta(t, 0, showResp.ModelInfo["general.parameter_count"], 1e-9, "Parameter count should be 0")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "openai retrieve model handler",
|
||||
Method: http.MethodGet,
|
||||
Path: "/v1/models/show-model",
|
||||
Expected: func(t *testing.T, resp *http.Response) {
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
assert.Equal(t, "application/json", contentType)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
var retrieveResp api.RetrieveModelResponse
|
||||
err = json.Unmarshal(body, &retrieveResp)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "show-model", retrieveResp.Id)
|
||||
assert.Equal(t, "library", retrieveResp.OwnedBy)
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -325,3 +383,40 @@ func TestCase(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShow(t *testing.T) {
|
||||
t.Setenv("OLLAMA_MODELS", t.TempDir())
|
||||
envconfig.LoadConfig()
|
||||
|
||||
var s Server
|
||||
|
||||
createRequest(t, s.CreateModelHandler, api.CreateRequest{
|
||||
Name: "show-model",
|
||||
Modelfile: fmt.Sprintf(
|
||||
"FROM %s\nFROM %s",
|
||||
createBinFile(t, llm.KV{"general.architecture": "test"}, nil),
|
||||
createBinFile(t, llm.KV{"general.architecture": "clip"}, nil),
|
||||
),
|
||||
})
|
||||
|
||||
w := createRequest(t, s.ShowModelHandler, api.ShowRequest{
|
||||
Name: "show-model",
|
||||
})
|
||||
|
||||
if w.Code != http.StatusOK {
|
||||
t.Fatalf("expected status code 200, actual %d", w.Code)
|
||||
}
|
||||
|
||||
var resp api.ShowResponse
|
||||
if err := json.NewDecoder(w.Body).Decode(&resp); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if resp.ModelInfo["general.architecture"] != "test" {
|
||||
t.Fatal("Expected model architecture to be 'test', but got", resp.ModelInfo["general.architecture"])
|
||||
}
|
||||
|
||||
if resp.ProjectorInfo["general.architecture"] != "clip" {
|
||||
t.Fatal("Expected projector architecture to be 'clip', but got", resp.ProjectorInfo["general.architecture"])
|
||||
}
|
||||
}
server/sched.go (320 lines changed)
@@ -7,7 +7,6 @@ import (
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -24,9 +23,11 @@ type LlmRequest struct {
|
||||
ctx context.Context //nolint:containedctx
|
||||
model *Model
|
||||
opts api.Options
|
||||
sessionDuration time.Duration
|
||||
origNumCtx int // Track the initial ctx request
|
||||
sessionDuration *api.Duration
|
||||
successCh chan *runnerRef
|
||||
errCh chan error
|
||||
schedAttempts uint
|
||||
}
|
||||
|
||||
type Scheduler struct {
|
||||
@@ -38,11 +39,23 @@ type Scheduler struct {
|
||||
loaded map[string]*runnerRef
|
||||
loadedMu sync.Mutex
|
||||
|
||||
loadFn func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
|
||||
newServerFn func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error)
|
||||
getGpuFn func() gpu.GpuInfoList
|
||||
loadFn func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int)
|
||||
newServerFn func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error)
|
||||
getGpuFn func() gpu.GpuInfoList
|
||||
getCpuFn func() gpu.GpuInfoList
|
||||
reschedDelay time.Duration
|
||||
}
|
||||
|
||||
// Default automatic value for number of models we allow per GPU
|
||||
// Model will still need to fit in VRAM, but loading many small models
|
||||
// on a large GPU can cause stalling
|
||||
var defaultModelsPerGPU = 3
|
||||
|
||||
// Default automatic value for parallel setting
|
||||
// Model will still need to fit in VRAM. If this setting wont fit
|
||||
// we'll back off down to 1 to try to get it to fit
|
||||
var defaultParallel = 4
|
||||
|
||||
var ErrMaxQueue = fmt.Errorf("server busy, please try again. maximum pending requests exceeded")
|
||||
|
||||
func InitScheduler(ctx context.Context) *Scheduler {
|
||||
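The comments above describe how the automatic `OLLAMA_MAX_LOADED_MODELS` value is chosen later in `processPending`: three models per GPU when every GPU reports free memory reliably, otherwise one per GPU. A sketch of that decision, assuming a simplified GPU struct defined just for the example:

```go
package main

import "fmt"

type gpuInfo struct {
	ID                   string
	UnreliableFreeMemory bool
}

const defaultModelsPerGPU = 3

// defaultMaxRunners sketches the automatic concurrency default described above.
func defaultMaxRunners(gpus []gpuInfo) int {
	for _, g := range gpus {
		if g.UnreliableFreeMemory {
			return len(gpus)
		}
	}
	return defaultModelsPerGPU * len(gpus)
}

func main() {
	fmt.Println(defaultMaxRunners([]gpuInfo{{ID: "0"}, {ID: "1"}}))                  // 6
	fmt.Println(defaultMaxRunners([]gpuInfo{{ID: "0", UnreliableFreeMemory: true}})) // 1
}
```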
@@ -54,20 +67,19 @@ func InitScheduler(ctx context.Context) *Scheduler {
|
||||
loaded: make(map[string]*runnerRef),
|
||||
newServerFn: llm.NewLlamaServer,
|
||||
getGpuFn: gpu.GetGPUInfo,
|
||||
getCpuFn: gpu.GetCPUInfo,
|
||||
reschedDelay: 250 * time.Millisecond,
|
||||
}
|
||||
sched.loadFn = sched.load
|
||||
return sched
|
||||
}
|
||||
|
||||
// context must be canceled to decrement ref count and release the runner
|
||||
func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration time.Duration) (chan *runnerRef, chan error) {
|
||||
// allocate a large enough kv cache for all parallel requests
|
||||
func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration *api.Duration) (chan *runnerRef, chan error) {
|
||||
if opts.NumCtx < 4 {
|
||||
opts.NumCtx = 4
|
||||
}
|
||||
|
||||
opts.NumCtx *= envconfig.NumParallel
|
||||
|
||||
req := &LlmRequest{
|
||||
ctx: c,
|
||||
model: model,
|
||||
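`GetRunner` sizes the KV cache for concurrency by clamping `NumCtx` to at least 4 and multiplying by the parallel slot count. The same arithmetic in isolation, with a hypothetical helper name:

```go
package main

import "fmt"

// effectiveNumCtx sketches the context sizing above: the per-request context
// is clamped to at least 4, then multiplied by the number of parallel slots so
// the shared KV cache can hold every concurrent sequence.
func effectiveNumCtx(requested, numParallel int) int {
	if requested < 4 {
		requested = 4
	}
	if numParallel < 1 {
		numParallel = 1
	}
	return requested * numParallel
}

func main() {
	fmt.Println(effectiveNumCtx(2048, 4)) // 8192
	fmt.Println(effectiveNumCtx(0, 1))    // 4
}
```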
@@ -105,13 +117,33 @@ func (s *Scheduler) processPending(ctx context.Context) {
|
||||
return
|
||||
case pending := <-s.pendingReqCh:
|
||||
// Block other requests until we get this pending request running
|
||||
pending.schedAttempts++
|
||||
if pending.origNumCtx == 0 {
|
||||
pending.origNumCtx = pending.opts.NumCtx
|
||||
}
|
||||
|
||||
if pending.ctx.Err() != nil {
|
||||
slog.Debug("pending request cancelled or timed out, skipping scheduling")
|
||||
continue
|
||||
}
|
||||
numParallel := envconfig.NumParallel
|
||||
// TODO (jmorganca): multimodal models don't support parallel yet
|
||||
// see https://github.com/ollama/ollama/issues/4165
|
||||
if len(pending.model.ProjectorPaths) > 0 && numParallel != 1 {
|
||||
numParallel = 1
|
||||
slog.Warn("multimodal models don't support parallel requests yet")
|
||||
}
|
||||
// Keep NumCtx and numParallel in sync
|
||||
if numParallel > 1 {
|
||||
pending.opts.NumCtx = pending.origNumCtx * numParallel
|
||||
}
|
||||
|
||||
for {
|
||||
cpus := s.getCpuFn()
|
||||
var systemMem gpu.GpuInfo
|
||||
if len(cpus) > 0 {
|
||||
systemMem = cpus[0]
|
||||
}
|
||||
var runnerToExpire *runnerRef
|
||||
s.loadedMu.Lock()
|
||||
runner := s.loaded[pending.model.ModelPath]
|
||||
@@ -131,44 +163,141 @@ func (s *Scheduler) processPending(ctx context.Context) {
|
||||
} else {
|
||||
// Either no models are loaded or below envconfig.MaxRunners
|
||||
// Get a refreshed GPU list
|
||||
gpus := s.getGpuFn()
|
||||
var gpus gpu.GpuInfoList
|
||||
if pending.opts.NumGPU == 0 {
|
||||
gpus = s.getCpuFn()
|
||||
} else {
|
||||
gpus = s.getGpuFn()
|
||||
}
|
||||
|
||||
if envconfig.MaxRunners <= 0 {
|
||||
// No user specified MaxRunners, so figure out what automatic setting to use
|
||||
// If all GPUs have reliable free memory reporting, defaultModelsPerGPU * the number of GPUs
|
||||
// if any GPU has unreliable free memory reporting, 1x the number of GPUs
|
||||
allReliable := true
|
||||
for _, gpu := range gpus {
|
||||
if gpu.UnreliableFreeMemory {
|
||||
allReliable = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if allReliable {
|
||||
envconfig.MaxRunners = defaultModelsPerGPU * len(gpus)
|
||||
slog.Debug("updating default concurrency", "OLLAMA_MAX_LOADED_MODELS", envconfig.MaxRunners, "gpu_count", len(gpus))
|
||||
} else {
|
||||
slog.Info("one or more GPUs detected that are unable to accurately report free memory - disabling default concurrency")
|
||||
envconfig.MaxRunners = len(gpus)
|
||||
}
|
||||
}
|
||||
|
||||
// Load model for fitting
|
||||
ggml, err := llm.LoadModel(pending.model.ModelPath)
|
||||
ggml, err := llm.LoadModel(pending.model.ModelPath, 0)
|
||||
if err != nil {
|
||||
pending.errCh <- err
|
||||
break
|
||||
}
|
||||
|
||||
// If we're CPU only mode, just limit by envconfig.MaxRunners above
|
||||
// TODO handle system memory exhaustion
|
||||
if (len(gpus) == 1 && gpus[0].Library == "cpu") || pending.opts.NumGPU == 0 {
|
||||
slog.Debug("cpu mode with existing models, loading")
|
||||
s.loadFn(pending, ggml, gpus)
|
||||
break
|
||||
estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts)
|
||||
maxSize := systemMem.FreeMemory
|
||||
|
||||
// Add available GPU memory to the total pool
|
||||
// macOS hardware has unified memory so don't double count
|
||||
if runtime.GOOS != "darwin" {
|
||||
for _, gpu := range gpus {
|
||||
if gpu.Library == "cpu" {
|
||||
continue
|
||||
}
|
||||
if loadedCount == 0 {
|
||||
// If no other models are loaded, set the limit based on what's available
|
||||
maxSize += gpu.FreeMemory
|
||||
} else {
|
||||
// Other models could be unloaded, favor total memory for limit
|
||||
maxSize += gpu.TotalMemory
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// No models loaded. Load the model but prefer the best fit.
|
||||
if loadedCount == 0 {
|
||||
// Block attempting to load a model larger than system memory + GPU memory
|
||||
if estimate.TotalSize > maxSize {
|
||||
slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize))
|
||||
|
||||
// Linux will crash if over-allocating memory - return an error to the user.
|
||||
// TODO (jmorganca): add reasonable upper limits for darwin and windows as well
|
||||
if runtime.GOOS == "linux" {
|
||||
pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate if the model will fit in the available system memory, or if we should unload a model first
|
||||
if len(gpus) == 1 && gpus[0].Library == "cpu" {
|
||||
// simplifying assumption of defaultParallel when in CPU mode
|
||||
if numParallel <= 0 {
|
||||
numParallel = defaultParallel
|
||||
pending.opts.NumCtx = pending.origNumCtx * numParallel
|
||||
}
|
||||
|
||||
if loadedCount == 0 {
|
||||
slog.Debug("cpu mode with first model, loading")
|
||||
s.loadFn(pending, ggml, gpus, numParallel)
|
||||
break
|
||||
}
|
||||
runnerToExpire = s.maybeFindCPURunnerToUnload(pending, ggml, gpus)
|
||||
if runnerToExpire == nil {
|
||||
slog.Debug("cpu mode with available system memory or first model, loading")
|
||||
s.loadFn(pending, ggml, gpus, numParallel)
|
||||
break
|
||||
}
|
||||
// else we need to expire a runner
|
||||
} else if loadedCount == 0 {
|
||||
// No models loaded. Load the model but prefer the best fit.
|
||||
slog.Debug("loading first model", "model", pending.model.ModelPath)
|
||||
g := pickBestFitGPUs(pending, ggml, gpus)
|
||||
g := pickBestFitGPUs(pending, ggml, gpus, &numParallel)
|
||||
if g != nil {
|
||||
gpus = g
|
||||
}
|
||||
s.loadFn(pending, ggml, gpus)
|
||||
s.loadFn(pending, ggml, gpus, numParallel)
|
||||
break
|
||||
}
|
||||
|
||||
// More than one loaded model, so we have to see if the new one fits
|
||||
// Update free memory from currently loaded models
|
||||
s.updateFreeSpace(gpus)
|
||||
gpus = pickBestFitGPUs(pending, ggml, gpus)
|
||||
if gpus != nil {
|
||||
slog.Debug("new model fits with existing models, loading")
|
||||
s.loadFn(pending, ggml, gpus)
|
||||
break
|
||||
if runnerToExpire == nil {
|
||||
// More than one loaded model, so we have to see if the
|
||||
// new one fits
|
||||
//
|
||||
// We want to avoid loading on any GPUs that have other
|
||||
// models still loading on them to avoid potential races
|
||||
// with VRAM consumption ramping up during load
|
||||
availGpus := s.filterGPUsWithoutLoadingModels(gpus)
|
||||
|
||||
// Update free memory from currently loaded models
|
||||
s.updateFreeSpace(availGpus)
|
||||
fitGpus := pickBestFitGPUs(pending, ggml, availGpus, &numParallel)
|
||||
if fitGpus != nil {
|
||||
slog.Debug("new model fits with existing models, loading")
|
||||
s.loadFn(pending, ggml, fitGpus, numParallel)
|
||||
break
|
||||
}
|
||||
|
||||
// We couldn't find a set of GPUs to fully load the new
|
||||
// model. If no other models are loading (both GPU lists
|
||||
// are the same) then we need to unload another model to
|
||||
// make room
|
||||
if len(availGpus) < len(gpus) {
|
||||
// There are other requests pending, and this one
|
||||
// needs more time, so put it on the back of the
|
||||
// queue so that we might satisfy other pending
|
||||
// requests that aren't blocked
|
||||
go func() {
|
||||
// Process in a go routine to avoid deadlocking
|
||||
// the scheduler if our queue is full
|
||||
slog.Debug("delaying scheduling while other models finish loading", "attempts", pending.schedAttempts, "model", pending.model.ModelPath)
|
||||
time.Sleep(s.reschedDelay)
|
||||
s.pendingReqCh <- pending
|
||||
}()
|
||||
break
|
||||
}
|
||||
runnerToExpire = s.findRunnerToUnload()
|
||||
}
|
||||
runnerToExpire = s.findRunnerToUnload()
|
||||
}
|
||||
|
||||
if runnerToExpire == nil {
|
||||
@@ -297,7 +426,9 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *Llm
|
||||
runner.expireTimer.Stop()
|
||||
runner.expireTimer = nil
|
||||
}
|
||||
runner.sessionDuration = pending.sessionDuration
|
||||
if pending.sessionDuration != nil {
|
||||
runner.sessionDuration = pending.sessionDuration.Duration
|
||||
}
|
||||
pending.successCh <- runner
|
||||
go func() {
|
||||
<-pending.ctx.Done()
|
||||
@@ -306,8 +437,15 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *Llm
|
||||
}()
|
||||
}
|
||||
|
||||
func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) {
|
||||
llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts)
|
||||
func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int) {
|
||||
if numParallel < 1 {
|
||||
numParallel = 1
|
||||
}
|
||||
sessionDuration := envconfig.KeepAlive
|
||||
if req.sessionDuration != nil {
|
||||
sessionDuration = req.sessionDuration.Duration
|
||||
}
|
||||
llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts, numParallel)
|
||||
if err != nil {
|
||||
// some older models are not compatible with newer versions of llama.cpp
|
||||
// show a generalized compatibility error until there is a better way to
|
||||
@@ -324,13 +462,14 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
|
||||
modelPath: req.model.ModelPath,
|
||||
llama: llama,
|
||||
Options: &req.opts,
|
||||
sessionDuration: req.sessionDuration,
|
||||
sessionDuration: sessionDuration,
|
||||
gpus: gpus,
|
||||
estimatedVRAM: llama.EstimatedVRAM(),
|
||||
estimatedTotal: llama.EstimatedTotal(),
|
||||
loading: true,
|
||||
refCount: 1,
|
||||
}
|
||||
runner.numParallel = numParallel
|
||||
runner.refMu.Lock()
|
||||
|
||||
s.loadedMu.Lock()
|
||||
@@ -368,17 +507,9 @@ func (s *Scheduler) updateFreeSpace(allGpus gpu.GpuInfoList) {
|
||||
s.loadedMu.Lock()
|
||||
for _, r := range s.loaded {
|
||||
r.refMu.Lock()
|
||||
gpuIDs := make([]string, 0, len(r.gpus))
|
||||
if r.llama != nil {
|
||||
// TODO this should be broken down by GPU instead of assuming uniform spread
|
||||
estimatedVRAMPerGPU := r.llama.EstimatedVRAM() / uint64(len(r.gpus))
|
||||
for _, gpu := range r.gpus {
|
||||
gpuIDs = append(gpuIDs, gpu.ID)
|
||||
}
|
||||
for _, gpu := range allGpus {
|
||||
if slices.Contains(gpuIDs, gpu.ID) {
|
||||
predMap[predKey{gpu.Library, gpu.ID}] += estimatedVRAMPerGPU
|
||||
}
|
||||
predMap[predKey{gpu.Library, gpu.ID}] += r.llama.EstimatedVRAMByGPU(gpu.ID)
|
||||
}
|
||||
} else {
|
||||
slog.Warn("unexpected nil runner reference, memory prediction may be incorrect")
|
||||
@@ -401,11 +532,36 @@ func (s *Scheduler) updateFreeSpace(allGpus gpu.GpuInfoList) {
|
||||
// after we start our first runner, then we'll never account for that, so picking the smallest free value seems prudent.
|
||||
allGpus[i].FreeMemory = allGpus[i].TotalMemory - p
|
||||
}
|
||||
slog.Info("updated VRAM", "gpu", allGpus[i].ID, "library", allGpus[i].Library, "total", format.HumanBytes2(allGpus[i].TotalMemory), "available", format.HumanBytes2(allGpus[i].FreeMemory))
|
||||
slog.Info("updated VRAM based on existing loaded models", "gpu", allGpus[i].ID, "library", allGpus[i].Library, "total", format.HumanBytes2(allGpus[i].TotalMemory), "available", format.HumanBytes2(allGpus[i].FreeMemory))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// While models are loading the VRAM consumption numbers will be indeterminate, so we have
|
||||
// to avoid scheduling another model on the same GPU(s) that haven't stabilized.
|
||||
// This routine returns the set of GPUs that do not have an active loading model.
|
||||
// If all GPUs have loading models, an empty list will be returned (not a single CPU entry)
|
||||
func (s *Scheduler) filterGPUsWithoutLoadingModels(allGpus gpu.GpuInfoList) gpu.GpuInfoList {
|
||||
ret := append(gpu.GpuInfoList{}, allGpus...)
|
||||
s.loadedMu.Lock()
|
||||
defer s.loadedMu.Unlock()
|
||||
for _, runner := range s.loaded {
|
||||
if runner.loading {
|
||||
slog.Debug("overlapping loads detected", "gpus", runner.gpus, "model", runner.modelPath)
|
||||
for _, busyGPU := range runner.gpus {
|
||||
for i := range ret {
|
||||
if ret[i].ID == busyGPU.ID {
|
||||
ret = append(ret[:i], ret[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// TODO consolidate sched_types.go
|
||||
type runnerRef struct {
|
||||
refMu sync.Mutex
|
||||
// refCond sync.Cond // Signaled on transition from 1 -> 0 refCount
|
||||
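`filterGPUsWithoutLoadingModels` above drops GPUs that still have a model loading on them, since their free-VRAM readings have not yet stabilized. A simplified stand-in using plain structs invented for the example:

```go
package main

import "fmt"

type gpuInfo struct{ ID string }

type runner struct {
	loading bool
	gpus    []gpuInfo
}

// withoutLoading sketches the filter: start from the full GPU list and drop
// any GPU that a still-loading runner is using.
func withoutLoading(all []gpuInfo, runners []runner) []gpuInfo {
	busy := map[string]bool{}
	for _, r := range runners {
		if r.loading {
			for _, g := range r.gpus {
				busy[g.ID] = true
			}
		}
	}
	var free []gpuInfo
	for _, g := range all {
		if !busy[g.ID] {
			free = append(free, g)
		}
	}
	return free
}

func main() {
	all := []gpuInfo{{"0"}, {"1"}}
	runners := []runner{{loading: true, gpus: []gpuInfo{{"0"}}}}
	fmt.Println(withoutLoading(all, runners)) // [{1}]
}
```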
@@ -422,8 +578,9 @@ type runnerRef struct {
|
||||
expireTimer *time.Timer
|
||||
expiresAt time.Time
|
||||
|
||||
model *Model
|
||||
modelPath string
|
||||
model *Model
|
||||
modelPath string
|
||||
numParallel int
|
||||
*api.Options
|
||||
}
|
||||
|
||||
@@ -464,6 +621,9 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool
|
||||
optsNew.NumGPU = -1
|
||||
}
|
||||
|
||||
// Normalize the NumCtx for parallelism
|
||||
optsExisting.NumCtx = optsExisting.NumCtx / runner.numParallel
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
if !reflect.DeepEqual(runner.model.AdapterPaths, req.model.AdapterPaths) || // have the adapters changed?
|
||||
@@ -487,8 +647,11 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool
|
||||
func (runner *runnerRef) waitForVRAMRecovery() chan interface{} {
|
||||
finished := make(chan interface{}, 1)
|
||||
|
||||
// CPU or Metal don't need checking, so no waiting required, windows can page VRAM, and the APIs we query tend to be optimistic on free space
|
||||
if (len(runner.gpus) == 1 && (runner.gpus[0].Library == "cpu" || runner.gpus[0].Library == "metal")) || runtime.GOOS == "windows" {
|
||||
// CPU or Metal don't need checking, so no waiting required
|
||||
// windows can page VRAM, only cuda currently can report accurate used vram usage
|
||||
if len(runner.gpus) == 0 ||
|
||||
(len(runner.gpus) == 1 && (runner.gpus[0].Library == "cpu" || runner.gpus[0].Library == "metal")) ||
|
||||
(runtime.GOOS == "windows" && runner.gpus[0].Library != "cuda") {
|
||||
finished <- struct{}{}
|
||||
return finished
|
||||
}
|
||||
@@ -508,7 +671,7 @@ func (runner *runnerRef) waitForVRAMRecovery() chan interface{} {
|
||||
for {
|
||||
<-ticker.C
|
||||
if time.Now().After(expiresAt) {
|
||||
slog.Warn("gpu VRAM usage didn't recover within timeout", "seconds", time.Since(start).Seconds())
|
||||
slog.Warn("gpu VRAM usage didn't recover within timeout", "seconds", time.Since(start).Seconds(), "model", runner.modelPath)
|
||||
finished <- struct{}{}
|
||||
}
|
||||
|
||||
@@ -521,7 +684,7 @@ func (runner *runnerRef) waitForVRAMRecovery() chan interface{} {
|
||||
}
|
||||
// If we're within ~80% of the estimated memory usage recovered, bail out
|
||||
if float32(freeMemoryNow-freeMemoryBefore) > float32(runner.estimatedVRAM)*0.8 {
|
||||
slog.Debug(fmt.Sprintf("gpu VRAM free memory converged after %0.2f seconds", time.Since(start).Seconds()))
|
||||
slog.Debug(fmt.Sprintf("gpu VRAM free memory converged after %0.2f seconds", time.Since(start).Seconds()), "model", runner.modelPath)
|
||||
finished <- struct{}{}
|
||||
return
|
||||
}
|
||||
@@ -547,21 +710,39 @@ func (a ByDuration) Less(i, j int) bool {
|
||||
|
||||
// pickBestFitGPUs will try to find the optimal placement of the model in the available GPUs where the model fully fits
|
||||
// If the model can not be fit fully within the available GPU(s) nil is returned
|
||||
func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) gpu.GpuInfoList {
|
||||
// If numParallel is <= 0, this will attempt to optimize parallelism based on available VRAM, and adjust
// opts.NumCtx accordingly
func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel *int) gpu.GpuInfoList {
var estimatedVRAM uint64

var numParallelToTry []int
if *numParallel <= 0 {
// If no specific parallel setting was provided, try larger then smaller, always end with 1
numParallelToTry = append(numParallelToTry, defaultParallel, 1)
} else {
numParallelToTry = []int{*numParallel}
}

for _, gl := range gpus.ByLibrary() {
var ok bool
sgl := append(make(gpu.GpuInfoList, 0, len(gl)), gl...)

// TODO - potentially sort by performance capability, existing models loaded, etc.
// TODO - Eliminate any GPUs that already have envconfig.MaxRunners loaded on them
// Note: at present, this will favor more VRAM over faster GPU speed in mixed setups
sort.Sort(sort.Reverse(gpu.ByFreeMemory(sgl)))

// First attempt to fit the model into a single GPU
for _, g := range sgl {
if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
slog.Debug("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM))
return []gpu.GpuInfo{g}
for _, p := range numParallelToTry {
req.opts.NumCtx = req.origNumCtx * p
if !envconfig.SchedSpread {
for _, g := range sgl {
if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
slog.Info("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "parallel", p, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM))
*numParallel = p
return []gpu.GpuInfo{g}
}
}
}
}

@@ -570,9 +751,13 @@ func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) gpu.
// - try subsets of GPUs instead of just falling back to 1 or all in a family

// Now try all the GPUs
if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
slog.Debug("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "required", format.HumanBytes2(estimatedVRAM))
return sgl
for _, p := range numParallelToTry {
req.opts.NumCtx = req.origNumCtx * p
if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
slog.Info("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "parallel", p, "required", format.HumanBytes2(estimatedVRAM))
*numParallel = p
return sgl
}
}
}
return nil
@@ -586,6 +771,10 @@ func (s *Scheduler) findRunnerToUnload() *runnerRef {
runnerList = append(runnerList, r)
}
s.loadedMu.Unlock()
if len(runnerList) == 0 {
slog.Debug("no loaded runner to unload")
return nil
}

// In the future we can enhance the algorithm to be smarter about picking the optimal runner to unload
// e.g., if we have multiple options, will one make room for the request?
@@ -616,3 +805,18 @@ func (s *Scheduler) unloadAllRunners() {
}
}
}

// If other runners are loaded, make sure the pending request will fit in system memory
// If not, pick a runner to unload, else return nil and the request can be loaded
func (s *Scheduler) maybeFindCPURunnerToUnload(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) *runnerRef {
slog.Debug("evaluating if CPU model load will fit in available system memory")
estimate := llm.EstimateGPULayers(gpus, ggml, req.model.ProjectorPaths, req.opts)
if estimate.TotalSize <= gpus[0].FreeMemory {
slog.Debug("cpu inference mode, model fits in available system memory", "model", format.HumanBytes2(estimate.TotalSize), "available", format.HumanBytes2(gpus[0].FreeMemory))
return nil
}

// TODO - optimization: try to find CPU only runners first, or partial offloads with enough in system memory to make room

return s.findRunnerToUnload()
}
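The pickBestFitGPUs hunks above add a numParallel pointer and multiply req.opts.NumCtx by each candidate slot count before asking PredictServerFit whether the model fits. A minimal, self-contained sketch of that fallback loop; the value of defaultParallel and the fits callback are assumptions standing in for the scheduler's constant and llm.PredictServerFit:

```go
package main

import "fmt"

// defaultParallel's value is an assumption for this sketch; the real constant
// lives in the scheduler package.
const defaultParallel = 4

// chooseParallel mirrors the fallback loop in pickBestFitGPUs: when no explicit
// parallel setting is given, try the default slot count and then a single slot,
// scaling the context window by the slot count before each fit check.
func chooseParallel(numParallel *int, origNumCtx int, fits func(numCtx int) bool) bool {
	tries := []int{defaultParallel, 1}
	if *numParallel > 0 {
		tries = []int{*numParallel}
	}
	for _, p := range tries {
		if fits(origNumCtx * p) {
			*numParallel = p
			return true
		}
	}
	return false
}

func main() {
	n := 0
	ok := chooseParallel(&n, 2048, func(numCtx int) bool { return numCtx <= 4096 })
	fmt.Println(ok, n) // true 1: four slots would need 8192 of context, so it falls back to one slot
}
```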
@@ -44,14 +44,14 @@ func TestLoad(t *testing.T) {
opts: api.DefaultOptions(),
successCh: make(chan *runnerRef, 1),
errCh: make(chan error, 1),
sessionDuration: 2,
sessionDuration: &api.Duration{Duration: 2 * time.Second},
}
// Fail to load model first
s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
return nil, fmt.Errorf("something failed to load model blah")
}
gpus := gpu.GpuInfoList{}
s.load(req, ggml, gpus)
s.load(req, ggml, gpus, 0)
require.Empty(t, req.successCh)
require.Len(t, req.errCh, 1)
s.loadedMu.Lock()
@@ -60,11 +60,11 @@ func TestLoad(t *testing.T) {
err := <-req.errCh
require.Contains(t, err.Error(), "this model may be incompatible")

server := &mockLlm{estimatedVRAM: 10}
s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
server := &mockLlm{estimatedVRAM: 10, estimatedVRAMByGPU: map[string]uint64{}}
s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
return server, nil
}
s.load(req, ggml, gpus)
s.load(req, ggml, gpus, 0)
select {
case err := <-req.errCh:
require.NoError(t, err)
@@ -78,12 +78,12 @@ func TestLoad(t *testing.T) {

req.model.ModelPath = "dummy_model_path"
server.waitResp = fmt.Errorf("wait failure")
s.load(req, ggml, gpus)
s.load(req, ggml, gpus, 0)
select {
case err := <-req.errCh:
require.Contains(t, err.Error(), "wait failure")
case resp := <-req.successCh:
t.Errorf("unexpected success %v", resp)
t.Fatalf("unexpected success %v", resp)
}
s.loadedMu.Lock()
runner := s.loaded["dummy_model_path"]
@@ -102,7 +102,7 @@ type bundle struct {
ggml *llm.GGML
}

func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
return scenario.srv, nil
}

@@ -128,44 +128,46 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
"tokenizer.ggml.scores": []float32{0},
"tokenizer.ggml.token_type": []int32{0},
}, []llm.Tensor{
{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}},
{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
{Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
})
require.NoError(t, err)

fname := f.Name()
model := &Model{Name: modelName, ModelPath: fname}
scenario.ggml, err = llm.LoadModel(model.ModelPath)
scenario.ggml, err = llm.LoadModel(model.ModelPath, 0)
require.NoError(t, err)

scenario.req = &LlmRequest{
ctx: scenario.ctx,
model: model,
opts: api.DefaultOptions(),
sessionDuration: 5 * time.Millisecond,
sessionDuration: &api.Duration{Duration: 5 * time.Millisecond},
successCh: make(chan *runnerRef, 1),
errCh: make(chan error, 1),
}
scenario.srv = &mockLlm{estimatedVRAM: estimatedVRAM}
scenario.srv = &mockLlm{estimatedVRAM: estimatedVRAM, estimatedVRAMByGPU: map[string]uint64{"": estimatedVRAM}}
return scenario
}

func TestRequests(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), time.Second)
ctx, done := context.WithTimeout(context.Background(), 10*time.Second)
defer done()

// Same model, same request
scenario1a := newScenario(t, ctx, "ollama-model-1", 10)
scenario1a.req.sessionDuration = 0
scenario1a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond}
scenario1b := newScenario(t, ctx, "ollama-model-1", 11)
scenario1b.req.model = scenario1a.req.model
scenario1b.ggml = scenario1a.ggml
scenario1b.req.sessionDuration = 0
scenario1b.req.sessionDuration = &api.Duration{Duration: 0}

// simple reload of same model
scenario2a := newScenario(t, ctx, "ollama-model-1", 20)
tmpModel := *scenario1a.req.model
scenario2a.req.model = &tmpModel
scenario2a.ggml = scenario1a.ggml
scenario2a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond}

// Multiple loaded models
scenario3a := newScenario(t, ctx, "ollama-model-3a", 1*format.GigaByte)
@@ -181,6 +183,12 @@ func TestRequests(t *testing.T) {
g.FreeMemory = 12 * format.GigaByte
return []gpu.GpuInfo{g}
}
s.getCpuFn = func() gpu.GpuInfoList {
g := gpu.GpuInfo{Library: "cpu"}
g.TotalMemory = 32 * format.GigaByte
g.FreeMemory = 26 * format.GigaByte
return []gpu.GpuInfo{g}
}
s.newServerFn = scenario1a.newServer
slog.Info("scenario1a")
s.pendingReqCh <- scenario1a.req
@@ -191,8 +199,10 @@ func TestRequests(t *testing.T) {
require.Equal(t, resp.llama, scenario1a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario1a.req.errCh)
case err := <-scenario1a.req.errCh:
t.Fatal(err.Error())
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}

// Same runner as first request due to not needing a reload
@@ -204,8 +214,10 @@ func TestRequests(t *testing.T) {
require.Equal(t, resp.llama, scenario1a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario1b.req.errCh)
case err := <-scenario1b.req.errCh:
t.Fatal(err.Error())
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}

// Trigger a reload
@@ -222,8 +234,10 @@ func TestRequests(t *testing.T) {
require.Equal(t, resp.llama, scenario2a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario2a.req.errCh)
case err := <-scenario2a.req.errCh:
t.Fatal(err.Error())
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}

envconfig.MaxRunners = 1
@@ -238,8 +252,10 @@ func TestRequests(t *testing.T) {
require.Equal(t, resp.llama, scenario3a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3a.req.errCh)
case err := <-scenario3a.req.errCh:
t.Fatal(err.Error())
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}
s.loadedMu.Lock()
require.Len(t, s.loaded, 1)
@@ -254,8 +270,10 @@ func TestRequests(t *testing.T) {
require.Equal(t, resp.llama, scenario3b.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3b.req.errCh)
case err := <-scenario3b.req.errCh:
t.Fatal(err.Error())
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}
s.loadedMu.Lock()
require.Len(t, s.loaded, 2)
@@ -270,8 +288,10 @@ func TestRequests(t *testing.T) {
require.Equal(t, resp.llama, scenario3c.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3c.req.errCh)
case err := <-scenario3c.req.errCh:
t.Fatal(err.Error())
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}
s.loadedMu.Lock()
require.Len(t, s.loaded, 3)
@@ -298,7 +318,7 @@ func TestRequests(t *testing.T) {
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3d.req.errCh)
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}
s.loadedMu.Lock()
require.Len(t, s.loaded, 2)
@@ -309,13 +329,12 @@ func TestGetRunner(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer done()

// Same model, same request
scenario1a := newScenario(t, ctx, "ollama-model-1a", 10)
scenario1a.req.sessionDuration = 0
scenario1a.req.sessionDuration = &api.Duration{Duration: 0}
scenario1b := newScenario(t, ctx, "ollama-model-1b", 10)
scenario1b.req.sessionDuration = 0
scenario1b.req.sessionDuration = &api.Duration{Duration: 0}
scenario1c := newScenario(t, ctx, "ollama-model-1c", 10)
scenario1c.req.sessionDuration = 0
scenario1c.req.sessionDuration = &api.Duration{Duration: 0}
envconfig.MaxQueuedRequests = 1
s := InitScheduler(ctx)
s.getGpuFn = func() gpu.GpuInfoList {
@@ -342,7 +361,7 @@ func TestGetRunner(t *testing.T) {
require.Empty(t, s.pendingReqCh)
require.Empty(t, errCh1a)
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}
scenario1a.ctxDone()
s.loadedMu.Lock()
@@ -393,9 +412,9 @@ func TestPrematureExpired(t *testing.T) {
slog.Info("sending premature expired event now")
s.expiredCh <- resp // Shouldn't happen in real life, but make sure its safe
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}
time.Sleep(scenario1a.req.sessionDuration)
time.Sleep(scenario1a.req.sessionDuration.Duration)
scenario1a.ctxDone()
time.Sleep(20 * time.Millisecond)
require.LessOrEqual(t, len(s.finishedReqCh), 1)
@@ -416,11 +435,11 @@ func TestUseLoadedRunner(t *testing.T) {
ctx: ctx,
opts: api.DefaultOptions(),
successCh: make(chan *runnerRef, 1),
sessionDuration: 2,
sessionDuration: &api.Duration{Duration: 2},
}
finished := make(chan *LlmRequest)
llm1 := &mockLlm{}
r1 := &runnerRef{llama: llm1, sessionDuration: 1}
llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
r1 := &runnerRef{llama: llm1, sessionDuration: 1, numParallel: 1}
req.useLoadedRunner(r1, finished)
require.Equal(t, uint(1), r1.refCount)
require.Equal(t, time.Duration(2), r1.sessionDuration)
@@ -428,7 +447,7 @@ func TestUseLoadedRunner(t *testing.T) {
case success := <-req.successCh:
require.Equal(t, r1, success)
case <-ctx.Done():
t.Errorf("timeout")
t.Fatal("timeout")
}
done()
fin := <-finished
@@ -452,10 +471,10 @@ func TestUpdateFreeSpace(t *testing.T) {
gpus[0].FreeMemory = 900
gpus[1].TotalMemory = 2000
gpus[1].FreeMemory = 1900
llm1 := &mockLlm{estimatedVRAM: 100}
llm2 := &mockLlm{estimatedVRAM: 200}
r1 := &runnerRef{llama: llm1, gpus: gpus}
r2 := &runnerRef{llama: llm2, gpus: gpus}
llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 50, "2": 50}}
llm2 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 125, "2": 75}}
r1 := &runnerRef{llama: llm1, gpus: gpus, numParallel: 1}
r2 := &runnerRef{llama: llm2, gpus: gpus, numParallel: 1}

s := InitScheduler(ctx)
s.loadedMu.Lock()
@@ -464,16 +483,50 @@ func TestUpdateFreeSpace(t *testing.T) {
s.loadedMu.Unlock()

s.updateFreeSpace(gpus)
require.Equal(t, uint64(850), gpus[0].FreeMemory)
require.Equal(t, uint64(1850), gpus[1].FreeMemory)
require.Equal(t, uint64(1000-50-125), gpus[0].FreeMemory)
require.Equal(t, uint64(2000-50-75), gpus[1].FreeMemory)
}

func TestFilterGPUsWithoutLoadingModels(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer done()
gpus := gpu.GpuInfoList{
{
Library: "cuda",
ID: "0",
},
{
Library: "cuda",
ID: "1",
},
}
r1 := &runnerRef{gpus: gpu.GpuInfoList{gpus[0]}, loading: true}

s := InitScheduler(ctx)
s.loadedMu.Lock()
s.loaded["a"] = r1
s.loadedMu.Unlock()

tmp := s.filterGPUsWithoutLoadingModels(gpus)
require.Len(t, tmp, 1)
require.Equal(t, "1", tmp[0].ID)

r1.gpus = gpu.GpuInfoList{gpus[1]}
tmp = s.filterGPUsWithoutLoadingModels(gpus)
require.Len(t, tmp, 1)
require.Equal(t, "0", tmp[0].ID)

r1.gpus = gpu.GpuInfoList{}
tmp = s.filterGPUsWithoutLoadingModels(gpus)
require.Len(t, tmp, 2)
}

func TestFindRunnerToUnload(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer done()

r1 := &runnerRef{refCount: 1, sessionDuration: 1}
r2 := &runnerRef{sessionDuration: 2}
r1 := &runnerRef{refCount: 1, sessionDuration: 1, numParallel: 1}
r2 := &runnerRef{sessionDuration: 2, numParallel: 1}

s := InitScheduler(ctx)
s.loadedMu.Lock()
@@ -492,12 +545,16 @@ func TestNeedsReload(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer done()

llm := &mockLlm{}
llm := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
do := api.DefaultOptions()
runner := &runnerRef{
model: &Model{AdapterPaths: []string{"adapter1"}, ProjectorPaths: []string{"projector1"}},
Options: &do,
llama: llm,
model: &Model{
AdapterPaths: []string{"adapter1"},
ProjectorPaths: []string{"projector1"},
},
Options: &do,
llama: llm,
numParallel: 1,
}
req := &LlmRequest{
model: &Model{
@@ -535,13 +592,13 @@ func TestUnloadAllRunners(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer done()

llm1 := &mockLlm{}
llm2 := &mockLlm{}
llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
llm2 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
s := InitScheduler(ctx)
s.unloadAllRunners()

r1 := &runnerRef{llama: llm1}
r2 := &runnerRef{llama: llm2}
r1 := &runnerRef{llama: llm1, numParallel: 1}
r2 := &runnerRef{llama: llm2, numParallel: 1}

s.loadedMu.Lock()
s.loaded["a"] = r1
@@ -554,29 +611,48 @@ func TestUnloadAllRunners(t *testing.T) {
}

func TestUnload(t *testing.T) {
llm1 := &mockLlm{}
r1 := &runnerRef{llama: llm1}
r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}}
llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
r1 := &runnerRef{llama: llm1, numParallel: 1}
r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}, numParallel: 1}
r1.unload()
require.True(t, llm1.closeCalled)
r2.unload()
require.Nil(t, r2.model)
}

func TestAlreadyCanceled(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
defer done()
dctx, done2 := context.WithCancel(ctx)
done2()
scenario1a := newScenario(t, dctx, "ollama-model-1", 10)
scenario1a.req.sessionDuration = &api.Duration{Duration: 0}
s := InitScheduler(ctx)
slog.Info("scenario1a")
s.pendingReqCh <- scenario1a.req
require.Len(t, s.pendingReqCh, 1)
s.Run(ctx)
time.Sleep(5 * time.Millisecond)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario1a.req.errCh)
require.Empty(t, scenario1a.req.successCh)
}

type mockLlm struct {
pingResp error
waitResp error
completionResp error
embeddingResp []float64
embeddingRespErr error
tokenizeResp []int
tokenizeRespErr error
detokenizeResp string
detonekizeRespErr error
closeResp error
closeCalled bool
estimatedVRAM uint64
estimatedTotal uint64
pingResp error
waitResp error
completionResp error
embeddingResp []float64
embeddingRespErr error
tokenizeResp []int
tokenizeRespErr error
detokenizeResp string
detonekizeRespErr error
closeResp error
closeCalled bool
estimatedVRAM uint64
estimatedTotal uint64
estimatedVRAMByGPU map[string]uint64
}

func (s *mockLlm) Ping(ctx context.Context) error { return s.pingResp }
@@ -597,5 +673,6 @@ func (s *mockLlm) Close() error {
s.closeCalled = true
return s.closeResp
}
func (s *mockLlm) EstimatedVRAM() uint64 { return s.estimatedVRAM }
func (s *mockLlm) EstimatedTotal() uint64 { return s.estimatedTotal }
func (s *mockLlm) EstimatedVRAM() uint64 { return s.estimatedVRAM }
func (s *mockLlm) EstimatedTotal() uint64 { return s.estimatedTotal }
func (s *mockLlm) EstimatedVRAMByGPU(gpuid string) uint64 { return s.estimatedVRAMByGPU[gpuid] }
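The new estimatedVRAMByGPU field in mockLlm mirrors the scheduler change above: updateFreeSpace now subtracts a per-GPU estimate (EstimatedVRAMByGPU) rather than spreading a runner's total evenly across its GPUs. A small standalone sketch of that accounting, with GPU IDs and byte counts taken from the test values above:

```go
package main

import "fmt"

func main() {
	// Each map stands in for one runner's EstimatedVRAMByGPU figures.
	runners := []map[string]uint64{
		{"1": 50, "2": 50},
		{"1": 125, "2": 75},
	}
	total := map[string]uint64{"1": 1000, "2": 2000}

	used := map[string]uint64{}
	for _, perGPU := range runners {
		for id, vram := range perGPU {
			used[id] += vram // per-GPU figure, not an even split of a runner-wide total
		}
	}
	for _, id := range []string{"1", "2"} {
		fmt.Printf("gpu %s free=%d\n", id, total[id]-used[id]) // gpu 1 free=825, gpu 2 free=1875
	}
}
```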