Compare commits
language_s ... v0.2.3 (315 commits)
Commit SHAs:

22c5451fc2 23ebbaa46e 9ac0a7a50b e5c65a85df 33627331a3 36c87c433b
179737feb7 47353f5ee4 10e768826c 5056bb9c01 c4cf8ad559 57ec6901eb
e64f9ebb44 791650ddef efbf41ed81 cf15589851 19753c18c0 41be28096a
37a570f962 5a739ff4cb 4e262eb2a8 4cfcbc328f 79292ff3e0 8ea500441d
b50c818623 b99e750b62 1f50356e8e 22c81f62ec 2d1e3c3229 4918fae535
0aff67877e f6f759fc5f 9544a57ee4 b51e3b63ac 6bbbc50f10 9bbddc37a7
e4ff73297d b44320db13 0bacb30007 53da2c6965 d8def1ff94 571dc61955
0e09c380fc 0ee87615c7 f8241bfba3 4607c70641 c12f1c5b99 a08f20d910
6cea036027 5796bfc401 f1a379aa56 9ae146993e e0348d3fe8 2cc854f8cb
5304b765b2 fb6cbc02fb 4fd5f3526a 842f85f758 9d30f9f8b3 631cfd9e62
326363b3a7 ac7a842e55 2c3fe1fd97 269ed6e6a2 78fb33dd07 8f8e736b13
d89454de80 af28b94533 e9188e971a 78eddfc068 02c24d3d01 52abc8acb7
4d71c559b2 0d16eb310e 8072e205ff 955f2a4e03 3c75113e37 ccd7785859
3b5a4a77f3 daed0634a9 0d4dd707bc 0e982bc1f4 6298f49816 ef757da2c9
e5352297d9 65a5040e09 d626b99b54 dddb58a38b 400056e154 d2f19024d0
69c04eecc4 996bb1b85e 422dcc3856 020bd60ab2 8e277b72bb 4f67b39d26
2425281317 0403e9860e 33a65e3ba3 88bcd79bb9 7e571f95f0 da8e2a0447
a30915bde1 58e3fff311 3f0b309ad4 e70610ef06 dfded7e075 173b550438
cff3f44f4a 26e4e66faf 97c9e11768 3518aaef33 1963c00201 27402cb7a2
c1218199cf 717f7229eb aae56abb7c 5f034f5b63 b910fa9010 6d4219083c
1ed4f521c4 de2163dafd 9bd00041fa 4e986a823c 2cc7d05012 123a722a6f
4d311eb731 cb42e607c5 2aa91a937b ccef9431c8 642cee1342 9a9e7d83c4
9929751cc8 17b7186cd7 189a43caa2 e835ef1836 7e7749224c c7c2f3bc22
54a79d6a8a 5bf5aeec01 e01e535cbb 0195d6a2f8 8e0641a9bf 662568d453
4ebb66c662 23e899f32d fedf71635e 97c59be653 9d8a4988e8 1ae0750a21
9d91e5e587 96624aa412 10f33b8537 4a633cc295 d34d88e417 52ce350b7a
2abebb2cbe 380e06e5be badf975e45 755b4e4fc2 1a1c99e334 21adf8b6d2
784bf88b0d e873841cbb 26d0bf9236 359b15a597 b55958a587 7784ca33ce
c9c8c98bf6 171796791f 176d0f7075 8ed51cac37 c9e6f0542d b0930626c5
e890be4814 b2799f111b 152fc202f5 4ad0d4d6d3 163cd3e77c 4c2c8f93dd
fd1e6e0590 89c79bec8c c7b77004e3 07d143f412 a12283e2ff 4b0050cf0e
0577af98f4 17ce203a26 d76555ffb5 2786dff5d3 225f0d1219 532db58311
6be309e1bd da3bf23354 26ab67732b 45cacbaf05 17df6520c8 6f351bf586
ff4f0cbd1d fc37c192ae 434dfe30c5 4e2b7e181d 48702dd149 68dfc6236a
5e8ff556cb 6fd04ca922 206797bda4 43ed358f9a b32ebb4f29 fb9cdfa723
efac488675 6b800aa7b7 dd7c9ebeaf 4dc7fb9525 c39761c552 aac367636d
15a687ae4b d528e1af75 cd234ce22c 94618b2365 1fd236d177 e87fc7200d
20b9f8e6f4 c69bc19e46 bba5d177aa c16f8af911 217f60c3d9 7bdcd1da94
ead259d877 2ff45d571d 157f09acdf 0f3cf1d42e 5bc029c529 e9a9c6a8e8
515f497e6d b27268aaef f5f245cc15 94d37fdcae b84aea1685 896495de7b
5528dd9d11 943172cbf4 85169e8d6f 34f142797a 46a7f1e74a 620d5c569e
b9ce7bf75e cddc63381c 385a32ecb5 030e765e76 ab8c929e20 ce0dc33cb8
78f81fc0e5 9b6c2e6eb6 1a29e9a879 4bf1da4944 de5beb06b3 98e65929dc
66ab48772f 22fcf8f7de 28c7813ac4 1d8616d30f d61ef8b954 89d9900152
4a048715b6 6297f85606 ed56428dd7 ad40b92b6a 8ce4032e72 42660466f8
e919f6811f bf7edb0d5d f38353d6b9 201d853fdf e40145a39d c895a7d13f
dad7a987ae 8ffb51749f 55f6eba049 04f3c12bb7 60323e0805 d4a86102fd
476fb8e892 829ff87bd1 f6b622c4b3 2e4da8eec2 763bb65dbb 7ca9605f54
eb2c443a79 278e25ea44 a50a87a7b8 98085015d5 bf54c845e9 c365f195a8
e91d0ef737 22f5c12ced 298c996e54 0fc0cfc6d2 914f68f021 bd1d119ba9
a03be18189 96bc232b43 bca7b12284 32cb1960c1 de781b37c8 3e21799377
26a00a0410 f77713bf1f 85a57006d1 c5e892cb3e 81fb06f530 a385382ff5
b8772a353f c2714fcbfd a2fc933fed
.github/workflows/release.yaml (32 changed lines, vendored)

@@ -147,7 +147,7 @@ jobs:
  run: |
  $ErrorActionPreference = "Stop"
  write-host "downloading AMD HIP Installer"
- Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
+ Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
  write-host "Installing AMD HIP"
  Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
  write-host "Completed AMD HIP"

@@ -437,6 +437,7 @@ jobs:
  env:
  OLLAMA_SKIP_IMAGE_BUILD: '1'
  PUSH: '1'
+ GH_TOKEN: ${{ github.token }}
  steps:
  - uses: actions/checkout@v4
  - name: Set Version

@@ -460,15 +461,20 @@ jobs:
  ls -lh dist/
  (cd dist; sha256sum * > sha256sum.txt)
  cat dist/sha256sum.txt
- - uses: ncipollo/release-action@v1
- with:
- name: ${{ env.RELEASE_VERSION }}
- allowUpdates: true
- artifacts: 'dist/*'
- draft: true
- prerelease: true
- omitBodyDuringUpdate: true
- generateReleaseNotes: true
- omitDraftDuringUpdate: true
- omitPrereleaseDuringUpdate: true
- replacesArtifacts: true
+ - name: Create or update Release
+ run: |
+ echo "Looking for existing release for ${{ env.RELEASE_VERSION }}"
+ OLD_TAG=$(gh release ls --json name,tagName | jq -r ".[] | select(.name == \"${{ env.RELEASE_VERSION }}\") | .tagName")
+ if [ -n "$OLD_TAG" ]; then
+ echo "Updating release ${{ env.RELEASE_VERSION }} to point to new tag ${GITHUB_REF_NAME}"
+ gh release edit ${OLD_TAG} --tag ${GITHUB_REF_NAME}
+ else
+ echo "Creating new release ${{ env.RELEASE_VERSION }} pointing to tag ${GITHUB_REF_NAME}"
+ gh release create ${GITHUB_REF_NAME} \
+ --title ${{ env.RELEASE_VERSION }} \
+ --draft \
+ --generate-notes \
+ --prerelease
+ fi
+ echo "Uploading artifacts for tag ${GITHUB_REF_NAME}"
+ gh release upload ${GITHUB_REF_NAME} dist/* --clobber
.github/workflows/test.yaml (20 changed lines, vendored)

@@ -34,13 +34,13 @@ jobs:
  git diff-tree -r --no-commit-id --name-only \
  $(git merge-base ${{ github.event.pull_request.base.sha }} ${{ github.event.pull_request.head.sha }}) \
  ${{ github.event.pull_request.head.sha }} \
- | xargs python3 -c "import sys; print(any([x.startswith('$1') for x in sys.argv[1:]]))"
+ | xargs python3 -c "import sys; from pathlib import Path; print(any(Path(x).match(glob) for x in sys.argv[1:] for glob in '$*'.split(' ')))"
  }

  {
- echo GENERATE=$(changed llm/)
- echo GENERATE_CUDA=$(changed llm/)
- echo GENERATE_ROCM=$(changed llm/)
+ echo GENERATE=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
+ echo GENERATE_CUDA=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
+ echo GENERATE_ROCM=$(changed 'llm/llama.cpp' 'llm/patches/**' 'llm/ext_server/**' 'llm/generate/**')
  } >>$GITHUB_OUTPUT

  generate:

@@ -58,6 +58,7 @@ jobs:
  runs-on: ${{ matrix.os }}
  env:
  GOARCH: ${{ matrix.arch }}
+ CGO_ENABLED: '1'
  steps:
  - uses: actions/checkout@v4
  - uses: actions/setup-go@v5

@@ -79,6 +80,7 @@ jobs:
  - run: go generate -x ./...
  if: ${{ ! startsWith(matrix.os, 'windows-') }}
  name: 'Unix Go Generate'
+ - run: go build .
  - uses: actions/upload-artifact@v4
  with:
  name: ${{ matrix.os }}-${{ matrix.arch }}-libraries

@@ -124,7 +126,7 @@ jobs:
  strategy:
  matrix:
  rocm-version:
- - '6.0.2'
+ - '6.1.1'
  runs-on: linux
  container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }}
  steps:

@@ -167,7 +169,7 @@ jobs:
  run: |
  $ErrorActionPreference = "Stop"
  write-host "downloading AMD HIP Installer"
- Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
+ Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe"
  write-host "Installing AMD HIP"
  Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait
  write-host "Completed AMD HIP"

@@ -269,9 +271,9 @@ jobs:
  mkdir -p llm/build/darwin/$ARCH/stub/bin
  touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
  if: ${{ startsWith(matrix.os, 'macos-') }}
- - uses: golangci/golangci-lint-action@v4
+ - uses: golangci/golangci-lint-action@v6
  with:
- args: --timeout 8m0s -v
+ args: --timeout 8m0s -v ${{ startsWith(matrix.os, 'windows-') && '' || '--disable gofmt --disable goimports' }}
  test:
  strategy:
  matrix:

@@ -287,6 +289,8 @@ jobs:
  GOARCH: ${{ matrix.arch }}
  CGO_ENABLED: '1'
  OLLAMA_CPU_TARGET: 'static'
+ OLLAMA_SKIP_CPU_GENERATE: '1'
+ OLLAMA_SKIP_METAL_GENERATE: '1'
  steps:
  - uses: actions/checkout@v4
  with:
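The reworked `changed()` helper above matches changed file paths against shell-style globs (via Python's `pathlib.Path.match`) rather than a single prefix, so the generate jobs run only when files such as `llm/llama.cpp`, `llm/patches/**`, `llm/ext_server/**`, or `llm/generate/**` are touched. A rough Go sketch of that matching idea follows; the `anyMatch` helper and its simplified handling of a trailing `**` are illustrative assumptions, not code from this change (the real check is the Python one-liner shown above).

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// anyMatch reports whether any changed path matches any glob pattern.
// Go's path.Match has no "**" support, so a trailing "/**" is approximated
// here with a directory-prefix check purely for illustration.
func anyMatch(changedFiles, globs []string) bool {
	for _, file := range changedFiles {
		for _, glob := range globs {
			if dir, ok := strings.CutSuffix(glob, "/**"); ok {
				if strings.HasPrefix(file, dir+"/") {
					return true
				}
				continue
			}
			if ok, _ := path.Match(glob, file); ok {
				return true
			}
		}
	}
	return false
}

func main() {
	globs := []string{"llm/llama.cpp", "llm/patches/**", "llm/ext_server/**", "llm/generate/**"}
	fmt.Println(anyMatch([]string{"llm/generate/gen_linux.sh"}, globs)) // true: under llm/generate/**
	fmt.Println(anyMatch([]string{"docs/api.md"}, globs))               // false: no pattern matches
}
```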
@@ -9,9 +9,26 @@ linters:
  - contextcheck
  - exportloopref
  - gocheckcompilerdirectives
- # FIXME: for some reason this errors on windows
+ # conditionally enable this on linux/macos
  # - gofmt
  # - goimports
+ - intrange
  - misspell
  - nilerr
+ - nolintlint
+ - nosprintfhostport
+ - testifylint
+ - unconvert
  - unused
+ - wastedassign
+ - whitespace
+ - usestdlibvars
+ severity:
+ default-severity: error
+ rules:
+ - linters:
+ - gofmt
+ - goimports
+ - intrange
+ - usestdlibvars
+ severity: info
@@ -2,7 +2,7 @@ ARG GOLANG_VERSION=1.22.1
  ARG CMAKE_VERSION=3.22.1
  # this CUDA_VERSION corresponds with the one specified in docs/gpu.md
  ARG CUDA_VERSION=11.3.1
- ARG ROCM_VERSION=6.0.2
+ ARG ROCM_VERSION=6.1.1

  # Copy the minimal context we need to run the generate scripts
  FROM scratch AS llm-code

@@ -70,12 +70,12 @@ RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx" sh gen_linux.sh
  FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx2-build-amd64
  RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx2" sh gen_linux.sh

- FROM --platform=linux/arm64 centos:7 AS cpu-builder-arm64
+ FROM --platform=linux/arm64 rockylinux:8 AS cpu-builder-arm64
  ARG CMAKE_VERSION
  ARG GOLANG_VERSION
  COPY ./scripts/rh_linux_deps.sh /
  RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
- ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
+ ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
  COPY --from=llm-code / /go/src/github.com/ollama/ollama/
  ARG OLLAMA_CUSTOM_CPU_DEFS
  ARG CGO_CFLAGS
README.md (20 changed lines)

@@ -6,7 +6,7 @@

  [](https://discord.gg/ollama)

- Get up and running with large language models locally.
+ Get up and running with large language models.

  ### macOS

@@ -53,8 +53,8 @@ Here are some example models that can be downloaded:
  | Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
  | Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` |
  | Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` |
- | Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
- | Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
+ | Gemma 2 | 9B | 5.5GB | `ollama run gemma2` |
+ | Gemma 2 | 27B | 16GB | `ollama run gemma2:27b` |
  | Mistral | 7B | 4.1GB | `ollama run mistral` |
  | Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
  | Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |

@@ -182,6 +182,12 @@ $ ollama run llama3 "Summarize this file: $(cat README.md)"
  Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
  ```

+ ### Show model information
+
+ ```
+ ollama show llama3
+ ```
+
  ### List models on your computer

  ```

@@ -285,6 +291,8 @@ See the [API documentation](./docs/api.md) for all endpoints.
  - [macai](https://github.com/Renset/macai) (macOS client for Ollama, ChatGPT, and other compatible API back-ends)
  - [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
  - [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
+ - [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama)
+ - [Ollama with Google Mesop](https://github.com/rapidarchitect/ollama_mesop/) (Mesop Chat Client implementation with Ollama)

  ### Terminal

@@ -307,6 +315,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
  - [ShellOracle](https://github.com/djcopley/ShellOracle)
  - [tlm](https://github.com/yusufcanb/tlm)
  - [podman-ollama](https://github.com/ericcurtin/podman-ollama)
+ - [gollama](https://github.com/sammcj/gollama)

  ### Database

@@ -324,11 +333,13 @@ See the [API documentation](./docs/api.md) for all endpoints.
  - [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
  - [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
  - [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java)
+ - [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs)
  - [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
  - [LiteLLM](https://github.com/BerriAI/litellm)
  - [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
  - [Ollama for Ruby](https://github.com/gbaptista/ollama-ai)
  - [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
+ - [Ollama-hpp for C++](https://github.com/jmont-dev/ollama-hpp)
  - [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
  - [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
  - [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)

@@ -346,6 +357,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
  - [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama)
  - [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) with an [example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama)
  - [LlamaScript](https://github.com/Project-Llama/llamascript)

  ### Mobile

  - [Enchanted](https://github.com/AugustDev/enchanted)

@@ -378,7 +390,9 @@ See the [API documentation](./docs/api.md) for all endpoints.
  - [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
  - [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord Bot w/ Tuning Documentation)
  - [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in python. Uses Ollama to create personalities.
+ - [Headless Ollama](https://github.com/nischalj10/headless-ollama) (Scripts to automatically install ollama client & models on any OS for apps that depends on ollama server)

  ### Supported backends

  - [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.

@@ -23,11 +23,9 @@ import (
  "net"
  "net/http"
  "net/url"
- "os"
  "runtime"
- "strconv"
- "strings"

+ "github.com/ollama/ollama/envconfig"
  "github.com/ollama/ollama/format"
  "github.com/ollama/ollama/version"
 )

@@ -65,10 +63,7 @@ func checkError(resp *http.Response, body []byte) error {
  // If the variable is not specified, a default ollama host and port will be
  // used.
 func ClientFromEnvironment() (*Client, error) {
- ollamaHost, err := GetOllamaHost()
- if err != nil {
- return nil, err
- }
+ ollamaHost := envconfig.Host

  return &Client{
  base: &url.URL{

@@ -79,52 +74,6 @@ func ClientFromEnvironment() (*Client, error) {
  }, nil
 }

- type OllamaHost struct {
- Scheme string
- Host string
- Port string
- }
-
- func GetOllamaHost() (OllamaHost, error) {
- defaultPort := "11434"
-
- hostVar := os.Getenv("OLLAMA_HOST")
- hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))
-
- scheme, hostport, ok := strings.Cut(hostVar, "://")
- switch {
- case !ok:
- scheme, hostport = "http", hostVar
- case scheme == "http":
- defaultPort = "80"
- case scheme == "https":
- defaultPort = "443"
- }
-
- // trim trailing slashes
- hostport = strings.TrimRight(hostport, "/")
-
- host, port, err := net.SplitHostPort(hostport)
- if err != nil {
- host, port = "127.0.0.1", defaultPort
- if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
- host = ip.String()
- } else if hostport != "" {
- host = hostport
- }
- }
-
- if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
- return OllamaHost{}, ErrInvalidHostPort
- }
-
- return OllamaHost{
- Scheme: scheme,
- Host: host,
- Port: port,
- }, nil
- }
-
 func NewClient(base *url.URL, http *http.Client) *Client {
  return &Client{
  base: base,

@@ -355,8 +304,8 @@ func (c *Client) List(ctx context.Context) (*ListResponse, error) {
 }

 // List running models.
- func (c *Client) ListRunning(ctx context.Context) (*ListResponse, error) {
- var lr ListResponse
+ func (c *Client) ListRunning(ctx context.Context) (*ProcessResponse, error) {
+ var lr ProcessResponse
  if err := c.do(ctx, http.MethodGet, "/api/ps", nil, &lr); err != nil {
  return nil, err
  }
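With `GetOllamaHost` removed, `ClientFromEnvironment` now takes its address directly from `envconfig.Host`. A minimal usage sketch of the client API touched in this diff, assuming the `github.com/ollama/ollama/api` package; error handling is abbreviated:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	// Reads OLLAMA_HOST (via envconfig) or falls back to the default host/port.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// List locally available models.
	resp, err := client.List(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range resp.Models {
		fmt.Println(m.Name, m.Size)
	}
}
```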
@@ -1,11 +1,9 @@
 package api

 import (
- "fmt"
- "net"
  "testing"

- "github.com/stretchr/testify/assert"
+ "github.com/ollama/ollama/envconfig"
 )

 func TestClientFromEnvironment(t *testing.T) {

@@ -35,6 +33,7 @@ func TestClientFromEnvironment(t *testing.T) {
  for k, v := range testCases {
  t.Run(k, func(t *testing.T) {
  t.Setenv("OLLAMA_HOST", v.value)
+ envconfig.LoadConfig()

  client, err := ClientFromEnvironment()
  if err != v.err {

@@ -46,40 +45,4 @@ func TestClientFromEnvironment(t *testing.T) {
  }
  })
  }
-
- hostTestCases := map[string]*testCase{
- "empty": {value: "", expect: "127.0.0.1:11434"},
- "only address": {value: "1.2.3.4", expect: "1.2.3.4:11434"},
- "only port": {value: ":1234", expect: ":1234"},
- "address and port": {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
- "hostname": {value: "example.com", expect: "example.com:11434"},
- "hostname and port": {value: "example.com:1234", expect: "example.com:1234"},
- "zero port": {value: ":0", expect: ":0"},
- "too large port": {value: ":66000", err: ErrInvalidHostPort},
- "too small port": {value: ":-1", err: ErrInvalidHostPort},
- "ipv6 localhost": {value: "[::1]", expect: "[::1]:11434"},
- "ipv6 world open": {value: "[::]", expect: "[::]:11434"},
- "ipv6 no brackets": {value: "::1", expect: "[::1]:11434"},
- "ipv6 + port": {value: "[::1]:1337", expect: "[::1]:1337"},
- "extra space": {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
- "extra quotes": {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
- "extra space+quotes": {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
- "extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
- }
-
- for k, v := range hostTestCases {
- t.Run(k, func(t *testing.T) {
- t.Setenv("OLLAMA_HOST", v.value)
-
- oh, err := GetOllamaHost()
- if err != v.err {
- t.Fatalf("expected %s, got %s", v.err, err)
- }
-
- if err == nil {
- host := net.JoinHostPort(oh.Host, oh.Port)
- assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
- }
- })
- }
 }
api/types.go (66 changed lines)

@@ -2,7 +2,6 @@ package api

 import (
  "encoding/json"
- "errors"
  "fmt"
  "log/slog"
  "math"

@@ -169,7 +168,7 @@ type Runner struct {
  F16KV bool `json:"f16_kv,omitempty"`
  LogitsAll bool `json:"logits_all,omitempty"`
  VocabOnly bool `json:"vocab_only,omitempty"`
- UseMMap bool `json:"use_mmap,omitempty"`
+ UseMMap *bool `json:"use_mmap,omitempty"`
  UseMLock bool `json:"use_mlock,omitempty"`
  NumThread int `json:"num_thread,omitempty"`
 }

@@ -223,6 +222,7 @@ type ShowRequest struct {
  Model string `json:"model"`
  System string `json:"system"`
  Template string `json:"template"`
+ Verbose bool `json:"verbose"`

  Options map[string]interface{} `json:"options"`

@@ -239,6 +239,9 @@ type ShowResponse struct {
  System string `json:"system,omitempty"`
  Details ModelDetails `json:"details,omitempty"`
  Messages []Message `json:"messages,omitempty"`
+ ModelInfo map[string]any `json:"model_info,omitempty"`
+ ProjectorInfo map[string]any `json:"projector_info,omitempty"`
+ ModifiedAt time.Time `json:"modified_at,omitempty"`
 }

 // CopyRequest is the request passed to [Client.Copy].

@@ -282,19 +285,40 @@ type PushRequest struct {

 // ListResponse is the response from [Client.List].
 type ListResponse struct {
- Models []ModelResponse `json:"models"`
+ Models []ListModelResponse `json:"models"`
 }

- // ModelResponse is a single model description in [ListResponse].
- type ModelResponse struct {
+ // ProcessResponse is the response from [Client.Process].
+ type ProcessResponse struct {
+ Models []ProcessModelResponse `json:"models"`
+ }
+
+ // ListModelResponse is a single model description in [ListResponse].
+ type ListModelResponse struct {
  Name string `json:"name"`
  Model string `json:"model"`
- ModifiedAt time.Time `json:"modified_at,omitempty"`
+ ModifiedAt time.Time `json:"modified_at"`
  Size int64 `json:"size"`
  Digest string `json:"digest"`
  Details ModelDetails `json:"details,omitempty"`
- ExpiresAt time.Time `json:"expires_at,omitempty"`
- SizeVRAM int64 `json:"size_vram,omitempty"`
+ }
+
+ // ProcessModelResponse is a single model description in [ProcessResponse].
+ type ProcessModelResponse struct {
+ Name string `json:"name"`
+ Model string `json:"model"`
+ Size int64 `json:"size"`
+ Digest string `json:"digest"`
+ Details ModelDetails `json:"details,omitempty"`
+ ExpiresAt time.Time `json:"expires_at"`
+ SizeVRAM int64 `json:"size_vram"`
+ }
+
+ type RetrieveModelResponse struct {
+ Id string `json:"id"`
+ Object string `json:"object"`
+ Created int64 `json:"created"`
+ OwnedBy string `json:"owned_by"`
 }

 type TokenResponse struct {

@@ -363,8 +387,6 @@ func (m *Metrics) Summary() {
  }
 }

- var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")
-
 func (opts *Options) FromMap(m map[string]interface{}) error {
  valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
  typeOpts := reflect.TypeOf(opts).Elem() // types of the fields in the options struct

@@ -437,6 +459,17 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
  slice[i] = str
  }
  field.Set(reflect.ValueOf(slice))
+ case reflect.Pointer:
+ var b bool
+ if field.Type() == reflect.TypeOf(&b) {
+ val, ok := val.(bool)
+ if !ok {
+ return fmt.Errorf("option %q must be of type boolean", key)
+ }
+ field.Set(reflect.ValueOf(&val))
+ } else {
+ return fmt.Errorf("unknown type loading config params: %v %v", field.Kind(), field.Type())
+ }
  default:
  return fmt.Errorf("unknown type loading config params: %v", field.Kind())
  }

@@ -479,7 +512,7 @@ func DefaultOptions() Options {
  LowVRAM: false,
  F16KV: true,
  UseMLock: false,
- UseMMap: true,
+ UseMMap: nil,
  UseNUMA: false,
  },
 }

@@ -576,6 +609,17 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) {
  case reflect.Slice:
  // TODO: only string slices are supported right now
  out[key] = vals
+ case reflect.Pointer:
+ var b bool
+ if field.Type() == reflect.TypeOf(&b) {
+ boolVal, err := strconv.ParseBool(vals[0])
+ if err != nil {
+ return nil, fmt.Errorf("invalid bool value %s", vals)
+ }
+ out[key] = &boolVal
+ } else {
+ return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
+ }
  default:
  return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
  }
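The switch of `UseMMap` from `bool` to `*bool` (together with the new `reflect.Pointer` cases in `FromMap` and `FormatParams`) makes the option tri-state: unset, explicitly true, or explicitly false. A small self-contained sketch of that pattern follows; the struct is trimmed to one illustrative field rather than the full `api.Options`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// A *bool distinguishes "not sent" (nil) from an explicit true or false,
// which a plain bool cannot do after JSON decoding.
type runnerOptions struct {
	UseMMap *bool `json:"use_mmap,omitempty"`
}

func main() {
	for _, body := range []string{`{}`, `{"use_mmap":true}`, `{"use_mmap":false}`} {
		var o runnerOptions
		if err := json.Unmarshal([]byte(body), &o); err != nil {
			panic(err)
		}
		switch {
		case o.UseMMap == nil:
			fmt.Println(body, "=> unset, runtime chooses a default")
		case *o.UseMMap:
			fmt.Println(body, "=> mmap explicitly enabled")
		default:
			fmt.Println(body, "=> mmap explicitly disabled")
		}
	}
}
```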
@@ -2,6 +2,7 @@ package api

 import (
  "encoding/json"
+ "fmt"
  "math"
  "testing"
  "time"

@@ -72,13 +73,13 @@ func TestDurationMarshalUnmarshal(t *testing.T) {
  },
  {
  "positive duration",
- time.Duration(42 * time.Second),
- time.Duration(42 * time.Second),
+ 42 * time.Second,
+ 42 * time.Second,
  },
  {
  "another positive duration",
- time.Duration(42 * time.Minute),
- time.Duration(42 * time.Minute),
+ 42 * time.Minute,
+ 42 * time.Minute,
  },
  {
  "zero duration",

@@ -105,3 +106,105 @@ func TestDurationMarshalUnmarshal(t *testing.T) {
  })
  }
 }
+
+ func TestUseMmapParsingFromJSON(t *testing.T) {
+ tr := true
+ fa := false
+ tests := []struct {
+ name string
+ req string
+ exp *bool
+ }{
+ {
+ name: "Undefined",
+ req: `{ }`,
+ exp: nil,
+ },
+ {
+ name: "True",
+ req: `{ "use_mmap": true }`,
+ exp: &tr,
+ },
+ {
+ name: "False",
+ req: `{ "use_mmap": false }`,
+ exp: &fa,
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ var oMap map[string]interface{}
+ err := json.Unmarshal([]byte(test.req), &oMap)
+ require.NoError(t, err)
+ opts := DefaultOptions()
+ err = opts.FromMap(oMap)
+ require.NoError(t, err)
+ assert.Equal(t, test.exp, opts.UseMMap)
+ })
+ }
+ }
+
+ func TestUseMmapFormatParams(t *testing.T) {
+ tr := true
+ fa := false
+ tests := []struct {
+ name string
+ req map[string][]string
+ exp *bool
+ err error
+ }{
+ {
+ name: "True",
+ req: map[string][]string{
+ "use_mmap": {"true"},
+ },
+ exp: &tr,
+ err: nil,
+ },
+ {
+ name: "False",
+ req: map[string][]string{
+ "use_mmap": {"false"},
+ },
+ exp: &fa,
+ err: nil,
+ },
+ {
+ name: "Numeric True",
+ req: map[string][]string{
+ "use_mmap": {"1"},
+ },
+ exp: &tr,
+ err: nil,
+ },
+ {
+ name: "Numeric False",
+ req: map[string][]string{
+ "use_mmap": {"0"},
+ },
+ exp: &fa,
+ err: nil,
+ },
+ {
+ name: "invalid string",
+ req: map[string][]string{
+ "use_mmap": {"foo"},
+ },
+ exp: nil,
+ err: fmt.Errorf("invalid bool value [foo]"),
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ resp, err := FormatParams(test.req)
+ require.Equal(t, test.err, err)
+ respVal, ok := resp["use_mmap"]
+ if test.exp != nil {
+ assert.True(t, ok, "resp: %v", resp)
+ assert.Equal(t, *test.exp, *respVal.(*bool))
+ }
+ })
+ }
+ }
@@ -5,6 +5,8 @@ import (
  "log/slog"
  "os"
  "path/filepath"
+ "strconv"
+ "strings"

  "github.com/ollama/ollama/envconfig"
 )

@@ -24,6 +26,7 @@ func InitLogging() {
  logFile = os.Stderr
  // TODO - write one-line to the app.log file saying we're running in console mode to help avoid confusion
  } else {
+ rotateLogs(AppLogFile)
  logFile, err = os.OpenFile(AppLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
  if err != nil {
  slog.Error(fmt.Sprintf("failed to create server log %v", err))

@@ -46,3 +49,32 @@ func InitLogging() {

  slog.Info("ollama app started")
 }
+
+ func rotateLogs(logFile string) {
+ if _, err := os.Stat(logFile); os.IsNotExist(err) {
+ return
+ }
+ index := strings.LastIndex(logFile, ".")
+ pre := logFile[:index]
+ post := "." + logFile[index+1:]
+ for i := LogRotationCount; i > 0; i-- {
+ older := pre + "-" + strconv.Itoa(i) + post
+ newer := pre + "-" + strconv.Itoa(i-1) + post
+ if i == 1 {
+ newer = pre + post
+ }
+ if _, err := os.Stat(newer); err == nil {
+ if _, err := os.Stat(older); err == nil {
+ err := os.Remove(older)
+ if err != nil {
+ slog.Warn("Failed to remove older log", "older", older, "error", err)
+ continue
+ }
+ }
+ err := os.Rename(newer, older)
+ if err != nil {
+ slog.Warn("Failed to rotate log", "older", older, "newer", newer, "error", err)
+ }
+ }
+ }
+ }
app/lifecycle/logging_test.go (new file, 44 lines)

@@ -0,0 +1,44 @@
+ package lifecycle
+
+ import (
+ "os"
+ "path/filepath"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ )
+
+ func TestRotateLogs(t *testing.T) {
+ logDir := t.TempDir()
+ logFile := filepath.Join(logDir, "testlog.log")
+
+ // No log exists
+ rotateLogs(logFile)
+
+ require.NoError(t, os.WriteFile(logFile, []byte("1"), 0644))
+ assert.FileExists(t, logFile)
+ // First rotation
+ rotateLogs(logFile)
+ assert.FileExists(t, filepath.Join(logDir, "testlog-1.log"))
+ assert.NoFileExists(t, filepath.Join(logDir, "testlog-2.log"))
+ assert.NoFileExists(t, logFile)
+
+ // Should be a no-op without a new log
+ rotateLogs(logFile)
+ assert.FileExists(t, filepath.Join(logDir, "testlog-1.log"))
+ assert.NoFileExists(t, filepath.Join(logDir, "testlog-2.log"))
+ assert.NoFileExists(t, logFile)
+
+ for i := 2; i <= LogRotationCount+1; i++ {
+ require.NoError(t, os.WriteFile(logFile, []byte(strconv.Itoa(i)), 0644))
+ assert.FileExists(t, logFile)
+ rotateLogs(logFile)
+ assert.NoFileExists(t, logFile)
+ for j := 1; j < i; j++ {
+ assert.FileExists(t, filepath.Join(logDir, "testlog-"+strconv.Itoa(j)+".log"))
+ }
+ assert.NoFileExists(t, filepath.Join(logDir, "testlog-"+strconv.Itoa(i+1)+".log"))
+ }
+ }
@@ -21,6 +21,7 @@
  ServerLogFile = "/tmp/ollama.log"
  UpgradeLogFile = "/tmp/ollama_update.log"
  Installer = "OllamaSetup.exe"
+ LogRotationCount = 5
 )

 func init() {

@@ -69,7 +70,6 @@
  slog.Error(fmt.Sprintf("create ollama dir %s: %v", AppDataDir, err))
  }
  }
-
  } else if runtime.GOOS == "darwin" {
  // TODO
  AppName += ".app"
@@ -15,7 +15,7 @@ import (
 )

 func getCLIFullPath(command string) string {
- cmdPath := ""
+ var cmdPath string
  appExe, err := os.Executable()
  if err == nil {
  cmdPath = filepath.Join(filepath.Dir(appExe), command)

@@ -54,7 +54,7 @@ func start(ctx context.Context, command string) (*exec.Cmd, error) {
  return nil, fmt.Errorf("failed to spawn server stderr pipe: %w", err)
  }

- // TODO - rotation
+ rotateLogs(ServerLogFile)
  logFile, err := os.OpenFile(ServerLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
  if err != nil {
  return nil, fmt.Errorf("failed to create server log: %w", err)

@@ -65,7 +65,6 @@ func start(ctx context.Context, command string) (*exec.Cmd, error) {
  if err != nil {
  if !errors.Is(err, os.ErrNotExist) {
  return nil, fmt.Errorf("stat ollama server log dir %s: %v", logDir, err)
-
  }

  if err := os.MkdirAll(logDir, 0o755); err != nil {
@@ -24,7 +24,8 @@ func terminate(cmd *exec.Cmd) error {
  if err != nil {
  return err
  }
- defer dll.Release() // nolint: errcheck
+ //nolint:errcheck
+ defer dll.Release()

  pid := cmd.Process.Pid

@@ -73,7 +74,8 @@ func isProcessExited(pid int) (bool, error) {
  if err != nil {
  return false, fmt.Errorf("failed to open process: %v", err)
  }
- defer windows.CloseHandle(hProcess) // nolint: errcheck
+ //nolint:errcheck
+ defer windows.CloseHandle(hProcess)

  var exitCode uint32
  err = windows.GetExitCodeProcess(hProcess, &exitCode)
@@ -78,7 +78,7 @@ func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
  }
  defer resp.Body.Close()

- if resp.StatusCode == 204 {
+ if resp.StatusCode == http.StatusNoContent {
  slog.Debug("check update response 204 (current version is up to date)")
  return false, updateResp
  }

@@ -87,7 +87,7 @@ func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
  slog.Warn(fmt.Sprintf("failed to read body response: %s", err))
  }

- if resp.StatusCode != 200 {
+ if resp.StatusCode != http.StatusOK {
  slog.Info(fmt.Sprintf("check update error %d - %.96s", resp.StatusCode, string(body)))
  return false, updateResp
  }

@@ -114,7 +114,7 @@ func DownloadNewRelease(ctx context.Context, updateResp UpdateResponse) error {
  if err != nil {
  return fmt.Errorf("error checking update: %w", err)
  }
- if resp.StatusCode != 200 {
+ if resp.StatusCode != http.StatusOK {
  return fmt.Errorf("unexpected status attempting to download update %d", resp.StatusCode)
  }
  resp.Body.Close()
@@ -88,10 +88,15 @@ DialogFontSize=12
 [Files]
 Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
 Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
- Source: "..\dist\windows-{#ARCH}\*.dll"; DestDir: "{app}"; Flags: ignoreversion 64bit
 Source: "..\dist\windows-{#ARCH}\ollama_runners\*"; DestDir: "{app}\ollama_runners"; Flags: ignoreversion 64bit recursesubdirs
 Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
 Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion
+ #if DirExists("..\dist\windows-amd64\cuda")
+ Source: "..\dist\windows-amd64\cuda\*"; DestDir: "{app}\cuda\"; Flags: ignoreversion recursesubdirs
+ #endif
+ #if DirExists("..\dist\windows-amd64\oneapi")
+ Source: "..\dist\windows-amd64\oneapi\*"; DestDir: "{app}\oneapi\"; Flags: ignoreversion recursesubdirs
+ #endif
 #if DirExists("..\dist\windows-amd64\rocm")
 Source: "..\dist\windows-amd64\rocm\*"; DestDir: "{app}\rocm\"; Flags: ignoreversion recursesubdirs
 #endif

@@ -122,6 +127,10 @@ Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\models"
 Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\history"
 ; NOTE: if the user has a custom OLLAMA_MODELS it will be preserved

+ [InstallDelete]
+ Type: filesandordirs; Name: "{%TEMP}\ollama*"
+ Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama"
+
 [Messages]
 WizardReady=Ollama Windows Preview
 ReadyLabel1=%nLet's get you up and running with your own large language models.
@@ -4,5 +4,5 @@ write-host "Welcome to Ollama!"
  write-host ""
  write-host "Run your first model:"
  write-host ""
- write-host "`tollama run llama2"
+ write-host "`tollama run llama3"
  write-host ""
@@ -29,7 +29,6 @@ func GetID() string {
  initStore()
  }
  return store.ID
-
 }

 func GetFirstTimeRun() bool {
@@ -47,7 +47,6 @@ func nativeLoop() {
 default:
 pTranslateMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
 pDispatchMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
-
 }
 }
 }
@@ -160,8 +159,8 @@ func (t *winTray) wndProc(hWnd windows.Handle, message uint32, wParam, lParam ui
 lResult, _, _ = pDefWindowProc.Call(
 uintptr(hWnd),
 uintptr(message),
-uintptr(wParam),
+wParam,
-uintptr(lParam),
+lParam,
 )
 }
 return
@@ -186,7 +186,7 @@ func (t *winTray) initInstance() error {
 t.muNID.Lock()
 defer t.muNID.Unlock()
 t.nid = &notifyIconData{
-Wnd: windows.Handle(t.window),
+Wnd: t.window,
 ID: 100,
 Flags: NIF_MESSAGE,
 CallbackMessage: t.wmSystrayMessage,
@@ -197,7 +197,6 @@ func (t *winTray) initInstance() error {
 }
 
 func (t *winTray) createMenu() error {
-
 menuHandle, _, err := pCreatePopupMenu.Call()
 if menuHandle == 0 {
 return err
@@ -246,7 +245,7 @@ func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title
 mi := menuItemInfo{
 Mask: MIIM_FTYPE | MIIM_STRING | MIIM_ID | MIIM_STATE,
 Type: MFT_STRING,
-ID: uint32(menuItemId),
+ID: menuItemId,
 TypeData: titlePtr,
 Cch: uint32(len(title)),
 }
@@ -302,11 +301,10 @@ func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title
 }
 
 func (t *winTray) addSeparatorMenuItem(menuItemId, parentId uint32) error {
-
 mi := menuItemInfo{
 Mask: MIIM_FTYPE | MIIM_ID | MIIM_STATE,
 Type: MFT_SEPARATOR,
-ID: uint32(menuItemId),
+ID: menuItemId,
 }
 
 mi.Size = uint32(unsafe.Sizeof(mi))
@@ -426,7 +424,6 @@ func iconBytesToFilePath(iconBytes []byte) (string, error) {
 // Loads an image from file and shows it in tray.
 // Shell_NotifyIcon: https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159(v=vs.85).aspx
 func (t *winTray) setIcon(src string) error {
-
 h, err := t.loadIconFrom(src)
 if err != nil {
 return err
@@ -444,7 +441,6 @@ func (t *winTray) setIcon(src string) error {
 // Loads an image from file to be shown in tray or menu item.
 // LoadImage: https://msdn.microsoft.com/en-us/library/windows/desktop/ms648045(v=vs.85).aspx
 func (t *winTray) loadIconFrom(src string) (windows.Handle, error) {
-
 // Save and reuse handles of loaded images
 t.muLoadedImages.RLock()
 h, ok := t.loadedImages[src]
cmd/cmd.go (226 changes)
@@ -20,6 +20,7 @@ import (
 "path/filepath"
 "regexp"
 "runtime"
+"slices"
 "strings"
 "syscall"
 "time"
@@ -29,7 +30,6 @@ import (
 "github.com/olekukonko/tablewriter"
 "github.com/spf13/cobra"
 "golang.org/x/crypto/ssh"
-"golang.org/x/exp/slices"
 "golang.org/x/term"
 
 "github.com/ollama/ollama/api"
@@ -162,9 +162,6 @@ func tempZipFiles(path string) (string, error) {
 }
 defer tempfile.Close()
 
-zipfile := zip.NewWriter(tempfile)
-defer zipfile.Close()
-
 detectContentType := func(path string) (string, error) {
 f, err := os.Open(path)
 if err != nil {
@@ -233,6 +230,9 @@ func tempZipFiles(path string) (string, error) {
 files = append(files, tks...)
 }
 
+zipfile := zip.NewWriter(tempfile)
+defer zipfile.Close()
+
 for _, file := range files {
 f, err := os.Open(file)
 if err != nil {
@@ -287,38 +287,12 @@ func createBlob(cmd *cobra.Command, client *api.Client, path string) (string, er
 }
 
 func RunHandler(cmd *cobra.Command, args []string) error {
-client, err := api.ClientFromEnvironment()
-if err != nil {
-return err
-}
-
-name := args[0]
-
-// check if the model exists on the server
-show, err := client.Show(cmd.Context(), &api.ShowRequest{Name: name})
-var statusError api.StatusError
-switch {
-case errors.As(err, &statusError) && statusError.StatusCode == http.StatusNotFound:
-if err := PullHandler(cmd, []string{name}); err != nil {
-return err
-}
-
-show, err = client.Show(cmd.Context(), &api.ShowRequest{Name: name})
-if err != nil {
-return err
-}
-case err != nil:
-return err
-}
-
 interactive := true
 
 opts := runOptions{
 Model: args[0],
 WordWrap: os.Getenv("TERM") == "xterm-256color",
 Options: map[string]interface{}{},
-MultiModal: slices.Contains(show.Details.Families, "clip"),
-ParentModel: show.Details.ParentModel,
 }
 
 format, err := cmd.Flags().GetString("format")
@@ -362,12 +336,39 @@ func RunHandler(cmd *cobra.Command, args []string) error {
 }
 opts.WordWrap = !nowrap
 
-if !interactive {
-return generate(cmd, opts)
+// Fill out the rest of the options based on information about the
+// model.
+client, err := api.ClientFromEnvironment()
+if err != nil {
+return err
 }
 
+name := args[0]
+info, err := func() (*api.ShowResponse, error) {
+showReq := &api.ShowRequest{Name: name}
+info, err := client.Show(cmd.Context(), showReq)
+var se api.StatusError
+if errors.As(err, &se) && se.StatusCode == http.StatusNotFound {
+if err := PullHandler(cmd, []string{name}); err != nil {
+return nil, err
+}
+return client.Show(cmd.Context(), &api.ShowRequest{Name: name})
+}
+return info, err
+}()
+if err != nil {
+return err
+}
+
+opts.MultiModal = slices.Contains(info.Details.Families, "clip")
+opts.ParentModel = info.Details.ParentModel
+opts.Messages = append(opts.Messages, info.Messages...)
+
+if interactive {
 return generateInteractive(cmd, opts)
 }
+return generate(cmd, opts)
+}
 
 func errFromUnknownKey(unknownKeyErr error) error {
 // find SSH public key in the error message
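The reworked `RunHandler` above resolves model metadata up front and transparently pulls a missing model before retrying `Show`. Below is a minimal sketch of that flow against the public `api` client; the `ensureModel` helper and the hard-coded model name are illustrative assumptions, not part of the change itself.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"

	"github.com/ollama/ollama/api"
)

// ensureModel asks the server about a model and, on a 404, pulls it and asks again.
func ensureModel(ctx context.Context, client *api.Client, name string) (*api.ShowResponse, error) {
	resp, err := client.Show(ctx, &api.ShowRequest{Name: name})
	var se api.StatusError
	if errors.As(err, &se) && se.StatusCode == http.StatusNotFound {
		// Model is missing locally: pull it, then retry the Show request.
		if err := client.Pull(ctx, &api.PullRequest{Name: name}, func(api.ProgressResponse) error { return nil }); err != nil {
			return nil, err
		}
		return client.Show(ctx, &api.ShowRequest{Name: name})
	}
	return resp, err
}

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		panic(err)
	}
	info, err := ensureModel(context.Background(), client, "llama3")
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Details.ParameterSize)
}
```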
@@ -579,10 +580,6 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 return err
 }
 
-if len(args) != 1 {
-return errors.New("missing model name")
-}
-
 license, errLicense := cmd.Flags().GetBool("license")
 modelfile, errModelfile := cmd.Flags().GetBool("modelfile")
 parameters, errParams := cmd.Flags().GetBool("parameters")
@@ -625,8 +622,6 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 
 if flagsSet > 1 {
 return errors.New("only one of '--license', '--modelfile', '--parameters', '--system', or '--template' can be specified")
-} else if flagsSet == 0 {
-return errors.New("one of '--license', '--modelfile', '--parameters', '--system', or '--template' must be specified")
 }
 
 req := api.ShowRequest{Name: args[0]}
@@ -635,6 +630,7 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 return err
 }
 
+if flagsSet == 1 {
 switch showType {
 case "license":
 fmt.Println(resp.License)
@@ -651,6 +647,124 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 return nil
 }
 
+showInfo(resp)
+
+return nil
+}
+
+func showInfo(resp *api.ShowResponse) {
+arch := resp.ModelInfo["general.architecture"].(string)
+
+modelData := [][]string{
+{"arch", arch},
+{"parameters", resp.Details.ParameterSize},
+{"quantization", resp.Details.QuantizationLevel},
+{"context length", fmt.Sprintf("%v", resp.ModelInfo[fmt.Sprintf("%s.context_length", arch)].(float64))},
+{"embedding length", fmt.Sprintf("%v", resp.ModelInfo[fmt.Sprintf("%s.embedding_length", arch)].(float64))},
+}
+
+mainTableData := [][]string{
+{"Model"},
+{renderSubTable(modelData, false)},
+}
+
+if resp.ProjectorInfo != nil {
+projectorData := [][]string{
+{"arch", "clip"},
+{"parameters", format.HumanNumber(uint64(resp.ProjectorInfo["general.parameter_count"].(float64)))},
+}
+
+if projectorType, ok := resp.ProjectorInfo["clip.projector_type"]; ok {
+projectorData = append(projectorData, []string{"projector type", projectorType.(string)})
+}
+
+projectorData = append(projectorData,
+[]string{"embedding length", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.embedding_length"].(float64))},
+[]string{"projection dimensionality", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.projection_dim"].(float64))},
+)
+
+mainTableData = append(mainTableData,
+[]string{"Projector"},
+[]string{renderSubTable(projectorData, false)},
+)
+}
+
+if resp.Parameters != "" {
+mainTableData = append(mainTableData, []string{"Parameters"}, []string{formatParams(resp.Parameters)})
+}
+
+if resp.System != "" {
+mainTableData = append(mainTableData, []string{"System"}, []string{renderSubTable(twoLines(resp.System), true)})
+}
+
+if resp.License != "" {
+mainTableData = append(mainTableData, []string{"License"}, []string{renderSubTable(twoLines(resp.License), true)})
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetAutoWrapText(false)
+table.SetBorder(false)
+table.SetAlignment(tablewriter.ALIGN_LEFT)
+
+for _, v := range mainTableData {
+table.Append(v)
+}
+
+table.Render()
+}
+
+func renderSubTable(data [][]string, file bool) string {
+var buf bytes.Buffer
+table := tablewriter.NewWriter(&buf)
+table.SetAutoWrapText(!file)
+table.SetBorder(false)
+table.SetNoWhiteSpace(true)
+table.SetTablePadding("\t")
+table.SetAlignment(tablewriter.ALIGN_LEFT)
+
+for _, v := range data {
+table.Append(v)
+}
+
+table.Render()
+
+renderedTable := buf.String()
+lines := strings.Split(renderedTable, "\n")
+for i, line := range lines {
+lines[i] = "\t" + line
+}
+
+return strings.Join(lines, "\n")
+}
+
+func twoLines(s string) [][]string {
+lines := strings.Split(s, "\n")
+res := [][]string{}
+
+count := 0
+for _, line := range lines {
+line = strings.TrimSpace(line)
+if line != "" {
+count++
+res = append(res, []string{line})
+if count == 2 {
+return res
+}
+}
+}
+return res
+}
+
+func formatParams(s string) string {
+lines := strings.Split(s, "\n")
+table := [][]string{}
+
+for _, line := range lines {
+table = append(table, strings.Fields(line))
+}
+return renderSubTable(table, false)
+}
+
 func CopyHandler(cmd *cobra.Command, args []string) error {
 client, err := api.ClientFromEnvironment()
 if err != nil {
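The new `showInfo` nests one `tablewriter` table inside another by rendering the inner table to a buffer and indenting it before appending it as a single cell. A small, self-contained sketch of that trick, with placeholder row data rather than the real show response:

```go
package main

import (
	"bytes"
	"os"
	"strings"

	"github.com/olekukonko/tablewriter"
)

// renderInner renders rows into a borderless table and indents every line
// with a tab so the result can be embedded as one cell of an outer table.
func renderInner(rows [][]string) string {
	var buf bytes.Buffer
	t := tablewriter.NewWriter(&buf)
	t.SetBorder(false)
	t.SetAlignment(tablewriter.ALIGN_LEFT)
	for _, r := range rows {
		t.Append(r)
	}
	t.Render()

	lines := strings.Split(buf.String(), "\n")
	for i, l := range lines {
		lines[i] = "\t" + l
	}
	return strings.Join(lines, "\n")
}

func main() {
	inner := renderInner([][]string{{"arch", "llama"}, {"parameters", "8.0B"}})

	outer := tablewriter.NewWriter(os.Stdout)
	outer.SetAutoWrapText(false)
	outer.SetBorder(false)
	outer.SetAlignment(tablewriter.ALIGN_LEFT)
	outer.Append([]string{"Model"})
	outer.Append([]string{inner})
	outer.Render()
}
```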
@@ -746,7 +860,6 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
 if wordWrap && termWidth >= 10 {
 for _, ch := range content {
 if state.lineLength+1 > termWidth-5 {
-
 if runewidth.StringWidth(state.wordBuffer) > termWidth-10 {
 fmt.Printf("%s%c", state.wordBuffer, ch)
 state.wordBuffer = ""
@@ -755,7 +868,11 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
 }
 
 // backtrack the length of the last word and clear to the end of the line
-fmt.Printf("\x1b[%dD\x1b[K\n", runewidth.StringWidth(state.wordBuffer))
+a := runewidth.StringWidth(state.wordBuffer)
+if a > 0 {
+fmt.Printf("\x1b[%dD", a)
+}
+fmt.Printf("\x1b[K\n")
 fmt.Printf("%s%c", state.wordBuffer, ch)
 chWidth := runewidth.RuneWidth(ch)
 
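The word-wrap fix above avoids emitting `ESC[0D` (which many terminals treat as a one-column move) when the pending word is empty. A tiny illustrative snippet of the guarded escape sequence, independent of the code above:

```go
package main

import "fmt"

// moveLeft backs the cursor up by width columns and clears to end of line.
// Emitting "\x1b[0D" would still move one column on many terminals, so the
// cursor move is skipped entirely when width is zero.
func moveLeft(width int) {
	if width > 0 {
		fmt.Printf("\x1b[%dD", width)
	}
	fmt.Print("\x1b[K\n")
}

func main() {
	fmt.Print("hello world")
	moveLeft(5) // erase "world" and start a new line
}
```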
@@ -957,17 +1074,11 @@ func generate(cmd *cobra.Command, opts runOptions) error {
 }
 
 func RunServer(cmd *cobra.Command, _ []string) error {
-// retrieve the OLLAMA_HOST environment variable
-ollamaHost, err := api.GetOllamaHost()
-if err != nil {
-return err
-}
-
 if err := initializeKeypair(); err != nil {
 return err
 }
 
-ln, err := net.Listen("tcp", net.JoinHostPort(ollamaHost.Host, ollamaHost.Port))
+ln, err := net.Listen("tcp", net.JoinHostPort(envconfig.Host.Host, envconfig.Host.Port))
 if err != nil {
 return err
 }
@@ -1026,24 +1137,6 @@ func initializeKeypair() error {
 return nil
 }
 
-//nolint:unused
-func waitForServer(ctx context.Context, client *api.Client) error {
-// wait for the server to start
-timeout := time.After(5 * time.Second)
-tick := time.Tick(500 * time.Millisecond)
-for {
-select {
-case <-timeout:
-return errors.New("timed out waiting for server to start")
-case <-tick:
-if err := client.Heartbeat(ctx); err == nil {
-return nil // server has started
-}
-}
-}
-
-}
-
 func checkServerHeartbeat(cmd *cobra.Command, _ []string) error {
 client, err := api.ClientFromEnvironment()
 if err != nil {
@@ -1251,6 +1344,9 @@ func NewCLI() *cobra.Command {
 envVars["OLLAMA_NOPRUNE"],
 envVars["OLLAMA_ORIGINS"],
 envVars["OLLAMA_TMPDIR"],
+envVars["OLLAMA_FLASH_ATTENTION"],
+envVars["OLLAMA_LLM_LIBRARY"],
+envVars["OLLAMA_MAX_VRAM"],
 })
 default:
 appendEnvDocs(cmd, envs)
@@ -8,11 +8,11 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/exp/slices"
|
|
||||||
|
|
||||||
"github.com/ollama/ollama/api"
|
"github.com/ollama/ollama/api"
|
||||||
"github.com/ollama/ollama/envconfig"
|
"github.com/ollama/ollama/envconfig"
|
||||||
@@ -31,41 +31,24 @@ const (
 )
 
 func loadModel(cmd *cobra.Command, opts *runOptions) error {
-client, err := api.ClientFromEnvironment()
-if err != nil {
-return err
-}
-
 p := progress.NewProgress(os.Stderr)
 defer p.StopAndClear()
 
 spinner := progress.NewSpinner("")
 p.Add("", spinner)
 
-showReq := api.ShowRequest{Name: opts.Model}
-showResp, err := client.Show(cmd.Context(), &showReq)
+client, err := api.ClientFromEnvironment()
 if err != nil {
 return err
 }
-opts.MultiModal = slices.Contains(showResp.Details.Families, "clip")
-opts.ParentModel = showResp.Details.ParentModel
-
-if len(showResp.Messages) > 0 {
-opts.Messages = append(opts.Messages, showResp.Messages...)
-}
-
 chatReq := &api.ChatRequest{
 Model: opts.Model,
-Messages: []api.Message{},
+KeepAlive: opts.KeepAlive,
 }
 
-if opts.KeepAlive != nil {
-chatReq.KeepAlive = opts.KeepAlive
-}
-
-err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
+return client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
 p.StopAndClear()
-if len(opts.Messages) > 0 {
 for _, msg := range opts.Messages {
 switch msg.Role {
 case "user":
@@ -77,19 +60,11 @@ func loadModel(cmd *cobra.Command, opts *runOptions) error {
 fmt.Println()
 }
 }
-}
 return nil
 })
-if err != nil {
-return err
-}
-
-return nil
 }
 
 func generateInteractive(cmd *cobra.Command, opts runOptions) error {
-opts.Messages = make([]api.Message, 0)
-
 err := loadModel(cmd, &opts)
 if err != nil {
 return err
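After this change, `loadModel` warms the model with a single chat request carrying no new messages and passes `KeepAlive` straight through on the request. A standalone sketch of that warm-up call, where the model name and keep-alive value are placeholders:

```go
package main

import (
	"context"
	"time"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		panic(err)
	}

	// An empty message list makes the server load the model without
	// generating anything; KeepAlive controls how long it stays resident.
	req := &api.ChatRequest{
		Model:     "llama3",
		KeepAlive: &api.Duration{Duration: 10 * time.Minute},
	}

	err = client.Chat(context.Background(), req, func(api.ChatResponse) error {
		return nil // nothing to print for a warm-up call
	})
	if err != nil {
		panic(err)
	}
}
```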
@@ -429,15 +404,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
 
 switch args[1] {
 case "info":
-fmt.Println("Model details:")
-if len(resp.Details.Families) > 0 {
-fmt.Printf("Family %s\n", strings.Join(resp.Details.Families, ", "))
-} else if resp.Details.Family != "" {
-fmt.Printf("Family %s\n", resp.Details.Family)
-}
-fmt.Printf("Parameter Size %s\n", resp.Details.ParameterSize)
-fmt.Printf("Quantization Level %s\n", resp.Details.QuantizationLevel)
-fmt.Println("")
+showInfo(resp)
 case "license":
 if resp.License == "" {
 fmt.Println("No license was specified for this model.")
@@ -6,6 +6,7 @@ import (
 "text/template"
 
 "github.com/stretchr/testify/assert"
+"github.com/stretchr/testify/require"
 
 "github.com/ollama/ollama/api"
 )
@@ -85,11 +86,11 @@ MESSAGE assistant """Yes it is true, I am half horse, half shark."""
 `
 
 tmpl, err := template.New("").Parse(expectedModelfile)
-assert.Nil(t, err)
+require.NoError(t, err)
 
 var buf bytes.Buffer
 err = tmpl.Execute(&buf, opts)
-assert.Nil(t, err)
+require.NoError(t, err)
 assert.Equal(t, buf.String(), mf)
 
 opts.ParentModel = "horseshark"
@@ -107,10 +108,10 @@ MESSAGE assistant """Yes it is true, I am half horse, half shark."""
 `
 
 tmpl, err = template.New("").Parse(expectedModelfile)
-assert.Nil(t, err)
+require.NoError(t, err)
 
 var parentBuf bytes.Buffer
 err = tmpl.Execute(&parentBuf, opts)
-assert.Nil(t, err)
+require.NoError(t, err)
 assert.Equal(t, parentBuf.String(), mf)
 }
cmd/start.go (new file, 27 lines)
@@ -0,0 +1,27 @@
+//go:build darwin || windows
+
+package cmd
+
+import (
+"context"
+"errors"
+"time"
+
+"github.com/ollama/ollama/api"
+)
+
+func waitForServer(ctx context.Context, client *api.Client) error {
+// wait for the server to start
+timeout := time.After(5 * time.Second)
+tick := time.Tick(500 * time.Millisecond)
+for {
+select {
+case <-timeout:
+return errors.New("timed out waiting for server to start")
+case <-tick:
+if err := client.Heartbeat(ctx); err == nil {
+return nil // server has started
+}
+}
+}
+}
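This new file moves the heartbeat-polling wait out of `cmd.go`. A hedged sketch of how such a wait might be used after spawning `ollama serve` from Go; the command invocation and timings are illustrative only, not taken from the repository:

```go
package main

import (
	"context"
	"os/exec"
	"time"

	"github.com/ollama/ollama/api"
)

func main() {
	// Start the server in the background (illustrative; cleanup omitted).
	cmd := exec.Command("ollama", "serve")
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	client, err := api.ClientFromEnvironment()
	if err != nil {
		panic(err)
	}

	// Poll the heartbeat endpoint until the server responds or we give up.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for {
		if err := client.Heartbeat(ctx); err == nil {
			break // server is up
		}
		select {
		case <-ctx.Done():
			panic("timed out waiting for server to start")
		case <-time.After(500 * time.Millisecond):
		}
	}
}
```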
@@ -189,7 +189,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
 if params.VocabSize > len(v.Tokens) {
 missingTokens := params.VocabSize - len(v.Tokens)
 slog.Warn(fmt.Sprintf("vocab is missing %d tokens", missingTokens))
-for cnt := 0; cnt < missingTokens; cnt++ {
+for cnt := range missingTokens {
 v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
 v.Scores = append(v.Scores, -1)
 v.Types = append(v.Types, tokenTypeUserDefined)
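This switches to Go 1.22's range-over-integer form, which is equivalent to the classic counted loop. A minimal illustration:

```go
package main

import "fmt"

func main() {
	// Since Go 1.22, "for i := range n" iterates i = 0, 1, ..., n-1,
	// exactly like "for i := 0; i < n; i++".
	missing := 3
	for i := range missing {
		fmt.Printf("<dummy%05d>\n", i+1)
	}
}
```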
@@ -35,7 +35,6 @@ func addOnes(data []float32, vectorSize int) ([]float32, error) {
 f32s = append(f32s, t...)
 }
 
-
 return f32s, nil
 }
 
@@ -119,11 +119,12 @@ func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([
 }
 
 var heads int
-if strings.HasSuffix(name, "attn_q.weight") {
+switch {
+case strings.HasSuffix(name, "attn_q.weight"):
 heads = params.AttentionHeads
-} else if strings.HasSuffix(name, "attn_k.weight") {
+case strings.HasSuffix(name, "attn_k.weight"):
 heads = cmp.Or(params.KeyValHeads, params.AttentionHeads)
-} else {
+default:
 return nil, fmt.Errorf("unknown tensor name: %s", name)
 }
 
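The `cmp.Or` call kept here (standard library since Go 1.22) returns the first of its arguments that is not the zero value, so an unset `KeyValHeads` falls back to `AttentionHeads`. A small illustration with placeholder values:

```go
package main

import (
	"cmp"
	"fmt"
)

func main() {
	keyValHeads := 0     // zero means "unset" in the params
	attentionHeads := 32

	// cmp.Or returns the first non-zero argument.
	heads := cmp.Or(keyValHeads, attentionHeads)
	fmt.Println(heads) // 32
}
```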
@@ -120,7 +120,7 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
 Name: name,
 Kind: kind,
 Offset: offset,
-Shape: shape[:],
+Shape: shape,
 }
 
 t.WriterTo = safetensorWriterTo{
@@ -85,13 +85,10 @@ func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, e
 
 sha256sum := sha256.New()
 for _, pt := range t.PreTokenizer.PreTokenizers {
-switch pt.Type {
-case "Split":
-if pt.Pattern.Regex != "" {
+if pt.Type == "Split" && pt.Pattern.Regex != "" {
 sha256sum.Write([]byte(pt.Pattern.Regex))
 }
 }
-}
 
 switch digest := fmt.Sprintf("%x", sha256sum.Sum(nil)); digest {
 case "d98f9631be1e9607a9848c26c1f9eac1aa9fc21ac6ba82a2fc0741af9780a48f":
@@ -88,7 +88,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
|
|||||||
Name: ggufName,
|
Name: ggufName,
|
||||||
Kind: kind,
|
Kind: kind,
|
||||||
Offset: offset, // calculate the offset
|
Offset: offset, // calculate the offset
|
||||||
Shape: shape[:],
|
Shape: shape,
|
||||||
}
|
}
|
||||||
|
|
||||||
tensor.WriterTo = torchWriterTo{
|
tensor.WriterTo = torchWriterTo{
|
||||||
@@ -104,7 +104,6 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
|
|||||||
}
|
}
|
||||||
|
|
||||||
return tensors, nil
|
return tensors, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func getAltParams(dirpath string) (*Params, error) {
|
func getAltParams(dirpath string) (*Params, error) {
|
||||||
docs/api.md (89 changes)
@@ -12,6 +12,7 @@
 - [Pull a Model](#pull-a-model)
 - [Push a Model](#push-a-model)
 - [Generate Embeddings](#generate-embeddings)
+- [List Running Models](#list-running-models)
 
 ## Conventions
 
@@ -25,7 +26,7 @@ All durations are returned in nanoseconds.
 
 ### Streaming responses
 
-Certain endpoints stream responses as JSON objects and can optional return non-streamed responses.
+Certain endpoints stream responses as JSON objects. Streaming can be disabled by providing `{"stream": false}` for these endpoints.
 
 ## Generate a completion
 
@@ -249,7 +250,7 @@ curl http://localhost:11434/api/generate -d '{
 
 #### Request (Reproducible outputs)
 
-For reproducible outputs, set `temperature` to 0 and `seed` to a number:
+For reproducible outputs, set `seed` to a number:
 
 ##### Request
 
@@ -258,8 +259,7 @@ curl http://localhost:11434/api/generate -d '{
 "model": "mistral",
 "prompt": "Why is the sky blue?",
 "options": {
-"seed": 123,
-"temperature": 0
+"seed": 123
 }
 }'
 ```
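The same reproducible-output request can be expressed with the Go `api` client; the sketch below assumes a local server with the `mistral` model already available:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		panic(err)
	}

	stream := false
	req := &api.GenerateRequest{
		Model:  "mistral",
		Prompt: "Why is the sky blue?",
		Stream: &stream,
		// A fixed seed makes repeated runs of the same prompt deterministic.
		Options: map[string]interface{}{"seed": 123},
	}

	err = client.Generate(context.Background(), req, func(resp api.GenerateResponse) error {
		fmt.Println(resp.Response)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```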
@@ -777,11 +777,12 @@ A single JSON object will be returned.
 POST /api/show
 ```
 
-Show information about a model including details, modelfile, template, parameters, license, and system prompt.
+Show information about a model including details, modelfile, template, parameters, license, system prompt.
 
 ### Parameters
 
 - `name`: name of the model to show
+- `verbose`: (optional) if set to `true`, returns full data for verbose response fields
 
 ### Examples
 
@@ -798,14 +799,40 @@ curl http://localhost:11434/api/show -d '{
 ```json
 {
 "modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM llava:latest\n\nFROM /Users/matt/.ollama/models/blobs/sha256:200765e1283640ffbd013184bf496e261032fa75b99498a9613be4e94d63ad52\nTEMPLATE \"\"\"{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: \"\"\"\nPARAMETER num_ctx 4096\nPARAMETER stop \"\u003c/s\u003e\"\nPARAMETER stop \"USER:\"\nPARAMETER stop \"ASSISTANT:\"",
-"parameters": "num_ctx 4096\nstop \u003c/s\u003e\nstop USER:\nstop ASSISTANT:",
+"parameters": "num_keep 24\nstop \"<|start_header_id|>\"\nstop \"<|end_header_id|>\"\nstop \"<|eot_id|>\"",
-"template": "{{ .System }}\nUSER: {{ .Prompt }}\nASSISTANT: ",
+"template": "{{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>",
 "details": {
+"parent_model": "",
 "format": "gguf",
 "family": "llama",
-"families": ["llama", "clip"],
-"parameter_size": "7B",
+"families": [
+"llama"
+],
+"parameter_size": "8.0B",
 "quantization_level": "Q4_0"
+},
+"model_info": {
+"general.architecture": "llama",
+"general.file_type": 2,
+"general.parameter_count": 8030261248,
+"general.quantization_version": 2,
+"llama.attention.head_count": 32,
+"llama.attention.head_count_kv": 8,
+"llama.attention.layer_norm_rms_epsilon": 0.00001,
+"llama.block_count": 32,
+"llama.context_length": 8192,
+"llama.embedding_length": 4096,
+"llama.feed_forward_length": 14336,
+"llama.rope.dimension_count": 128,
+"llama.rope.freq_base": 500000,
+"llama.vocab_size": 128256,
+"tokenizer.ggml.bos_token_id": 128000,
+"tokenizer.ggml.eos_token_id": 128009,
+"tokenizer.ggml.merges": [], // populates if `verbose=true`
+"tokenizer.ggml.model": "gpt2",
+"tokenizer.ggml.pre": "llama-bpe",
+"tokenizer.ggml.token_type": [], // populates if `verbose=true`
+"tokenizer.ggml.tokens": [] // populates if `verbose=true`
 }
 }
 ```
@@ -1035,3 +1062,47 @@ curl http://localhost:11434/api/embeddings -d '{
 ]
 }
 ```
+
+## List Running Models
+```shell
+GET /api/ps
+```
+
+List models that are currently loaded into memory.
+
+#### Examples
+
+### Request
+
+```shell
+curl http://localhost:11434/api/ps
+```
+
+#### Response
+
+A single JSON object will be returned.
+
+```json
+{
+"models": [
+{
+"name": "mistral:latest",
+"model": "mistral:latest",
+"size": 5137025024,
+"digest": "2ae6f6dd7a3dd734790bbbf58b8909a606e0e7e97e94b7604e0aa7ae4490e6d8",
+"details": {
+"parent_model": "",
+"format": "gguf",
+"family": "llama",
+"families": [
+"llama"
+],
+"parameter_size": "7.2B",
+"quantization_level": "Q4_0"
+},
+"expires_at": "2024-06-04T14:38:31.83753-07:00",
+"size_vram": 5137025024
+}
+]
+}
+```
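A hedged Go sketch that queries the new `/api/ps` endpoint over plain HTTP and prints the raw JSON; the host address is the default and may differ in your setup:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// /api/ps lists the models currently loaded into memory.
	resp, err := http.Get("http://localhost:11434/api/ps")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```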
@@ -104,7 +104,7 @@ like to use. For example, to compile an optimized binary for an Intel i9-9880H,
 you might use:
 
 ```
-OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./...
+OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on" go generate ./...
 go build .
 ```
 
@@ -114,15 +114,18 @@ If you have Docker available, you can build linux binaries with `./scripts/build
 
 ### Windows
 
-Note: The windows build for Ollama is still under development.
+Note: The Windows build for Ollama is still under development.
 
-Install required tools:
+First, install required tools:
 
 - MSVC toolchain - C/C++ and cmake as minimal requirements
 - Go version 1.22 or higher
 - MinGW (pick one variant) with GCC.
 - [MinGW-w64](https://www.mingw-w64.org/)
 - [MSYS2](https://www.msys2.org/)
+- The `ThreadJob` Powershell module: `Install-Module -Name ThreadJob -Scope CurrentUser`
+
+Then, build the `ollama` binary:
 
 ```powershell
 $env:CGO_ENABLED="1"
docs/faq.md (16 changes)
@@ -257,3 +257,19 @@ If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` AP
 ## How do I manage the maximum number of requests the Ollama server can queue?
 
 If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queue by setting `OLLAMA_MAX_QUEUE`.
+
+## How does Ollama handle concurrent requests?
+
+Ollama supports two levels of concurrent processing. If your system has sufficient available memory (system memory when using CPU inference, or VRAM for GPU inference) then multiple models can be loaded at the same time. For a given model, if there is sufficient available memory when the model is loaded, it is configured to allow parallel request processing.
+
+If there is insufficient available memory to load a new model request while one or more models are already loaded, all new requests will be queued until the new model can be loaded. As prior models become idle, one or more will be unloaded to make room for the new model. Queued requests will be processed in order. When using GPU inference new models must be able to completely fit in VRAM to allow concurrent model loads.
+
+Parallel request processing for a given model results in increasing the context size by the number of parallel requests. For example, a 2K context with 4 parallel requests will result in an 8K context and additional memory allocation.
+
+The following server settings may be used to adjust how Ollama handles concurrent requests on most platforms:
+
+- `OLLAMA_MAX_LOADED_MODELS` - The maximum number of models that can be loaded concurrently provided they fit in available memory. The default is 3 * the number of GPUs or 3 for CPU inference.
+- `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory.
+- `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512
+
+Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6.2 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM.
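These are server-side environment variables. Purely as an illustration of wiring them up, the Go snippet below launches `ollama serve` with the concurrency knobs described above; the specific values are examples, not recommendations from the documentation.

```go
package main

import (
	"os"
	"os/exec"
)

func main() {
	// Start the server with explicit concurrency settings.
	cmd := exec.Command("ollama", "serve")
	cmd.Env = append(os.Environ(),
		"OLLAMA_MAX_LOADED_MODELS=2", // up to two models resident at once
		"OLLAMA_NUM_PARALLEL=4",      // four parallel requests per model
		"OLLAMA_MAX_QUEUE=256",       // queue depth before returning 503
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```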
@@ -8,7 +8,7 @@ Check your compute compatibility to see if your card is supported:
 | Compute Capability | Family | Cards |
 | ------------------ | ------------------- | ----------------------------------------------------------------------------------------------------------- |
 | 9.0 | NVIDIA | `H100` |
-| 8.9 | GeForce RTX 40xx | `RTX 4090` `RTX 4080` `RTX 4070 Ti` `RTX 4060 Ti` |
+| 8.9 | GeForce RTX 40xx | `RTX 4090` `RTX 4080 SUPER` `RTX 4080` `RTX 4070 Ti SUPER` `RTX 4070 Ti` `RTX 4070 SUPER` `RTX 4070` `RTX 4060 Ti` `RTX 4060` |
 | | NVIDIA Professional | `L4` `L40` `RTX 6000` |
 | 8.6 | GeForce RTX 30xx | `RTX 3090 Ti` `RTX 3090` `RTX 3080 Ti` `RTX 3080` `RTX 3070 Ti` `RTX 3070` `RTX 3060 Ti` `RTX 3060` |
 | | NVIDIA Professional | `A40` `RTX A6000` `RTX A5000` `RTX A4000` `RTX A3000` `RTX A2000` `A10` `A16` `A2` |
@@ -18,7 +18,7 @@ Check your compute compatibility to see if your card is supported:
 | | Quadro | `RTX 8000` `RTX 6000` `RTX 5000` `RTX 4000` |
 | 7.0 | NVIDIA | `TITAN V` `V100` `Quadro GV100` |
 | 6.1 | NVIDIA TITAN | `TITAN Xp` `TITAN X` |
-| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050` |
+| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050 Ti` `GTX 1050` |
 | | Quadro | `P6000` `P5200` `P4200` `P3200` `P5000` `P4000` `P3000` `P2200` `P2000` `P1000` `P620` `P600` `P500` `P520` |
 | | Tesla | `P40` `P4` |
 | 6.0 | NVIDIA | `Tesla P100` `Quadro GP100` |
docs/import.md (216 changes)
@@ -1,170 +1,88 @@
-# Import a model
+# Import
 
-This guide walks through importing a GGUF, PyTorch or Safetensors model.
+GGUF models and select Safetensors models can be imported directly into Ollama.
 
-## Importing (GGUF)
+## Import GGUF
 
-### Step 1: Write a `Modelfile`
+A binary GGUF file can be imported directly into Ollama through a Modelfile.
 
-Start by creating a `Modelfile`. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.
-```
-FROM ./mistral-7b-v0.1.Q4_0.gguf
+```dockerfile
+FROM /path/to/file.gguf
 ```
 
-(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
+## Import Safetensors
 
-```
-FROM ./mistral-7b-v0.1.Q4_0.gguf
-TEMPLATE "[INST] {{ .Prompt }} [/INST]"
+If the model being imported is one of these architectures, it can be imported directly into Ollama through a Modelfile:
+
+- LlamaForCausalLM
+- MistralForCausalLM
+- GemmaForCausalLM
+
+```dockerfile
+FROM /path/to/safetensors/directory
 ```
 
-### Step 2: Create the Ollama model
+For architectures not directly convertable by Ollama, see llama.cpp's [guide](https://github.com/ggerganov/llama.cpp/blob/master/README.md#prepare-and-quantize) on conversion. After conversion, see [Import GGUF](#import-gguf).
 
-Finally, create a model from your `Modelfile`:
+## Automatic Quantization
 
+> [!NOTE]
+> Automatic quantization requires v0.1.35 or higher.
+
+Ollama is capable of quantizing FP16 or FP32 models to any of the supported quantizations with the `-q/--quantize` flag in `ollama create`.
+
+```dockerfile
+FROM /path/to/my/gemma/f16/model
 ```
-ollama create example -f Modelfile
-```
-
-### Step 3: Run your model
-
-Next, test the model with `ollama run`:
-
-```
-ollama run example "What is your favourite condiment?"
-```
-
-## Importing (PyTorch & Safetensors)
-
-> Importing from PyTorch and Safetensors is a longer process than importing from GGUF. Improvements that make it easier are a work in progress.
-
-### Setup
-
-First, clone the `ollama/ollama` repo:
-
-```
-git clone git@github.com:ollama/ollama.git ollama
-cd ollama
-```
-
-and then fetch its `llama.cpp` submodule:
 
 ```shell
-git submodule init
-git submodule update llm/llama.cpp
+$ ollama create -q Q4_K_M mymodel
+transferring model data
+quantizing F16 model to Q4_K_M
+creating new layer sha256:735e246cc1abfd06e9cdcf95504d6789a6cd1ad7577108a70d9902fef503c1bd
+creating new layer sha256:0853f0ad24e5865173bbf9ffcc7b0f5d56b66fd690ab1009867e45e7d2c4db0f
+writing manifest
+success
 ```
 
-Next, install the Python dependencies:
+### Supported Quantizations
 
-```
-python3 -m venv llm/llama.cpp/.venv
-source llm/llama.cpp/.venv/bin/activate
-pip install -r llm/llama.cpp/requirements.txt
+- `Q4_0`
+- `Q4_1`
+- `Q5_0`
+- `Q5_1`
+- `Q8_0`
+
+#### K-means Quantizations
+
+- `Q3_K_S`
+- `Q3_K_M`
+- `Q3_K_L`
+- `Q4_K_S`
+- `Q4_K_M`
+- `Q5_K_S`
+- `Q5_K_M`
+- `Q6_K`
+
+## Template Detection
+
+> [!NOTE]
+> Template detection requires v0.1.42 or higher.
+
+Ollama uses model metadata, specifically `tokenizer.chat_template`, to automatically create a template appropriate for the model you're importing.
+
+```dockerfile
+FROM /path/to/my/gemma/model
 ```
 
-Then build the `quantize` tool:
-
-```
-make -C llm/llama.cpp quantize
+```shell
+$ ollama create mymodel
+transferring model data
+using autodetected template gemma-instruct
+creating new layer sha256:baa2a0edc27d19cc6b7537578a9a7ba1a4e3214dc185ed5ae43692b319af7b84
+creating new layer sha256:ba66c3309914dbef07e5149a648fd1877f030d337a4f240d444ea335008943cb
+writing manifest
+success
 ```
 
-### Clone the HuggingFace repository (optional)
+Defining a template in the Modelfile will disable this feature which may be useful if you want to use a different template than the autodetected one.
 
-If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.
-
-Install [Git LFS](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage), verify it's installed, and then clone the model's repository:
-
-```
-git lfs install
-git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1 model
-```
-
-### Convert the model
-
-> Note: some model architectures require using specific convert scripts. For example, Qwen models require running `convert-hf-to-gguf.py` instead of `convert.py`
-
-```
-python llm/llama.cpp/convert.py ./model --outtype f16 --outfile converted.bin
-```
-
-### Quantize the model
-
-```
-llm/llama.cpp/quantize converted.bin quantized.bin q4_0
-```
-
-### Step 3: Write a `Modelfile`
-
-Next, create a `Modelfile` for your model:
-
-```
-FROM quantized.bin
-TEMPLATE "[INST] {{ .Prompt }} [/INST]"
-```
-
-### Step 4: Create the Ollama model
-
-Finally, create a model from your `Modelfile`:
-
-```
-ollama create example -f Modelfile
-```
-
-### Step 5: Run your model
-
-Next, test the model with `ollama run`:
-
-```
-ollama run example "What is your favourite condiment?"
-```
-
-## Publishing your model (optional – early alpha)
-
-Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:
-
-1. Create [an account](https://ollama.com/signup)
-2. Copy your Ollama public key:
-- macOS: `cat ~/.ollama/id_ed25519.pub | pbcopy`
-- Windows: `type %USERPROFILE%\.ollama\id_ed25519.pub`
-- Linux: `cat /usr/share/ollama/.ollama/id_ed25519.pub`
-3. Add your public key to your [Ollama account](https://ollama.com/settings/keys)
-
-Next, copy your model to your username's namespace:
-
-```
-ollama cp example <your username>/example
-```
-
-> Note: model names may only contain lowercase letters, digits, and the characters `.`, `-`, and `_`.
-
-Then push the model:
-
-```
-ollama push <your username>/example
-```
-
-After publishing, your model will be available at `https://ollama.com/<your username>/example`.
-
-## Quantization reference
-
-The quantization options are as follow (from highest highest to lowest levels of quantization). Note: some architectures such as Falcon do not support K quants.
-
-- `q2_K`
-- `q3_K`
-- `q3_K_S`
-- `q3_K_M`
-- `q3_K_L`
-- `q4_0` (recommended)
-- `q4_1`
-- `q4_K`
-- `q4_K_S`
-- `q4_K_M`
-- `q5_0`
-- `q5_1`
-- `q5_K`
-- `q5_K_S`
-- `q5_K_M`
-- `q6_K`
-- `q8_0`
-- `f16`
@@ -100,6 +100,16 @@ sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
 sudo chmod +x /usr/bin/ollama
 ```
 
+## Installing specific versions
+
+Use `OLLAMA_VERSION` environment variable with the install script to install a specific version of Ollama, including pre-releases. You can find the version numbers in the [releases page](https://github.com/ollama/ollama/releases).
+
+For example:
+
+```
+curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.1.32 sh
+```
+
 ## Viewing logs
 
 To view logs of Ollama running as a startup service, run:
@@ -65,6 +65,7 @@ curl http://localhost:11434/v1/chat/completions \
 }
 ]
 }'
+
 ```
 
 ## Endpoints
@@ -104,8 +105,6 @@ curl http://localhost:11434/v1/chat/completions \
 
 #### Notes
 
-- Setting `seed` will always set `temperature` to `0`
-- `finish_reason` will always be `stop`
 - `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached
 
 ## Models
@@ -22,7 +22,7 @@ docker logs <container-name>
 If manually running `ollama serve` in a terminal, the logs will be on that terminal.
 
 When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and type in:
-- `explorer %LOCALAPPDATA%\Ollama` to view logs
+- `explorer %LOCALAPPDATA%\Ollama` to view logs. The most recent server logs will be in `server.log` and older logs will be in `server-#.log`
 - `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
 - `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored
 - `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
@@ -70,13 +70,18 @@ curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
|
|||||||
|
|
||||||
If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/
|
If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/
|
||||||
|
|
||||||
## Container fails to run on NVIDIA GPU
|
## NVIDIA GPU Discovery
|
||||||
|
|
||||||
Make sure you've set up the container runtime first as described in [docker.md](./docker.md)
|
When Ollama starts up, it takes inventory of the GPUs present in the system to determine compatibility and how much VRAM is available. Sometimes this discovery can fail to find your GPUs. In general, running the latest driver will yield the best results.
|
||||||
|
|
||||||
Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem
|
### Linux NVIDIA Troubleshooting
|
||||||
|
|
||||||
- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
|
If you are using a container to run Ollama, make sure you've set up the container runtime first as described in [docker.md](./docker.md)
|
||||||
|
|
||||||
|
Sometimes the Ollama can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem
|
||||||
|
|
||||||
|
- If you are using a container, is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU.
|
||||||
|
- Is the uvm driver loaded? `sudo nvidia-modprobe -u`
|
||||||
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
|
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
|
||||||
- Try rebooting
|
- Try rebooting
|
||||||
- Make sure you're running the latest nvidia drivers
|
- Make sure you're running the latest nvidia drivers
|
||||||
@@ -84,3 +89,8 @@ Sometimes the container runtime can have difficulties initializing the GPU. When
|
|||||||
If none of those resolve the problem, gather additional information and file an issue:
|
If none of those resolve the problem, gather additional information and file an issue:
|
||||||
- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
|
- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
|
||||||
- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`
|
- Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`
|
||||||
|
|
||||||
|
|
||||||
|
## Windows Terminal Errors
|
||||||
|
|
||||||
|
Older versions of Windows 10 (e.g., 21H1) are known to have a bug where the standard terminal program does not display control characters correctly. This can result in long strings like `←[?25h←[?25l` being displayed, sometimes erroring with `The parameter is incorrect`. To resolve this problem, please update to Win 10 22H1 or newer.
|
||||||
|
@@ -45,7 +45,7 @@ all_splits = text_splitter.split_documents(data)
|
|||||||
```
|
```
|
||||||
|
|
||||||
It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We can use Ollama directly to instantiate an embedding model. We will use ChromaDB in this example for a vector database. `pip install chromadb`
|
It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We can use Ollama directly to instantiate an embedding model. We will use ChromaDB in this example for a vector database. `pip install chromadb`
|
||||||
|
We also need to pull an embedding model: `ollama pull nomic-embed-text`
|
||||||
```python
|
```python
|
||||||
from langchain.embeddings import OllamaEmbeddings
|
from langchain.embeddings import OllamaEmbeddings
|
||||||
from langchain.vectorstores import Chroma
|
from langchain.vectorstores import Chroma
|
||||||
@@ -68,7 +68,8 @@ The next thing is to send the question and the relevant parts of the docs to the
|
|||||||
```python
|
```python
|
||||||
from langchain.chains import RetrievalQA
|
from langchain.chains import RetrievalQA
|
||||||
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
|
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
|
||||||
qachain.invoke({"query": question})
|
res = qachain.invoke({"query": question})
|
||||||
|
print(res['result'])
|
||||||
```
|
```
|
||||||
|
|
||||||
The answer received from this chain was:
|
The answer received from this chain was:
|
||||||
|
@@ -19,7 +19,7 @@ Logs will often be helpful in diagnosing the problem (see
|
|||||||
|
|
||||||
## System Requirements
|
## System Requirements
|
||||||
|
|
||||||
* Windows 10 or newer, Home or Pro
|
* Windows 10 22H2 or newer, Home or Pro
|
||||||
* NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
|
* NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
|
||||||
* AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card
|
* AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card
|
||||||
|
|
||||||
@@ -39,8 +39,8 @@ server.
|
|||||||
Ollama on Windows stores files in a few different locations. You can view them in
|
Ollama on Windows stores files in a few different locations. You can view them in
|
||||||
the explorer window by hitting `<cmd>+R` and typing in:
|
the explorer window by hitting `<cmd>+R` and typing in:
|
||||||
- `explorer %LOCALAPPDATA%\Ollama` contains logs and downloaded updates
|
- `explorer %LOCALAPPDATA%\Ollama` contains logs and downloaded updates
|
||||||
- *app.log* contains logs from the GUI application
|
- *app.log* contains the most recent logs from the GUI application
|
||||||
- *server.log* contains the server logs
|
- *server.log* contains the most recent server logs
|
||||||
- *upgrade.log* contains log output for upgrades
|
- *upgrade.log* contains log output for upgrades
|
||||||
- `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH)
|
- `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (The installer adds this to your user PATH)
|
||||||
- `explorer %HOMEPATH%\.ollama` contains models and configuration
|
- `explorer %HOMEPATH%\.ollama` contains models and configuration
|
||||||
|
@@ -1,15 +1,31 @@
|
|||||||
package envconfig
|
package envconfig
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type OllamaHost struct {
|
||||||
|
Scheme string
|
||||||
|
Host string
|
||||||
|
Port string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o OllamaHost) String() string {
|
||||||
|
return fmt.Sprintf("%s://%s:%s", o.Scheme, o.Host, o.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// Set via OLLAMA_ORIGINS in the environment
|
// Set via OLLAMA_ORIGINS in the environment
|
||||||
AllowOrigins []string
|
AllowOrigins []string
|
||||||
@@ -17,8 +33,10 @@ var (
|
|||||||
Debug bool
|
Debug bool
|
||||||
// Experimental flash attention
|
// Experimental flash attention
|
||||||
FlashAttention bool
|
FlashAttention bool
|
||||||
|
// Set via OLLAMA_HOST in the environment
|
||||||
|
Host *OllamaHost
|
||||||
// Set via OLLAMA_KEEP_ALIVE in the environment
|
// Set via OLLAMA_KEEP_ALIVE in the environment
|
||||||
KeepAlive string
|
KeepAlive time.Duration
|
||||||
// Set via OLLAMA_LLM_LIBRARY in the environment
|
// Set via OLLAMA_LLM_LIBRARY in the environment
|
||||||
LLMLibrary string
|
LLMLibrary string
|
||||||
// Set via OLLAMA_MAX_LOADED_MODELS in the environment
|
// Set via OLLAMA_MAX_LOADED_MODELS in the environment
|
||||||
@@ -27,6 +45,8 @@ var (
|
|||||||
MaxQueuedRequests int
|
MaxQueuedRequests int
|
||||||
// Set via OLLAMA_MAX_VRAM in the environment
|
// Set via OLLAMA_MAX_VRAM in the environment
|
||||||
MaxVRAM uint64
|
MaxVRAM uint64
|
||||||
|
// Set via OLLAMA_MODELS in the environment
|
||||||
|
ModelsDir string
|
||||||
// Set via OLLAMA_NOHISTORY in the environment
|
// Set via OLLAMA_NOHISTORY in the environment
|
||||||
NoHistory bool
|
NoHistory bool
|
||||||
// Set via OLLAMA_NOPRUNE in the environment
|
// Set via OLLAMA_NOPRUNE in the environment
|
||||||
@@ -35,8 +55,23 @@ var (
|
|||||||
NumParallel int
|
NumParallel int
|
||||||
// Set via OLLAMA_RUNNERS_DIR in the environment
|
// Set via OLLAMA_RUNNERS_DIR in the environment
|
||||||
RunnersDir string
|
RunnersDir string
|
||||||
|
// Set via OLLAMA_SCHED_SPREAD in the environment
|
||||||
|
SchedSpread bool
|
||||||
// Set via OLLAMA_TMPDIR in the environment
|
// Set via OLLAMA_TMPDIR in the environment
|
||||||
TmpDir string
|
TmpDir string
|
||||||
|
// Set via OLLAMA_INTEL_GPU in the environment
|
||||||
|
IntelGpu bool
|
||||||
|
|
||||||
|
// Set via CUDA_VISIBLE_DEVICES in the environment
|
||||||
|
CudaVisibleDevices string
|
||||||
|
// Set via HIP_VISIBLE_DEVICES in the environment
|
||||||
|
HipVisibleDevices string
|
||||||
|
// Set via ROCR_VISIBLE_DEVICES in the environment
|
||||||
|
RocrVisibleDevices string
|
||||||
|
// Set via GPU_DEVICE_ORDINAL in the environment
|
||||||
|
GpuDeviceOrdinal string
|
||||||
|
// Set via HSA_OVERRIDE_GFX_VERSION in the environment
|
||||||
|
HsaOverrideGfxVersion string
|
||||||
)
|
)
|
||||||
|
|
||||||
type EnvVar struct {
|
type EnvVar struct {
|
||||||
@@ -46,23 +81,33 @@ type EnvVar struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func AsMap() map[string]EnvVar {
|
func AsMap() map[string]EnvVar {
|
||||||
return map[string]EnvVar{
|
ret := map[string]EnvVar{
|
||||||
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
|
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
|
||||||
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
|
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
|
||||||
"OLLAMA_HOST": {"OLLAMA_HOST", "", "IP Address for the ollama server (default 127.0.0.1:11434)"},
|
"OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
|
||||||
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
|
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
|
||||||
"OLLAMA_LLM_LIBRARY": {"OLLAMA_ORIGINS", LLMLibrary, ""},
|
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
|
||||||
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
|
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU"},
|
||||||
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
|
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
|
||||||
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, ""},
|
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
|
||||||
"OLLAMA_MODELS": {"OLLAMA_MODELS", "", "The path to the models directory"},
|
"OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"},
|
||||||
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
|
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
|
||||||
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
|
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
|
||||||
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},
|
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests"},
|
||||||
"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
|
"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
|
||||||
"OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, ""},
|
"OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"},
|
||||||
|
"OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"},
|
||||||
"OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir, "Location for temporary files"},
|
"OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir, "Location for temporary files"},
|
||||||
}
|
}
|
||||||
|
if runtime.GOOS != "darwin" {
|
||||||
|
ret["CUDA_VISIBLE_DEVICES"] = EnvVar{"CUDA_VISIBLE_DEVICES", CudaVisibleDevices, "Set which NVIDIA devices are visible"}
|
||||||
|
ret["HIP_VISIBLE_DEVICES"] = EnvVar{"HIP_VISIBLE_DEVICES", HipVisibleDevices, "Set which AMD devices are visible"}
|
||||||
|
ret["ROCR_VISIBLE_DEVICES"] = EnvVar{"ROCR_VISIBLE_DEVICES", RocrVisibleDevices, "Set which AMD devices are visible"}
|
||||||
|
ret["GPU_DEVICE_ORDINAL"] = EnvVar{"GPU_DEVICE_ORDINAL", GpuDeviceOrdinal, "Set which AMD devices are visible"}
|
||||||
|
ret["HSA_OVERRIDE_GFX_VERSION"] = EnvVar{"HSA_OVERRIDE_GFX_VERSION", HsaOverrideGfxVersion, "Override the gfx used for all detected AMD GPUs"}
|
||||||
|
ret["OLLAMA_INTEL_GPU"] = EnvVar{"OLLAMA_INTEL_GPU", IntelGpu, "Enable experimental Intel GPU detection"}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
}
|
}
|
||||||
|
|
||||||
func Values() map[string]string {
|
func Values() map[string]string {
|
||||||
@@ -86,9 +131,10 @@ func clean(key string) string {
|
|||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// default values
|
// default values
|
||||||
NumParallel = 1
|
NumParallel = 0 // Autoselect
|
||||||
MaxRunners = 1
|
MaxRunners = 0 // Autoselect
|
||||||
MaxQueuedRequests = 512
|
MaxQueuedRequests = 512
|
||||||
|
KeepAlive = 5 * time.Minute
|
||||||
|
|
||||||
LoadConfig()
|
LoadConfig()
|
||||||
}
|
}
|
||||||
@@ -126,7 +172,7 @@ func LoadConfig() {
|
|||||||
var paths []string
|
var paths []string
|
||||||
for _, root := range []string{filepath.Dir(appExe), cwd} {
|
for _, root := range []string{filepath.Dir(appExe), cwd} {
|
||||||
paths = append(paths,
|
paths = append(paths,
|
||||||
filepath.Join(root),
|
root,
|
||||||
filepath.Join(root, "windows-"+runtime.GOARCH),
|
filepath.Join(root, "windows-"+runtime.GOARCH),
|
||||||
filepath.Join(root, "dist", "windows-"+runtime.GOARCH),
|
filepath.Join(root, "dist", "windows-"+runtime.GOARCH),
|
||||||
)
|
)
|
||||||
@@ -162,8 +208,8 @@ func LoadConfig() {
|
|||||||
|
|
||||||
if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" {
|
if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" {
|
||||||
val, err := strconv.Atoi(onp)
|
val, err := strconv.Atoi(onp)
|
||||||
if err != nil || val <= 0 {
|
if err != nil {
|
||||||
slog.Error("invalid setting must be greater than zero", "OLLAMA_NUM_PARALLEL", onp, "error", err)
|
slog.Error("invalid setting, ignoring", "OLLAMA_NUM_PARALLEL", onp, "error", err)
|
||||||
} else {
|
} else {
|
||||||
NumParallel = val
|
NumParallel = val
|
||||||
}
|
}
|
||||||
@@ -173,6 +219,15 @@ func LoadConfig() {
|
|||||||
NoHistory = true
|
NoHistory = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if spread := clean("OLLAMA_SCHED_SPREAD"); spread != "" {
|
||||||
|
s, err := strconv.ParseBool(spread)
|
||||||
|
if err == nil {
|
||||||
|
SchedSpread = s
|
||||||
|
} else {
|
||||||
|
SchedSpread = true
|
||||||
|
}
|
||||||
|
}
|
||||||
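As an aside on the OLLAMA_SCHED_SPREAD parsing just above: any non-empty value that `strconv.ParseBool` cannot parse falls into the `else` branch and still turns spreading on. A minimal standalone sketch of that behavior (not part of the diff; names are illustrative):

```go
package main

import (
	"fmt"
	"strconv"
)

// schedSpreadFromEnv mirrors the parsing logic above in isolation.
func schedSpreadFromEnv(raw string) bool {
	if raw == "" {
		return false // unset keeps the default (off)
	}
	if s, err := strconv.ParseBool(raw); err == nil {
		return s // "1", "true", "0", "false", ...
	}
	return true // non-empty but unparsable (e.g. "yes") still enables spreading
}

func main() {
	for _, v := range []string{"", "0", "false", "1", "yes"} {
		fmt.Printf("OLLAMA_SCHED_SPREAD=%q -> %v\n", v, schedSpreadFromEnv(v))
	}
}
```

In other words, once the variable is set, only an explicit false-y boolean value disables spreading.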
|
|
||||||
if noprune := clean("OLLAMA_NOPRUNE"); noprune != "" {
|
if noprune := clean("OLLAMA_NOPRUNE"); noprune != "" {
|
||||||
NoPrune = true
|
NoPrune = true
|
||||||
}
|
}
|
||||||
@@ -184,16 +239,22 @@ func LoadConfig() {
|
|||||||
AllowOrigins = append(AllowOrigins,
|
AllowOrigins = append(AllowOrigins,
|
||||||
fmt.Sprintf("http://%s", allowOrigin),
|
fmt.Sprintf("http://%s", allowOrigin),
|
||||||
fmt.Sprintf("https://%s", allowOrigin),
|
fmt.Sprintf("https://%s", allowOrigin),
|
||||||
fmt.Sprintf("http://%s:*", allowOrigin),
|
fmt.Sprintf("http://%s", net.JoinHostPort(allowOrigin, "*")),
|
||||||
fmt.Sprintf("https://%s:*", allowOrigin),
|
fmt.Sprintf("https://%s", net.JoinHostPort(allowOrigin, "*")),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
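The move from a plain `Sprintf` to `net.JoinHostPort` in the origins loop above matters for IPv6 hosts: `JoinHostPort` wraps a host containing a colon in brackets, so the wildcard port attaches to the whole address. A small standalone illustration (not part of the diff):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	for _, origin := range []string{"localhost", "::1"} {
		before := fmt.Sprintf("http://%s:*", origin)                     // old formatting
		after := fmt.Sprintf("http://%s", net.JoinHostPort(origin, "*")) // new formatting
		fmt.Printf("%s -> %s\n", before, after)
	}
	// localhost: both forms yield http://localhost:*
	// ::1:       the old form gives http://::1:* whereas JoinHostPort gives http://[::1]:*
}
```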
|
|
||||||
|
AllowOrigins = append(AllowOrigins,
|
||||||
|
"app://*",
|
||||||
|
"file://*",
|
||||||
|
"tauri://*",
|
||||||
|
)
|
||||||
|
|
||||||
maxRunners := clean("OLLAMA_MAX_LOADED_MODELS")
|
maxRunners := clean("OLLAMA_MAX_LOADED_MODELS")
|
||||||
if maxRunners != "" {
|
if maxRunners != "" {
|
||||||
m, err := strconv.Atoi(maxRunners)
|
m, err := strconv.Atoi(maxRunners)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("invalid setting", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
|
slog.Error("invalid setting, ignoring", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
|
||||||
} else {
|
} else {
|
||||||
MaxRunners = m
|
MaxRunners = m
|
||||||
}
|
}
|
||||||
@@ -202,11 +263,111 @@ func LoadConfig() {
|
|||||||
if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" {
|
if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" {
|
||||||
p, err := strconv.Atoi(onp)
|
p, err := strconv.Atoi(onp)
|
||||||
if err != nil || p <= 0 {
|
if err != nil || p <= 0 {
|
||||||
slog.Error("invalid setting", "OLLAMA_MAX_QUEUE", onp, "error", err)
|
slog.Error("invalid setting, ignoring", "OLLAMA_MAX_QUEUE", onp, "error", err)
|
||||||
} else {
|
} else {
|
||||||
MaxQueuedRequests = p
|
MaxQueuedRequests = p
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
KeepAlive = clean("OLLAMA_KEEP_ALIVE")
|
ka := clean("OLLAMA_KEEP_ALIVE")
|
||||||
|
if ka != "" {
|
||||||
|
loadKeepAlive(ka)
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
ModelsDir, err = getModelsDir()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_MODELS", ModelsDir, "error", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
Host, err = getOllamaHost()
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("invalid setting", "OLLAMA_HOST", Host, "error", err, "using default port", Host.Port)
|
||||||
|
}
|
||||||
|
|
||||||
|
if set, err := strconv.ParseBool(clean("OLLAMA_INTEL_GPU")); err == nil {
|
||||||
|
IntelGpu = set
|
||||||
|
}
|
||||||
|
|
||||||
|
CudaVisibleDevices = clean("CUDA_VISIBLE_DEVICES")
|
||||||
|
HipVisibleDevices = clean("HIP_VISIBLE_DEVICES")
|
||||||
|
RocrVisibleDevices = clean("ROCR_VISIBLE_DEVICES")
|
||||||
|
GpuDeviceOrdinal = clean("GPU_DEVICE_ORDINAL")
|
||||||
|
HsaOverrideGfxVersion = clean("HSA_OVERRIDE_GFX_VERSION")
|
||||||
|
}
|
||||||
|
|
||||||
|
func getModelsDir() (string, error) {
|
||||||
|
if models, exists := os.LookupEnv("OLLAMA_MODELS"); exists {
|
||||||
|
return models, nil
|
||||||
|
}
|
||||||
|
home, err := os.UserHomeDir()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return filepath.Join(home, ".ollama", "models"), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getOllamaHost() (*OllamaHost, error) {
|
||||||
|
defaultPort := "11434"
|
||||||
|
|
||||||
|
hostVar := os.Getenv("OLLAMA_HOST")
|
||||||
|
hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))
|
||||||
|
|
||||||
|
scheme, hostport, ok := strings.Cut(hostVar, "://")
|
||||||
|
switch {
|
||||||
|
case !ok:
|
||||||
|
scheme, hostport = "http", hostVar
|
||||||
|
case scheme == "http":
|
||||||
|
defaultPort = "80"
|
||||||
|
case scheme == "https":
|
||||||
|
defaultPort = "443"
|
||||||
|
}
|
||||||
|
|
||||||
|
// trim trailing slashes
|
||||||
|
hostport = strings.TrimRight(hostport, "/")
|
||||||
|
|
||||||
|
host, port, err := net.SplitHostPort(hostport)
|
||||||
|
if err != nil {
|
||||||
|
host, port = "127.0.0.1", defaultPort
|
||||||
|
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
|
||||||
|
host = ip.String()
|
||||||
|
} else if hostport != "" {
|
||||||
|
host = hostport
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
|
||||||
|
return &OllamaHost{
|
||||||
|
Scheme: scheme,
|
||||||
|
Host: host,
|
||||||
|
Port: defaultPort,
|
||||||
|
}, ErrInvalidHostPort
|
||||||
|
}
|
||||||
|
|
||||||
|
return &OllamaHost{
|
||||||
|
Scheme: scheme,
|
||||||
|
Host: host,
|
||||||
|
Port: port,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
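One behavior of `getOllamaHost` above that is easy to miss: when a scheme is supplied without an explicit port, the default port follows the scheme (80 for http, 443 for https), while a bare host still falls back to 11434. A hypothetical example test in the same `envconfig` package, illustrative only (the host test table added later in this change covers the full matrix):

```go
// Hypothetical test, for illustration of the scheme/port defaulting only.
func TestHostSchemeDefaults(t *testing.T) {
	cases := map[string]string{
		"":                    "http://127.0.0.1:11434",
		"example.com":         "http://example.com:11434",
		"https://example.com": "https://example.com:443", // port follows the scheme
		"0.0.0.0:8080":        "http://0.0.0.0:8080",
	}
	for in, want := range cases {
		t.Setenv("OLLAMA_HOST", in)
		oh, _ := getOllamaHost()
		if got := oh.String(); got != want {
			t.Errorf("OLLAMA_HOST=%q: got %s, want %s", in, got, want)
		}
	}
}
```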
|
|
||||||
|
func loadKeepAlive(ka string) {
|
||||||
|
v, err := strconv.Atoi(ka)
|
||||||
|
if err != nil {
|
||||||
|
d, err := time.ParseDuration(ka)
|
||||||
|
if err == nil {
|
||||||
|
if d < 0 {
|
||||||
|
KeepAlive = time.Duration(math.MaxInt64)
|
||||||
|
} else {
|
||||||
|
KeepAlive = d
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
d := time.Duration(v) * time.Second
|
||||||
|
if d < 0 {
|
||||||
|
KeepAlive = time.Duration(math.MaxInt64)
|
||||||
|
} else {
|
||||||
|
KeepAlive = d
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@@ -1,8 +1,13 @@
|
|||||||
package envconfig
|
package envconfig
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -20,4 +25,64 @@ func TestConfig(t *testing.T) {
|
|||||||
t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
|
t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
|
||||||
LoadConfig()
|
LoadConfig()
|
||||||
require.True(t, FlashAttention)
|
require.True(t, FlashAttention)
|
||||||
|
t.Setenv("OLLAMA_KEEP_ALIVE", "")
|
||||||
|
LoadConfig()
|
||||||
|
require.Equal(t, 5*time.Minute, KeepAlive)
|
||||||
|
t.Setenv("OLLAMA_KEEP_ALIVE", "3")
|
||||||
|
LoadConfig()
|
||||||
|
require.Equal(t, 3*time.Second, KeepAlive)
|
||||||
|
t.Setenv("OLLAMA_KEEP_ALIVE", "1h")
|
||||||
|
LoadConfig()
|
||||||
|
require.Equal(t, 1*time.Hour, KeepAlive)
|
||||||
|
t.Setenv("OLLAMA_KEEP_ALIVE", "-1s")
|
||||||
|
LoadConfig()
|
||||||
|
require.Equal(t, time.Duration(math.MaxInt64), KeepAlive)
|
||||||
|
t.Setenv("OLLAMA_KEEP_ALIVE", "-1")
|
||||||
|
LoadConfig()
|
||||||
|
require.Equal(t, time.Duration(math.MaxInt64), KeepAlive)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestClientFromEnvironment(t *testing.T) {
|
||||||
|
type testCase struct {
|
||||||
|
value string
|
||||||
|
expect string
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
hostTestCases := map[string]*testCase{
|
||||||
|
"empty": {value: "", expect: "127.0.0.1:11434"},
|
||||||
|
"only address": {value: "1.2.3.4", expect: "1.2.3.4:11434"},
|
||||||
|
"only port": {value: ":1234", expect: ":1234"},
|
||||||
|
"address and port": {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
|
||||||
|
"hostname": {value: "example.com", expect: "example.com:11434"},
|
||||||
|
"hostname and port": {value: "example.com:1234", expect: "example.com:1234"},
|
||||||
|
"zero port": {value: ":0", expect: ":0"},
|
||||||
|
"too large port": {value: ":66000", err: ErrInvalidHostPort},
|
||||||
|
"too small port": {value: ":-1", err: ErrInvalidHostPort},
|
||||||
|
"ipv6 localhost": {value: "[::1]", expect: "[::1]:11434"},
|
||||||
|
"ipv6 world open": {value: "[::]", expect: "[::]:11434"},
|
||||||
|
"ipv6 no brackets": {value: "::1", expect: "[::1]:11434"},
|
||||||
|
"ipv6 + port": {value: "[::1]:1337", expect: "[::1]:1337"},
|
||||||
|
"extra space": {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
|
||||||
|
"extra quotes": {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
|
||||||
|
"extra space+quotes": {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
|
||||||
|
"extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range hostTestCases {
|
||||||
|
t.Run(k, func(t *testing.T) {
|
||||||
|
t.Setenv("OLLAMA_HOST", v.value)
|
||||||
|
LoadConfig()
|
||||||
|
|
||||||
|
oh, err := getOllamaHost()
|
||||||
|
if err != v.err {
|
||||||
|
t.Fatalf("expected %s, got %s", v.err, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
host := net.JoinHostPort(oh.Host, oh.Port)
|
||||||
|
assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@@ -77,13 +77,21 @@ LOADER_MAPPING = {
|
|||||||
|
|
||||||
|
|
||||||
def load_single_document(file_path: str) -> List[Document]:
|
def load_single_document(file_path: str) -> List[Document]:
|
||||||
ext = "." + file_path.rsplit(".", 1)[-1]
|
if os.path.getsize(file_path) != 0:
|
||||||
|
filename, ext = os.path.splitext(file_path)
|
||||||
if ext in LOADER_MAPPING:
|
if ext in LOADER_MAPPING:
|
||||||
loader_class, loader_args = LOADER_MAPPING[ext]
|
loader_class, loader_args = LOADER_MAPPING[ext]
|
||||||
|
try:
|
||||||
loader = loader_class(file_path, **loader_args)
|
loader = loader_class(file_path, **loader_args)
|
||||||
|
if loader:
|
||||||
return loader.load()
|
return loader.load()
|
||||||
|
except:
|
||||||
|
print(f"Corrupted file {file_path}. Ignoring it.")
|
||||||
|
else:
|
||||||
|
print(f"Unsupported file {file_path}. Ignoring it.")
|
||||||
|
else:
|
||||||
|
print(f"Empty file {file_path}. Ignoring it.")
|
||||||
|
|
||||||
raise ValueError(f"Unsupported file extension '{ext}'")
|
|
||||||
|
|
||||||
def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
|
def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
|
||||||
"""
|
"""
|
||||||
@@ -100,6 +108,7 @@ def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Docum
|
|||||||
results = []
|
results = []
|
||||||
with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
|
with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
|
||||||
for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
|
for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
|
||||||
|
if docs:
|
||||||
results.extend(docs)
|
results.extend(docs)
|
||||||
pbar.update()
|
pbar.update()
|
||||||
|
|
||||||
|
@@ -12,3 +12,4 @@ pandoc==2.3
|
|||||||
pypandoc==1.11
|
pypandoc==1.11
|
||||||
tqdm==4.66.1
|
tqdm==4.66.1
|
||||||
sentence_transformers==2.2.2
|
sentence_transformers==2.2.2
|
||||||
|
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
|
@@ -5,7 +5,6 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestHumanNumber(t *testing.T) {
|
func TestHumanNumber(t *testing.T) {
|
||||||
|
|
||||||
type testCase struct {
|
type testCase struct {
|
||||||
input uint64
|
input uint64
|
||||||
expected string
|
expected string
|
||||||
|
4
go.mod
@@ -16,7 +16,9 @@ require (
|
|||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
github.com/agnivade/levenshtein v1.1.1
|
||||||
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
|
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
|
||||||
|
github.com/google/go-cmp v0.6.0
|
||||||
github.com/mattn/go-runewidth v0.0.14
|
github.com/mattn/go-runewidth v0.0.14
|
||||||
github.com/nlpodyssey/gopickle v0.3.0
|
github.com/nlpodyssey/gopickle v0.3.0
|
||||||
github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
|
github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c
|
||||||
@@ -70,7 +72,7 @@ require (
|
|||||||
golang.org/x/net v0.25.0 // indirect
|
golang.org/x/net v0.25.0 // indirect
|
||||||
golang.org/x/sys v0.20.0
|
golang.org/x/sys v0.20.0
|
||||||
golang.org/x/term v0.20.0
|
golang.org/x/term v0.20.0
|
||||||
golang.org/x/text v0.15.0 // indirect
|
golang.org/x/text v0.15.0
|
||||||
google.golang.org/protobuf v1.34.1
|
google.golang.org/protobuf v1.34.1
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
)
|
||||||
|
6
go.sum
@@ -4,10 +4,14 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
|
|||||||
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
|
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||||
|
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
|
||||||
|
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
|
||||||
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
|
||||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||||
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ=
|
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ=
|
||||||
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
|
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
|
||||||
|
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
|
||||||
|
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
|
||||||
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||||
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
|
||||||
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
|
||||||
@@ -36,6 +40,8 @@ github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1/go.mod h1:uw2gLc
|
|||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
|
||||||
|
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||||
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
|
||||||
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
|
||||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||||
|
@@ -49,9 +49,17 @@ func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func commonAMDValidateLibDir() (string, error) {
|
func commonAMDValidateLibDir() (string, error) {
|
||||||
// We try to favor system paths first, so that we can wire up the subprocess to use
|
// Favor our bundled version
|
||||||
// the system version. Only use our bundled version if the system version doesn't work
|
|
||||||
// This gives users a more recovery options if versions have subtle problems at runtime
|
// Installer payload location if we're running the installed binary
|
||||||
|
exe, err := os.Executable()
|
||||||
|
if err == nil {
|
||||||
|
rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm")
|
||||||
|
if rocmLibUsable(rocmTargetDir) {
|
||||||
|
slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir)
|
||||||
|
return rocmTargetDir, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Prefer explicit HIP env var
|
// Prefer explicit HIP env var
|
||||||
hipPath := os.Getenv("HIP_PATH")
|
hipPath := os.Getenv("HIP_PATH")
|
||||||
@@ -87,14 +95,5 @@ func commonAMDValidateLibDir() (string, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Installer payload location if we're running the installed binary
|
|
||||||
exe, err := os.Executable()
|
|
||||||
if err == nil {
|
|
||||||
rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm")
|
|
||||||
if rocmLibUsable(rocmTargetDir) {
|
|
||||||
slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir)
|
|
||||||
return rocmTargetDir, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
||||||
}
|
}
|
||||||
|
@@ -84,9 +84,8 @@ func (hl *HipLib) AMDDriverVersion() (driverMajor, driverMinor int, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
slog.Debug("hipDriverGetVersion", "version", version)
|
slog.Debug("hipDriverGetVersion", "version", version)
|
||||||
// TODO - this isn't actually right, but the docs claim hipDriverGetVersion isn't accurate anyway...
|
driverMajor = version / 10000000
|
||||||
driverMajor = version / 1000
|
driverMinor = (version - (driverMajor * 10000000)) / 100000
|
||||||
driverMinor = (version - (driverMajor * 1000)) / 10
|
|
||||||
|
|
||||||
return driverMajor, driverMinor, nil
|
return driverMajor, driverMinor, nil
|
||||||
}
|
}
|
||||||
|
198
gpu/amd_linux.go
@@ -13,6 +13,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
"github.com/ollama/ollama/format"
|
"github.com/ollama/ollama/format"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -25,7 +26,16 @@ const (
|
|||||||
|
|
||||||
// Prefix with the node dir
|
// Prefix with the node dir
|
||||||
GPUTotalMemoryFileGlob = "mem_banks/*/properties" // size_in_bytes line
|
GPUTotalMemoryFileGlob = "mem_banks/*/properties" // size_in_bytes line
|
||||||
GPUUsedMemoryFileGlob = "mem_banks/*/used_memory"
|
|
||||||
|
// Direct Rendering Manager sysfs location
|
||||||
|
DRMDeviceDirGlob = "/sys/class/drm/card*/device"
|
||||||
|
DRMTotalMemoryFile = "mem_info_vram_total"
|
||||||
|
DRMUsedMemoryFile = "mem_info_vram_used"
|
||||||
|
|
||||||
|
// In hex; properties file is in decimal
|
||||||
|
DRMUniqueIDFile = "unique_id"
|
||||||
|
DRMVendorFile = "vendor"
|
||||||
|
DRMDeviceFile = "device"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -35,8 +45,8 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Gather GPU information from the amdgpu driver if any supported GPUs are detected
|
// Gather GPU information from the amdgpu driver if any supported GPUs are detected
|
||||||
func AMDGetGPUInfo() []GpuInfo {
|
func AMDGetGPUInfo() []RocmGPUInfo {
|
||||||
resp := []GpuInfo{}
|
resp := []RocmGPUInfo{}
|
||||||
if !AMDDetected() {
|
if !AMDDetected() {
|
||||||
return resp
|
return resp
|
||||||
}
|
}
|
||||||
@@ -50,9 +60,9 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
|
|
||||||
// Determine if the user has already pre-selected which GPUs to look at, then ignore the others
|
// Determine if the user has already pre-selected which GPUs to look at, then ignore the others
|
||||||
var visibleDevices []string
|
var visibleDevices []string
|
||||||
hipVD := os.Getenv("HIP_VISIBLE_DEVICES") // zero based index only
|
hipVD := envconfig.HipVisibleDevices // zero based index only
|
||||||
rocrVD := os.Getenv("ROCR_VISIBLE_DEVICES") // zero based index or UUID, but consumer cards seem to not support UUID
|
rocrVD := envconfig.RocrVisibleDevices // zero based index or UUID, but consumer cards seem to not support UUID
|
||||||
gpuDO := os.Getenv("GPU_DEVICE_ORDINAL") // zero based index
|
gpuDO := envconfig.GpuDeviceOrdinal // zero based index
|
||||||
switch {
|
switch {
|
||||||
// TODO is this priorty order right?
|
// TODO is this priorty order right?
|
||||||
case hipVD != "":
|
case hipVD != "":
|
||||||
@@ -65,7 +75,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
visibleDevices = strings.Split(gpuDO, ",")
|
visibleDevices = strings.Split(gpuDO, ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
gfxOverride := os.Getenv("HSA_OVERRIDE_GFX_VERSION")
|
gfxOverride := envconfig.HsaOverrideGfxVersion
|
||||||
var supported []string
|
var supported []string
|
||||||
libDir := ""
|
libDir := ""
|
||||||
|
|
||||||
@@ -90,7 +100,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
scanner := bufio.NewScanner(fp)
|
scanner := bufio.NewScanner(fp)
|
||||||
isCPU := false
|
isCPU := false
|
||||||
var major, minor, patch uint64
|
var major, minor, patch uint64
|
||||||
var vendor, device uint64
|
var vendor, device, uniqueID uint64
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
line := strings.TrimSpace(scanner.Text())
|
line := strings.TrimSpace(scanner.Text())
|
||||||
// Note: we could also use "cpu_cores_count X" where X is greater than zero to detect CPUs
|
// Note: we could also use "cpu_cores_count X" where X is greater than zero to detect CPUs
|
||||||
@@ -121,30 +131,43 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
} else if strings.HasPrefix(line, "vendor_id") {
|
} else if strings.HasPrefix(line, "vendor_id") {
|
||||||
ver := strings.Fields(line)
|
ver := strings.Fields(line)
|
||||||
if len(ver) != 2 {
|
if len(ver) != 2 {
|
||||||
slog.Debug("malformed vendor_id", "vendor_id", line)
|
slog.Debug("malformed", "vendor_id", line)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
vendor, err = strconv.ParseUint(ver[1], 10, 32)
|
vendor, err = strconv.ParseUint(ver[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug("malformed vendor_id" + line)
|
slog.Debug("malformed", "vendor_id", line, "error", err)
|
||||||
}
|
}
|
||||||
} else if strings.HasPrefix(line, "device_id") {
|
} else if strings.HasPrefix(line, "device_id") {
|
||||||
ver := strings.Fields(line)
|
ver := strings.Fields(line)
|
||||||
if len(ver) != 2 {
|
if len(ver) != 2 {
|
||||||
slog.Debug("malformed device_id", "device_id", line)
|
slog.Debug("malformed", "device_id", line)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
device, err = strconv.ParseUint(ver[1], 10, 32)
|
device, err = strconv.ParseUint(ver[1], 10, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug("malformed device_id" + line)
|
slog.Debug("malformed", "device_id", line, "error", err)
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(line, "unique_id") {
|
||||||
|
ver := strings.Fields(line)
|
||||||
|
if len(ver) != 2 {
|
||||||
|
slog.Debug("malformed", "unique_id", line)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
uniqueID, err = strconv.ParseUint(ver[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("malformed", "unique_id", line, "error", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO - any other properties we want to extract and record?
|
// TODO - any other properties we want to extract and record?
|
||||||
// vendor_id + device_id -> pci lookup for "Name"
|
// vendor_id + device_id -> pci lookup for "Name"
|
||||||
// Other metrics that may help us understand relative performance between multiple GPUs
|
// Other metrics that may help us understand relative performance between multiple GPUs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Note: while ./mem_banks/*/used_memory exists, it doesn't appear to take other VRAM consumers
|
||||||
|
// into consideration, so we instead map the device over to the DRM driver sysfs nodes which
|
||||||
|
// do reliably report VRAM usage.
|
||||||
|
|
||||||
if isCPU {
|
if isCPU {
|
||||||
cpuCount++
|
cpuCount++
|
||||||
continue
|
continue
|
||||||
@@ -156,7 +179,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
// Shouldn't happen, but just in case...
|
// Shouldn't happen, but just in case...
|
||||||
if gpuID < 0 {
|
if gpuID < 0 {
|
||||||
slog.Error("unexpected amdgpu sysfs data resulted in negative GPU ID, please set OLLAMA_DEBUG=1 and report an issue")
|
slog.Error("unexpected amdgpu sysfs data resulted in negative GPU ID, please set OLLAMA_DEBUG=1 and report an issue")
|
||||||
return []GpuInfo{}
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if int(major) < RocmComputeMin {
|
if int(major) < RocmComputeMin {
|
||||||
@@ -167,65 +190,68 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
// Look up the memory for the current node
|
// Look up the memory for the current node
|
||||||
totalMemory := uint64(0)
|
totalMemory := uint64(0)
|
||||||
usedMemory := uint64(0)
|
usedMemory := uint64(0)
|
||||||
propGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(nodeID), GPUTotalMemoryFileGlob)
|
var usedFile string
|
||||||
propFiles, err := filepath.Glob(propGlob)
|
mapping := []struct {
|
||||||
|
id uint64
|
||||||
|
filename string
|
||||||
|
}{
|
||||||
|
{vendor, DRMVendorFile},
|
||||||
|
{device, DRMDeviceFile},
|
||||||
|
{uniqueID, DRMUniqueIDFile}, // Not all devices will report this
|
||||||
|
}
|
||||||
|
slog.Debug("mapping amdgpu to drm sysfs nodes", "amdgpu", match, "vendor", vendor, "device", device, "unique_id", uniqueID)
|
||||||
|
// Map over to DRM location to find the total/free memory
|
||||||
|
drmMatches, _ := filepath.Glob(DRMDeviceDirGlob)
|
||||||
|
for _, devDir := range drmMatches {
|
||||||
|
matched := true
|
||||||
|
for _, m := range mapping {
|
||||||
|
if m.id == 0 {
|
||||||
|
// Null ID means it didn't populate, so we can't use it to match
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
filename := filepath.Join(devDir, m.filename)
|
||||||
|
buf, err := os.ReadFile(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("error looking up total GPU memory", "glob", propGlob, "error", err)
|
slog.Debug("failed to read sysfs node", "file", filename, "error", err)
|
||||||
|
matched = false
|
||||||
|
break
|
||||||
}
|
}
|
||||||
// 1 or more memory banks - sum the values of all of them
|
// values here are in hex, strip off the lead 0x and parse so we can compare the numeric (decimal) values in amdgpu
|
||||||
for _, propFile := range propFiles {
|
cmp, err := strconv.ParseUint(strings.TrimPrefix(strings.TrimSpace(string(buf)), "0x"), 16, 64)
|
||||||
fp, err := os.Open(propFile)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("failed to open sysfs node", "file", propFile, "erroir", err)
|
slog.Debug("failed to parse sysfs node", "file", filename, "error", err)
|
||||||
|
matched = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if cmp != m.id {
|
||||||
|
matched = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !matched {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
defer fp.Close()
|
|
||||||
scanner := bufio.NewScanner(fp)
|
// Found the matching DRM directory
|
||||||
for scanner.Scan() {
|
slog.Debug("matched", "amdgpu", match, "drm", devDir)
|
||||||
line := strings.TrimSpace(scanner.Text())
|
totalFile := filepath.Join(devDir, DRMTotalMemoryFile)
|
||||||
if strings.HasPrefix(line, "size_in_bytes") {
|
buf, err := os.ReadFile(totalFile)
|
||||||
ver := strings.Fields(line)
|
|
||||||
if len(ver) != 2 {
|
|
||||||
slog.Warn("malformed " + line)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
bankSizeInBytes, err := strconv.ParseUint(ver[1], 10, 64)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("malformed int " + line)
|
slog.Debug("failed to read sysfs node", "file", totalFile, "error", err)
|
||||||
continue
|
break
|
||||||
}
|
}
|
||||||
totalMemory += bankSizeInBytes
|
totalMemory, err = strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 64)
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if totalMemory == 0 {
|
|
||||||
slog.Warn("amdgpu reports zero total memory", "gpu", gpuID)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
usedGlob := filepath.Join(AMDNodesSysfsDir, strconv.Itoa(nodeID), GPUUsedMemoryFileGlob)
|
|
||||||
usedFiles, err := filepath.Glob(usedGlob)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("error looking up used GPU memory", "glob", usedGlob, "error", err)
|
slog.Debug("failed to parse sysfs node", "file", totalFile, "error", err)
|
||||||
continue
|
break
|
||||||
}
|
}
|
||||||
for _, usedFile := range usedFiles {
|
|
||||||
fp, err := os.Open(usedFile)
|
usedFile = filepath.Join(devDir, DRMUsedMemoryFile)
|
||||||
|
usedMemory, err = getFreeMemory(usedFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("failed to open sysfs node", "file", usedFile, "error", err)
|
slog.Debug("failed to update used memory", "error", err)
|
||||||
continue
|
|
||||||
}
|
}
|
||||||
defer fp.Close()
|
break
|
||||||
data, err := io.ReadAll(fp)
|
|
||||||
if err != nil {
|
|
||||||
slog.Warn("failed to read sysfs node", "file", usedFile, "error", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
used, err := strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
slog.Warn("malformed used memory", "data", string(data), "error", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
usedMemory += used
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
|
// iGPU detection, remove this check once we can support an iGPU variant of the rocm library
|
||||||
@@ -241,18 +267,21 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
|
|
||||||
slog.Debug("amdgpu memory", "gpu", gpuID, "total", format.HumanBytes2(totalMemory))
|
slog.Debug("amdgpu memory", "gpu", gpuID, "total", format.HumanBytes2(totalMemory))
|
||||||
slog.Debug("amdgpu memory", "gpu", gpuID, "available", format.HumanBytes2(totalMemory-usedMemory))
|
slog.Debug("amdgpu memory", "gpu", gpuID, "available", format.HumanBytes2(totalMemory-usedMemory))
|
||||||
gpuInfo := GpuInfo{
|
gpuInfo := RocmGPUInfo{
|
||||||
|
GpuInfo: GpuInfo{
|
||||||
Library: "rocm",
|
Library: "rocm",
|
||||||
memInfo: memInfo{
|
memInfo: memInfo{
|
||||||
TotalMemory: totalMemory,
|
TotalMemory: totalMemory,
|
||||||
FreeMemory: (totalMemory - usedMemory),
|
FreeMemory: (totalMemory - usedMemory),
|
||||||
},
|
},
|
||||||
ID: fmt.Sprintf("%d", gpuID),
|
ID: strconv.Itoa(gpuID),
|
||||||
Name: name,
|
Name: name,
|
||||||
Compute: fmt.Sprintf("gfx%d%x%x", major, minor, patch),
|
Compute: fmt.Sprintf("gfx%d%x%x", major, minor, patch),
|
||||||
MinimumMemory: rocmMinimumMemory,
|
MinimumMemory: rocmMinimumMemory,
|
||||||
DriverMajor: driverMajor,
|
DriverMajor: driverMajor,
|
||||||
DriverMinor: driverMinor,
|
DriverMinor: driverMinor,
|
||||||
|
},
|
||||||
|
usedFilepath: usedFile,
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the user wants to filter to a subset of devices, filter out if we aren't a match
|
// If the user wants to filter to a subset of devices, filter out if we aren't a match
|
||||||
@@ -276,7 +305,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
libDir, err = AMDValidateLibDir()
|
libDir, err = AMDValidateLibDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("unable to verify rocm library, will use cpu", "error", err)
|
slog.Warn("unable to verify rocm library, will use cpu", "error", err)
|
||||||
return []GpuInfo{}
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
gpuInfo.DependencyPath = libDir
|
gpuInfo.DependencyPath = libDir
|
||||||
@@ -287,7 +316,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
supported, err = GetSupportedGFX(libDir)
|
supported, err = GetSupportedGFX(libDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("failed to lookup supported GFX types, falling back to CPU mode", "error", err)
|
slog.Warn("failed to lookup supported GFX types, falling back to CPU mode", "error", err)
|
||||||
return []GpuInfo{}
|
return nil
|
||||||
}
|
}
|
||||||
slog.Debug("rocm supported GPUs", "types", supported)
|
slog.Debug("rocm supported GPUs", "types", supported)
|
||||||
}
|
}
|
||||||
@@ -304,6 +333,11 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
slog.Info("skipping rocm gfx compatibility check", "HSA_OVERRIDE_GFX_VERSION", gfxOverride)
|
slog.Info("skipping rocm gfx compatibility check", "HSA_OVERRIDE_GFX_VERSION", gfxOverride)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check for env var workarounds
|
||||||
|
if name == "1002:687f" { // Vega RX 56
|
||||||
|
gpuInfo.EnvWorkarounds = append(gpuInfo.EnvWorkarounds, [2]string{"HSA_ENABLE_SDMA", "0"})
|
||||||
|
}
|
||||||
|
|
||||||
// The GPU has passed all the verification steps and is supported
|
// The GPU has passed all the verification steps and is supported
|
||||||
resp = append(resp, gpuInfo)
|
resp = append(resp, gpuInfo)
|
||||||
}
|
}
|
||||||
@@ -378,3 +412,31 @@ func AMDDriverVersion() (driverMajor, driverMinor int, err error) {
|
|||||||
}
|
}
|
||||||
return driverMajor, driverMinor, nil
|
return driverMajor, driverMinor, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (gpus RocmGPUInfoList) RefreshFreeMemory() error {
|
||||||
|
if len(gpus) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for i := range gpus {
|
||||||
|
usedMemory, err := getFreeMemory(gpus[i].usedFilepath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
slog.Debug("updating rocm free memory", "gpu", gpus[i].ID, "name", gpus[i].Name, "before", format.HumanBytes2(gpus[i].FreeMemory), "now", format.HumanBytes2(gpus[i].TotalMemory-usedMemory))
|
||||||
|
gpus[i].FreeMemory = gpus[i].TotalMemory - usedMemory
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFreeMemory(usedFile string) (uint64, error) {
|
||||||
|
buf, err := os.ReadFile(usedFile)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("failed to read sysfs node %s %w", usedFile, err)
|
||||||
|
}
|
||||||
|
usedMemory, err := strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("failed to parse sysfs node", "file", usedFile, "error", err)
|
||||||
|
return 0, fmt.Errorf("failed to parse sysfs node %s %w", usedFile, err)
|
||||||
|
}
|
||||||
|
return usedMemory, nil
|
||||||
|
}
|
||||||
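Putting the DRM mapping above in concrete terms: on Linux the VRAM totals and usage now come from the DRM sysfs nodes rather than the KFD `mem_banks` properties. A standalone sketch that reads the same files directly (paths taken from the constants added above; error handling trimmed, so treat it as illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
)

// readUint reads a sysfs file containing a single decimal integer.
func readUint(path string) uint64 {
	buf, err := os.ReadFile(path)
	if err != nil {
		return 0
	}
	v, _ := strconv.ParseUint(strings.TrimSpace(string(buf)), 10, 64)
	return v
}

func main() {
	devDirs, _ := filepath.Glob("/sys/class/drm/card*/device") // DRMDeviceDirGlob
	for _, dir := range devDirs {
		total := readUint(filepath.Join(dir, "mem_info_vram_total")) // DRMTotalMemoryFile
		used := readUint(filepath.Join(dir, "mem_info_vram_used"))   // DRMUsedMemoryFile
		fmt.Printf("%s: total=%d bytes, used=%d bytes\n", dir, total, used)
	}
}
```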
|
@@ -7,8 +7,10 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"slices"
|
"slices"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
"github.com/ollama/ollama/format"
|
"github.com/ollama/ollama/format"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -20,12 +22,12 @@ const (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
// Used to validate if the given ROCm lib is usable
|
// Used to validate if the given ROCm lib is usable
|
||||||
ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here...
|
ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // This is not sufficient to discern v5 vs v6
|
||||||
RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\5.7\\bin"} // TODO glob?
|
RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\6.1\\bin"} // TODO glob?
|
||||||
)
|
)
|
||||||
|
|
||||||
func AMDGetGPUInfo() []GpuInfo {
|
func AMDGetGPUInfo() []RocmGPUInfo {
|
||||||
resp := []GpuInfo{}
|
resp := []RocmGPUInfo{}
|
||||||
hl, err := NewHipLib()
|
hl, err := NewHipLib()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug(err.Error())
|
slog.Debug(err.Error())
|
||||||
@@ -33,12 +35,11 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
}
|
}
|
||||||
defer hl.Release()
|
defer hl.Release()
|
||||||
|
|
||||||
// TODO - this reports incorrect version information, so omitting for now
|
driverMajor, driverMinor, err := hl.AMDDriverVersion()
|
||||||
// driverMajor, driverMinor, err := hl.AMDDriverVersion()
|
if err != nil {
|
||||||
// if err != nil {
|
// For now this is benign, but we may eventually need to fail compatibility checks
|
||||||
// // For now this is benign, but we may eventually need to fail compatibility checks
|
slog.Debug("error looking up amd driver version", "error", err)
|
||||||
// slog.Debug("error looking up amd driver version", "error", err)
|
}
|
||||||
// }
|
|
||||||
|
|
||||||
// Note: the HIP library automatically handles subsetting to any HIP_VISIBLE_DEVICES the user specified
|
// Note: the HIP library automatically handles subsetting to any HIP_VISIBLE_DEVICES the user specified
|
||||||
count := hl.HipGetDeviceCount()
|
count := hl.HipGetDeviceCount()
|
||||||
@@ -52,7 +53,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var supported []string
|
var supported []string
|
||||||
gfxOverride := os.Getenv("HSA_OVERRIDE_GFX_VERSION")
|
gfxOverride := envconfig.HsaOverrideGfxVersion
|
||||||
if gfxOverride == "" {
|
if gfxOverride == "" {
|
||||||
supported, err = GetSupportedGFX(libDir)
|
supported, err = GetSupportedGFX(libDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -65,7 +66,7 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
|
|
||||||
slog.Debug("detected hip devices", "count", count)
|
slog.Debug("detected hip devices", "count", count)
|
||||||
// TODO how to determine the underlying device ID when visible devices is causing this to subset?
|
// TODO how to determine the underlying device ID when visible devices is causing this to subset?
|
||||||
for i := 0; i < count; i++ {
|
for i := range count {
|
||||||
err = hl.HipSetDevice(i)
|
err = hl.HipSetDevice(i)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("set device", "id", i, "error", err)
|
slog.Warn("set device", "id", i, "error", err)
|
||||||
@@ -113,25 +114,27 @@ func AMDGetGPUInfo() []GpuInfo {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO revisit this once ROCm v6 is available on windows.
|
|
||||||
// v5.7 only reports VRAM used by this process, so it's completely wrong and unusable
|
|
||||||
slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
|
slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
|
||||||
slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))
|
slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))
|
||||||
gpuInfo := GpuInfo{
|
gpuInfo := RocmGPUInfo{
|
||||||
|
GpuInfo: GpuInfo{
|
||||||
Library: "rocm",
|
Library: "rocm",
|
||||||
memInfo: memInfo{
|
memInfo: memInfo{
|
||||||
TotalMemory: totalMemory,
|
TotalMemory: totalMemory,
|
||||||
FreeMemory: freeMemory,
|
FreeMemory: freeMemory,
|
||||||
},
|
},
|
||||||
ID: fmt.Sprintf("%d", i), // TODO this is probably wrong if we specify visible devices
|
// Free memory reporting on Windows is not reliable until we bump to ROCm v6.2
|
||||||
|
UnreliableFreeMemory: true,
|
||||||
|
|
||||||
|
ID: strconv.Itoa(i), // TODO this is probably wrong if we specify visible devices
|
||||||
DependencyPath: libDir,
|
DependencyPath: libDir,
|
||||||
MinimumMemory: rocmMinimumMemory,
|
MinimumMemory: rocmMinimumMemory,
|
||||||
Name: name,
|
Name: name,
|
||||||
Compute: gfx,
|
Compute: gfx,
|
||||||
|
DriverMajor: driverMajor,
|
||||||
// TODO - this information isn't accurate on windows, so don't report it until we find the right way to retrieve
|
DriverMinor: driverMinor,
|
||||||
// DriverMajor: driverMajor,
|
},
|
||||||
// DriverMinor: driverMinor,
|
index: i,
|
||||||
}
|
}
|
||||||
|
|
||||||
resp = append(resp, gpuInfo)
|
resp = append(resp, gpuInfo)
|
||||||
@@ -159,3 +162,30 @@ func AMDValidateLibDir() (string, error) {
|
|||||||
slog.Warn("amdgpu detected, but no compatible rocm library found. Please install ROCm")
|
slog.Warn("amdgpu detected, but no compatible rocm library found. Please install ROCm")
|
||||||
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
return "", fmt.Errorf("no suitable rocm found, falling back to CPU")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (gpus RocmGPUInfoList) RefreshFreeMemory() error {
|
||||||
|
if len(gpus) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
hl, err := NewHipLib()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug(err.Error())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer hl.Release()
|
||||||
|
|
||||||
|
for i := range gpus {
|
||||||
|
err := hl.HipSetDevice(gpus[i].index)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
freeMemory, _, err := hl.HipMemGetInfo()
|
||||||
|
if err != nil {
|
||||||
|
slog.Warn("get mem info", "id", i, "error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
slog.Debug("updating rocm free memory", "gpu", gpus[i].ID, "name", gpus[i].Name, "before", format.HumanBytes2(gpus[i].FreeMemory), "now", format.HumanBytes2(freeMemory))
|
||||||
|
gpus[i].FreeMemory = freeMemory
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
@@ -77,20 +77,27 @@ func cleanupTmpDirs() {
 			continue
 		}
 		raw, err := os.ReadFile(filepath.Join(d, "ollama.pid"))
-		if err == nil {
-			pid, err := strconv.Atoi(string(raw))
-			if err == nil {
-				if proc, err := os.FindProcess(int(pid)); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
-					// Another running ollama, ignore this tmpdir
-					continue
-				}
-			}
-		} else {
-			slog.Debug("failed to open ollama.pid", "path", d, "error", err)
-		}
-		err = os.RemoveAll(d)
-		if err != nil {
-			slog.Debug("unable to cleanup stale tmpdir", "path", d, "error", err)
+		if err != nil {
+			slog.Warn("failed to read ollama.pid", "path", d, "error", err)
+			// No pid, ignore this tmpdir
+			continue
+		}
+
+		pid, err := strconv.Atoi(string(raw))
+		if err != nil {
+			slog.Warn("failed to parse pid", "path", d, "error", err)
+			continue
+		}
+
+		proc, err := os.FindProcess(pid)
+		if err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
+			slog.Warn("found running ollama", "pid", pid, "path", d)
+			// Another running ollama, ignore this tmpdir
+			continue
+		}
+
+		if err := os.Remove(d); err != nil {
+			slog.Warn("unable to cleanup stale tmpdir", "path", d, "error", err)
 		}
 	}
 }
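The rewritten cleanup path relies on the classic signal-0 liveness probe: os.FindProcess never fails on Unix, so the real check is whether Signal(0) reports the process as already gone. A standalone sketch of that idiom (not part of the change):

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

// alive reports whether pid refers to a running process: signal 0 delivers
// nothing, but still performs the existence check.
func alive(pid int) bool {
	proc, err := os.FindProcess(pid) // on Unix this always succeeds
	if err != nil {
		return false
	}
	return !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone)
}

func main() {
	fmt.Println(alive(os.Getpid())) // prints true for the current process
}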
@@ -1,21 +1,16 @@
 package gpu

 import (
-	"log/slog"
-
 	"golang.org/x/sys/cpu"
 )

-func GetCPUVariant() string {
+func GetCPUCapability() CPUCapability {
 	if cpu.X86.HasAVX2 {
-		slog.Debug("CPU has AVX2")
-		return "avx2"
+		return CPUCapabilityAVX2
 	}
 	if cpu.X86.HasAVX {
-		slog.Debug("CPU has AVX")
-		return "avx"
+		return CPUCapabilityAVX
 	}
-	slog.Debug("CPU does not have vector extensions")
 	// else LCD
-	return ""
+	return CPUCapabilityNone
 }
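CPUCapability and its constants are defined outside the hunks shown here. A plausible shape, assuming an ordered enum so the later cpuCapability < GPURunnerCPUCapability comparison in gpu.go works, would be:

// Assumed definitions for illustration; the real ones live elsewhere in package gpu.
type CPUCapability uint32

const (
	CPUCapabilityNone CPUCapability = iota
	CPUCapabilityAVX
	CPUCapabilityAVX2
)

// Minimum level the GPU runners are compiled against (assumption).
const GPURunnerCPUCapability = CPUCapabilityAVX

func (c CPUCapability) String() string {
	switch c {
	case CPUCapabilityAVX2:
		return "avx2"
	case CPUCapabilityAVX:
		return "avx"
	default:
		return ""
	}
}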
@@ -18,5 +18,4 @@ func cudaGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
 		ids = append(ids, info.ID)
 	}
 	return "CUDA_VISIBLE_DEVICES", strings.Join(ids, ",")
-
 }
534	gpu/gpu.go
@@ -16,28 +16,45 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
-	"strconv"
 	"strings"
 	"sync"
 	"unsafe"

-	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/envconfig"
+	"github.com/ollama/ollama/format"
 )

-type handles struct {
+type cudaHandles struct {
 	deviceCount int
 	cudart      *C.cudart_handle_t
 	nvcuda      *C.nvcuda_handle_t
+	nvml        *C.nvml_handle_t
+}
+
+type oneapiHandles struct {
 	oneapi      *C.oneapi_handle_t
+	deviceCount int
 }

 const (
 	cudaMinimumMemory = 457 * format.MebiByte
 	rocmMinimumMemory = 457 * format.MebiByte
+	// TODO OneAPI minimum memory
 )

-var gpuMutex sync.Mutex
+var (
+	gpuMutex      sync.Mutex
+	bootstrapped  bool
+	cpuCapability CPUCapability
+	cpus          []CPUInfo
+	cudaGPUs      []CudaGPUInfo
+	nvcudaLibPath string
+	cudartLibPath string
+	oneapiLibPath string
+	nvmlLibPath   string
+	rocmGPUs      []RocmGPUInfo
+	oneapiGPUs    []OneapiGPUInfo
+)

 // With our current CUDA compile flags, older than 5.0 will not work properly
 var CudaComputeMin = [2]C.int{5, 0}
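These new package-level variables turn discovery into a bootstrap-once, refresh-cheap flow: library paths and device lists are cached under gpuMutex, and later calls only re-read volatile numbers such as free memory. A hedged sketch of the access pattern, not the actual GetGPUInfo body (any name not in the diff is hypothetical):

// Sketch of the caching pattern the variables above enable.
func cachedDiscoverySketch() GpuInfoList {
	gpuMutex.Lock()
	defer gpuMutex.Unlock()

	if !bootstrapped {
		// expensive one-time work: load libraries, enumerate devices,
		// populate cpus / cudaGPUs / rocmGPUs / oneapiGPUs
		bootstrapped = true
	} else {
		// cheap periodic work: only refresh free memory on known devices
	}

	resp := GpuInfoList{}
	for _, g := range cudaGPUs {
		resp = append(resp, g.GpuInfo)
	}
	if len(resp) == 0 && len(cpus) > 0 {
		resp = append(resp, cpus[0].GpuInfo)
	}
	return resp
}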
@@ -47,130 +64,113 @@ var RocmComputeMin = 9
 // TODO find a better way to detect iGPU instead of minimum memory
 const IGPUMemLimit = 1 * format.GibiByte // 512G is what they typically report, so anything less than 1G must be iGPU

-var CudartLinuxGlobs = []string{
-	"/usr/local/cuda/lib64/libcudart.so*",
-	"/usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so*",
-	"/usr/lib/x86_64-linux-gnu/libcudart.so*",
-	"/usr/lib/wsl/lib/libcudart.so*",
-	"/usr/lib/wsl/drivers/*/libcudart.so*",
-	"/opt/cuda/lib64/libcudart.so*",
-	"/usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so*",
-	"/usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so*",
-	"/usr/lib/aarch64-linux-gnu/libcudart.so*",
-	"/usr/local/cuda/lib*/libcudart.so*",
-	"/usr/lib*/libcudart.so*",
-	"/usr/local/lib*/libcudart.so*",
-}
-
-var CudartWindowsGlobs = []string{
-	"c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
-}
-
-var NvcudaLinuxGlobs = []string{
-	"/usr/local/cuda*/targets/*/lib/libcuda.so*",
-	"/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
-	"/usr/lib/*-linux-gnu/libcuda.so*",
-	"/usr/lib/wsl/lib/libcuda.so*",
-	"/usr/lib/wsl/drivers/*/libcuda.so*",
-	"/opt/cuda/lib*/libcuda.so*",
-	"/usr/local/cuda/lib*/libcuda.so*",
-	"/usr/lib*/libcuda.so*",
-	"/usr/local/lib*/libcuda.so*",
-}
-
-var NvcudaWindowsGlobs = []string{
-	"c:\\windows\\system*\\nvcuda.dll",
-}
-
-var OneapiWindowsGlobs = []string{
-	"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
-}
-
-var OneapiLinuxGlobs = []string{
-	"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
-	"/usr/lib*/libze_intel_gpu.so*",
-}
-
 // Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed.
 // Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices.
 var CudaTegra string = os.Getenv("JETSON_JETPACK")

 // Note: gpuMutex must already be held
-func initGPUHandles() *handles {
+func initCudaHandles() *cudaHandles {
 	// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing

-	gpuHandles := &handles{}
-	var cudartMgmtName string
-	var cudartMgmtPatterns []string
-	var nvcudaMgmtName string
-	var nvcudaMgmtPatterns []string
-	var oneapiMgmtName string
-	var oneapiMgmtPatterns []string
-
-	tmpDir, _ := PayloadsDir()
-	switch runtime.GOOS {
-	case "windows":
-		cudartMgmtName = "cudart64_*.dll"
-		localAppData := os.Getenv("LOCALAPPDATA")
-		cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", cudartMgmtName)}
-		cudartMgmtPatterns = append(cudartMgmtPatterns, CudartWindowsGlobs...)
-		// Aligned with driver, we can't carry as payloads
-		nvcudaMgmtName = "nvcuda.dll"
-		nvcudaMgmtPatterns = NvcudaWindowsGlobs
-		oneapiMgmtName = "ze_intel_gpu64.dll"
-		oneapiMgmtPatterns = OneapiWindowsGlobs
-	case "linux":
-		cudartMgmtName = "libcudart.so*"
-		if tmpDir != "" {
-			// TODO - add "payloads" for subprocess
-			cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", cudartMgmtName)}
-		}
-		cudartMgmtPatterns = append(cudartMgmtPatterns, CudartLinuxGlobs...)
-		// Aligned with driver, we can't carry as payloads
-		nvcudaMgmtName = "libcuda.so*"
-		nvcudaMgmtPatterns = NvcudaLinuxGlobs
-		oneapiMgmtName = "libze_intel_gpu.so"
-		oneapiMgmtPatterns = OneapiLinuxGlobs
-	default:
-		return gpuHandles
-	}
-
-	slog.Debug("Detecting GPUs")
-	nvcudaLibPaths := FindGPULibs(nvcudaMgmtName, nvcudaMgmtPatterns)
+	cHandles := &cudaHandles{}
+	// Short Circuit if we already know which library to use
+	if nvmlLibPath != "" {
+		cHandles.nvml, _ = LoadNVMLMgmt([]string{nvmlLibPath})
+		return cHandles
+	}
+	if nvcudaLibPath != "" {
+		cHandles.deviceCount, cHandles.nvcuda, _ = LoadNVCUDAMgmt([]string{nvcudaLibPath})
+		return cHandles
+	}
+	if cudartLibPath != "" {
+		cHandles.deviceCount, cHandles.cudart, _ = LoadCUDARTMgmt([]string{cudartLibPath})
+		return cHandles
+	}
+
+	slog.Debug("searching for GPU discovery libraries for NVIDIA")
+	var cudartMgmtPatterns []string
+
+	// Aligned with driver, we can't carry as payloads
+	nvcudaMgmtPatterns := NvcudaGlobs
+	if runtime.GOOS == "windows" {
+		localAppData := os.Getenv("LOCALAPPDATA")
+		cudartMgmtPatterns = []string{filepath.Join(localAppData, "Programs", "Ollama", CudartMgmtName)}
+	}
+	tmpDir, _ := PayloadsDir()
+	if tmpDir != "" {
+		// TODO - add "payloads" for subprocess
+		cudartMgmtPatterns = []string{filepath.Join(tmpDir, "cuda*", CudartMgmtName)}
+	}
+	cudartMgmtPatterns = append(cudartMgmtPatterns, CudartGlobs...)
+
+	if len(NvmlGlobs) > 0 {
+		nvmlLibPaths := FindGPULibs(NvmlMgmtName, NvmlGlobs)
+		if len(nvmlLibPaths) > 0 {
+			nvml, libPath := LoadNVMLMgmt(nvmlLibPaths)
+			if nvml != nil {
+				slog.Debug("nvidia-ml loaded", "library", libPath)
+				cHandles.nvml = nvml
+				nvmlLibPath = libPath
+			}
+		}
+	}
+
+	nvcudaLibPaths := FindGPULibs(NvcudaMgmtName, nvcudaMgmtPatterns)
 	if len(nvcudaLibPaths) > 0 {
 		deviceCount, nvcuda, libPath := LoadNVCUDAMgmt(nvcudaLibPaths)
 		if nvcuda != nil {
 			slog.Debug("detected GPUs", "count", deviceCount, "library", libPath)
-			gpuHandles.nvcuda = nvcuda
-			gpuHandles.deviceCount = deviceCount
-			return gpuHandles
+			cHandles.nvcuda = nvcuda
+			cHandles.deviceCount = deviceCount
+			nvcudaLibPath = libPath
+			return cHandles
 		}
 	}

-	cudartLibPaths := FindGPULibs(cudartMgmtName, cudartMgmtPatterns)
+	cudartLibPaths := FindGPULibs(CudartMgmtName, cudartMgmtPatterns)
 	if len(cudartLibPaths) > 0 {
 		deviceCount, cudart, libPath := LoadCUDARTMgmt(cudartLibPaths)
 		if cudart != nil {
 			slog.Debug("detected GPUs", "library", libPath, "count", deviceCount)
-			gpuHandles.cudart = cudart
-			gpuHandles.deviceCount = deviceCount
-			return gpuHandles
+			cHandles.cudart = cudart
+			cHandles.deviceCount = deviceCount
+			cudartLibPath = libPath
+			return cHandles
 		}
 	}

-	oneapiLibPaths := FindGPULibs(oneapiMgmtName, oneapiMgmtPatterns)
-	if len(oneapiLibPaths) > 0 {
-		deviceCount, oneapi, libPath := LoadOneapiMgmt(oneapiLibPaths)
-		if oneapi != nil {
-			slog.Debug("detected Intel GPUs", "library", libPath, "count", deviceCount)
-			gpuHandles.oneapi = oneapi
-			gpuHandles.deviceCount = deviceCount
-			return gpuHandles
-		}
-	}
-
-	return gpuHandles
+	return cHandles
+}
+
+// Note: gpuMutex must already be held
+func initOneAPIHandles() *oneapiHandles {
+	oHandles := &oneapiHandles{}
+
+	// Short Circuit if we already know which library to use
+	if oneapiLibPath != "" {
+		oHandles.deviceCount, oHandles.oneapi, _ = LoadOneapiMgmt([]string{oneapiLibPath})
+		return oHandles
+	}
+
+	oneapiLibPaths := FindGPULibs(OneapiMgmtName, OneapiGlobs)
+	if len(oneapiLibPaths) > 0 {
+		oHandles.deviceCount, oHandles.oneapi, oneapiLibPath = LoadOneapiMgmt(oneapiLibPaths)
+	}
+
+	return oHandles
+}
+
+func GetCPUInfo() GpuInfoList {
+	gpuMutex.Lock()
+	if !bootstrapped {
+		gpuMutex.Unlock()
+		GetGPUInfo()
+	} else {
+		gpuMutex.Unlock()
+	}
+	return GpuInfoList{cpus[0].GpuInfo}
 }

 func GetGPUInfo() GpuInfoList {
@@ -178,50 +178,82 @@ func GetGPUInfo() GpuInfoList {
 	// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
 	gpuMutex.Lock()
 	defer gpuMutex.Unlock()
-	gpuHandles := initGPUHandles()
+	needRefresh := true
+	var cHandles *cudaHandles
+	var oHandles *oneapiHandles
 	defer func() {
-		if gpuHandles.cudart != nil {
-			C.cudart_release(*gpuHandles.cudart)
-		}
-		if gpuHandles.nvcuda != nil {
-			C.nvcuda_release(*gpuHandles.nvcuda)
+		if cHandles != nil {
+			if cHandles.cudart != nil {
+				C.cudart_release(*cHandles.cudart)
+			}
+			if cHandles.nvcuda != nil {
+				C.nvcuda_release(*cHandles.nvcuda)
+			}
+			if cHandles.nvml != nil {
+				C.nvml_release(*cHandles.nvml)
+			}
+		}
+		if oHandles != nil {
+			if oHandles.oneapi != nil {
+				// TODO - is this needed?
+				C.oneapi_release(*oHandles.oneapi)
+			}
 		}
 	}()

-	// All our GPU builds on x86 have AVX enabled, so fallback to CPU if we don't detect at least AVX
-	cpuVariant := GetCPUVariant()
-	if cpuVariant == "" && runtime.GOARCH == "amd64" {
-		slog.Warn("CPU does not have AVX or AVX2, disabling GPU support.")
+	if !bootstrapped {
+		slog.Info("looking for compatible GPUs")
+		needRefresh = false
+		cpuCapability = GetCPUCapability()
+		var memInfo C.mem_info_t
+
+		mem, err := GetCPUMem()
+		if err != nil {
+			slog.Warn("error looking up system memory", "error", err)
+		}
+		cpus = []CPUInfo{CPUInfo{
+			GpuInfo: GpuInfo{
+				memInfo: mem,
+				Library: "cpu",
+				Variant: cpuCapability,
+				ID:      "0",
+			},
+		}}
+
+		// Fallback to CPU mode if we're lacking required vector extensions on x86
+		if cpuCapability < GPURunnerCPUCapability && runtime.GOARCH == "amd64" {
+			slog.Warn("CPU does not have minimum vector extensions, GPU inference disabled", "required", GPURunnerCPUCapability, "detected", cpuCapability)
+			bootstrapped = true
+			// No need to do any GPU discovery, since we can't run on them
+			return GpuInfoList{cpus[0].GpuInfo}
 	}

 	// On windows we bundle the nvidia library one level above the runner dir
 	depPath := ""
 	if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
-		depPath = filepath.Dir(envconfig.RunnersDir)
+		depPath = filepath.Join(filepath.Dir(envconfig.RunnersDir), "cuda")
 	}

-	var memInfo C.mem_info_t
-	resp := []GpuInfo{}
+	// Load ALL libraries
+	cHandles = initCudaHandles()

-	// NVIDIA first
-	for i := 0; i < gpuHandles.deviceCount; i++ {
-		// TODO once we support CPU compilation variants of GPU libraries refine this...
-		if cpuVariant == "" && runtime.GOARCH == "amd64" {
-			continue
-		}
-		if gpuHandles.cudart != nil || gpuHandles.nvcuda != nil {
-			gpuInfo := GpuInfo{
+	// NVIDIA
+	for i := range cHandles.deviceCount {
+		if cHandles.cudart != nil || cHandles.nvcuda != nil {
+			gpuInfo := CudaGPUInfo{
+				GpuInfo: GpuInfo{
 					Library: "cuda",
+				},
+				index: i,
 			}
 			var driverMajor int
 			var driverMinor int
-			if gpuHandles.cudart != nil {
-				C.cudart_check_vram(*gpuHandles.cudart, C.int(i), &memInfo)
+			if cHandles.cudart != nil {
+				C.cudart_bootstrap(*cHandles.cudart, C.int(i), &memInfo)
 			} else {
-				C.nvcuda_check_vram(*gpuHandles.nvcuda, C.int(i), &memInfo)
-				driverMajor = int(gpuHandles.nvcuda.driver_major)
-				driverMinor = int(gpuHandles.nvcuda.driver_minor)
+				C.nvcuda_bootstrap(*cHandles.nvcuda, C.int(i), &memInfo)
+				driverMajor = int(cHandles.nvcuda.driver_major)
+				driverMinor = int(cHandles.nvcuda.driver_minor)
 			}
 			if memInfo.err != nil {
 				slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
@@ -239,63 +271,195 @@ func GetGPUInfo() GpuInfoList {
 			gpuInfo.MinimumMemory = cudaMinimumMemory
 			gpuInfo.DependencyPath = depPath
 			gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
-			gpuInfo.DriverMajor = int(driverMajor)
-			gpuInfo.DriverMinor = int(driverMinor)
+			gpuInfo.DriverMajor = driverMajor
+			gpuInfo.DriverMinor = driverMinor
+
+			// query the management library as well so we can record any skew between the two
+			// which represents overhead on the GPU we must set aside on subsequent updates
+			if cHandles.nvml != nil {
+				C.nvml_get_free(*cHandles.nvml, C.int(gpuInfo.index), &memInfo.free, &memInfo.total, &memInfo.used)
+				if memInfo.err != nil {
+					slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
+					C.free(unsafe.Pointer(memInfo.err))
+				} else {
+					if memInfo.free != 0 && uint64(memInfo.free) > gpuInfo.FreeMemory {
+						gpuInfo.OSOverhead = uint64(memInfo.free) - gpuInfo.FreeMemory
+						slog.Info("detected OS VRAM overhead",
+							"id", gpuInfo.ID,
+							"library", gpuInfo.Library,
+							"compute", gpuInfo.Compute,
+							"driver", fmt.Sprintf("%d.%d", gpuInfo.DriverMajor, gpuInfo.DriverMinor),
+							"name", gpuInfo.Name,
+							"overhead", format.HumanBytes2(gpuInfo.OSOverhead),
+						)
+					}
+				}
+			}

 			// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
-			resp = append(resp, gpuInfo)
-		}
-		if gpuHandles.oneapi != nil {
-			gpuInfo := GpuInfo{
-				Library: "oneapi",
-			}
-			C.oneapi_check_vram(*gpuHandles.oneapi, &memInfo)
-			var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
-			memInfo.free = C.uint64_t(totalFreeMem)
-			gpuInfo.TotalMemory = uint64(memInfo.total)
-			gpuInfo.FreeMemory = uint64(memInfo.free)
-			gpuInfo.ID = strconv.Itoa(i)
-			resp = append(resp, gpuInfo)
-		}
-	}
-
-	// Then AMD
-	resp = append(resp, AMDGetGPUInfo()...)
-
-	if len(resp) == 0 {
-		C.cpu_check_ram(&memInfo)
-		if memInfo.err != nil {
-			slog.Info("error looking up CPU memory", "error", C.GoString(memInfo.err))
-			C.free(unsafe.Pointer(memInfo.err))
-			return resp
-		}
-		gpuInfo := GpuInfo{
-			Library: "cpu",
-			Variant: cpuVariant,
-		}
-		gpuInfo.TotalMemory = uint64(memInfo.total)
-		gpuInfo.FreeMemory = uint64(memInfo.free)
-		gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
-		resp = append(resp, gpuInfo)
-	}
+			cudaGPUs = append(cudaGPUs, gpuInfo)
+		}
+	}
+
+	// Intel
+	if envconfig.IntelGpu {
+		oHandles = initOneAPIHandles()
+		// On windows we bundle the oneapi library one level above the runner dir
+		depPath = ""
+		if runtime.GOOS == "windows" && envconfig.RunnersDir != "" {
+			depPath = filepath.Join(filepath.Dir(envconfig.RunnersDir), "oneapi")
+		}
+
+		for d := range oHandles.oneapi.num_drivers {
+			if oHandles.oneapi == nil {
+				// shouldn't happen
+				slog.Warn("nil oneapi handle with driver count", "count", int(oHandles.oneapi.num_drivers))
+				continue
+			}
+			devCount := C.oneapi_get_device_count(*oHandles.oneapi, C.int(d))
+			for i := range devCount {
+				gpuInfo := OneapiGPUInfo{
+					GpuInfo: GpuInfo{
+						Library: "oneapi",
+					},
+					driverIndex: int(d),
+					gpuIndex:    int(i),
+				}
+				// TODO - split bootstrapping from updating free memory
+				C.oneapi_check_vram(*oHandles.oneapi, C.int(d), i, &memInfo)
+				// TODO - convert this to MinimumMemory based on testing...
+				var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
+				memInfo.free = C.uint64_t(totalFreeMem)
+				gpuInfo.TotalMemory = uint64(memInfo.total)
+				gpuInfo.FreeMemory = uint64(memInfo.free)
+				gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
+				gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
+				gpuInfo.DependencyPath = depPath
+				oneapiGPUs = append(oneapiGPUs, gpuInfo)
+			}
+		}
+	}
+
+		rocmGPUs = AMDGetGPUInfo()
+		bootstrapped = true
+		if len(cudaGPUs) == 0 && len(rocmGPUs) == 0 && len(oneapiGPUs) == 0 {
+			slog.Info("no compatible GPUs were discovered")
+		}
+	}
+
+	// For detected GPUs, load library if not loaded
+
+	// Refresh free memory usage
+	if needRefresh {
+		mem, err := GetCPUMem()
+		if err != nil {
+			slog.Warn("error looking up system memory", "error", err)
+		} else {
+			slog.Debug("updating system memory data",
+				slog.Group(
+					"before",
+					"total", format.HumanBytes2(cpus[0].TotalMemory),
+					"free", format.HumanBytes2(cpus[0].FreeMemory),
+					"free_swap", format.HumanBytes2(cpus[0].FreeSwap),
+				),
+				slog.Group(
+					"now",
+					"total", format.HumanBytes2(mem.TotalMemory),
+					"free", format.HumanBytes2(mem.FreeMemory),
+					"free_swap", format.HumanBytes2(mem.FreeSwap),
+				),
+			)
+			cpus[0].FreeMemory = mem.FreeMemory
+			cpus[0].FreeSwap = mem.FreeSwap
+		}
+
+		var memInfo C.mem_info_t
+		if cHandles == nil && len(cudaGPUs) > 0 {
+			cHandles = initCudaHandles()
+		}
+		for i, gpu := range cudaGPUs {
+			if cHandles.nvml != nil {
+				C.nvml_get_free(*cHandles.nvml, C.int(gpu.index), &memInfo.free, &memInfo.total, &memInfo.used)
+			} else if cHandles.cudart != nil {
+				C.cudart_bootstrap(*cHandles.cudart, C.int(gpu.index), &memInfo)
+			} else if cHandles.nvcuda != nil {
+				C.nvcuda_get_free(*cHandles.nvcuda, C.int(gpu.index), &memInfo.free, &memInfo.total)
+				memInfo.used = memInfo.total - memInfo.free
+			} else {
+				// shouldn't happen
+				slog.Warn("no valid cuda library loaded to refresh vram usage")
+				break
+			}
+			if memInfo.err != nil {
+				slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
+				C.free(unsafe.Pointer(memInfo.err))
+				continue
+			}
+			if memInfo.free == 0 {
+				slog.Warn("error looking up nvidia GPU memory")
+				continue
+			}
+			if cHandles.nvml != nil && gpu.OSOverhead > 0 {
+				// When using the management library update based on recorded overhead
+				memInfo.free -= C.uint64_t(gpu.OSOverhead)
+			}
+			slog.Debug("updating cuda memory data",
+				"gpu", gpu.ID,
+				"name", gpu.Name,
+				"overhead", format.HumanBytes2(gpu.OSOverhead),
+				slog.Group(
+					"before",
+					"total", format.HumanBytes2(gpu.TotalMemory),
+					"free", format.HumanBytes2(gpu.FreeMemory),
+				),
+				slog.Group(
+					"now",
+					"total", format.HumanBytes2(uint64(memInfo.total)),
+					"free", format.HumanBytes2(uint64(memInfo.free)),
+					"used", format.HumanBytes2(uint64(memInfo.used)),
+				),
+			)
+			cudaGPUs[i].FreeMemory = uint64(memInfo.free)
+		}
+
+		if oHandles == nil && len(oneapiGPUs) > 0 {
+			oHandles = initOneAPIHandles()
+		}
+		for i, gpu := range oneapiGPUs {
+			if oHandles.oneapi == nil {
+				// shouldn't happen
+				slog.Warn("nil oneapi handle with device count", "count", oHandles.deviceCount)
+				continue
+			}
+			C.oneapi_check_vram(*oHandles.oneapi, C.int(gpu.driverIndex), C.int(gpu.gpuIndex), &memInfo)
+			// TODO - convert this to MinimumMemory based on testing...
+			var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
+			memInfo.free = C.uint64_t(totalFreeMem)
+			oneapiGPUs[i].FreeMemory = uint64(memInfo.free)
+		}
+
+		err = RocmGPUInfoList(rocmGPUs).RefreshFreeMemory()
+		if err != nil {
+			slog.Debug("problem refreshing ROCm free memory", "error", err)
+		}
+	}
+
+	resp := []GpuInfo{}
+	for _, gpu := range cudaGPUs {
+		resp = append(resp, gpu.GpuInfo)
+	}
+	for _, gpu := range rocmGPUs {
+		resp = append(resp, gpu.GpuInfo)
+	}
+	for _, gpu := range oneapiGPUs {
+		resp = append(resp, gpu.GpuInfo)
+	}
+	if len(resp) == 0 {
+		resp = append(resp, cpus[0].GpuInfo)
+	}
 	return resp
 }

-func GetCPUMem() (memInfo, error) {
-	var ret memInfo
-	var info C.mem_info_t
-	C.cpu_check_ram(&info)
-	if info.err != nil {
-		defer C.free(unsafe.Pointer(info.err))
-		return ret, fmt.Errorf(C.GoString(info.err))
-	}
-	ret.FreeMemory = uint64(info.free)
-	ret.TotalMemory = uint64(info.total)
-	return ret, nil
-}
-
 func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
 	// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
 	var ldPaths []string
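The OS VRAM overhead bookkeeping above is simple arithmetic: at bootstrap, if NVML reports more free memory than the CUDA runtime does, the difference is remembered as OSOverhead and subtracted from every later NVML reading. A small worked sketch in Go with made-up numbers:

// Hypothetical figures to illustrate the bookkeeping from the hunk above.
const (
	nvmlFreeAtBootstrap = uint64(7_800_000_000) // NVML view: 7.8 GB free
	cudaFreeAtBootstrap = uint64(7_300_000_000) // CUDA runtime view: 7.3 GB free
)

func overheadSketch(nvmlFreeNow uint64) uint64 {
	var osOverhead uint64
	if nvmlFreeAtBootstrap > cudaFreeAtBootstrap {
		osOverhead = nvmlFreeAtBootstrap - cudaFreeAtBootstrap // 0.5 GB set aside
	}
	// On refresh the NVML reading is reduced by the recorded overhead,
	// mirroring `memInfo.free -= C.uint64_t(gpu.OSOverhead)` above.
	return nvmlFreeNow - osOverhead
}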
@@ -326,6 +490,7 @@ func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
 		// Nvidia PhysX known to return bogus results
 		if strings.Contains(pattern, "PhysX") {
 			slog.Debug("skipping PhysX cuda library path", "path", pattern)
+			continue
 		}
 		// Ignore glob discovery errors
 		matches, _ := filepath.Glob(pattern)
@@ -382,7 +547,23 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
 		defer C.free(unsafe.Pointer(lib))
 		C.nvcuda_init(lib, &resp)
 		if resp.err != nil {
-			slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err))
+			// Decide what log level based on the type of error message to help users understand why
+			msg := C.GoString(resp.err)
+			switch resp.cudaErr {
+			case C.CUDA_ERROR_INSUFFICIENT_DRIVER, C.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH:
+				slog.Warn("version mismatch between driver and cuda driver library - reboot or upgrade may be required", "library", libPath, "error", msg)
+			case C.CUDA_ERROR_NO_DEVICE:
+				slog.Info("no nvidia devices detected", "library", libPath)
+			case C.CUDA_ERROR_UNKNOWN:
+				slog.Warn("unknown error initializing cuda driver library", "library", libPath, "error", msg)
+				slog.Warn("see https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for more information")
+			default:
+				if strings.Contains(msg, "wrong ELF class") {
+					slog.Debug("skipping 32bit library", "library", libPath)
+				} else {
+					slog.Info("unable to load cuda driver library", "library", libPath, "error", msg)
+				}
+			}
 			C.free(unsafe.Pointer(resp.err))
 		} else {
 			return int(resp.num_devices), &resp.ch, libPath
@@ -391,8 +572,26 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
 	return 0, nil, ""
 }

+func LoadNVMLMgmt(nvmlLibPaths []string) (*C.nvml_handle_t, string) {
+	var resp C.nvml_init_resp_t
+	resp.ch.verbose = getVerboseState()
+	for _, libPath := range nvmlLibPaths {
+		lib := C.CString(libPath)
+		defer C.free(unsafe.Pointer(lib))
+		C.nvml_init(lib, &resp)
+		if resp.err != nil {
+			slog.Info(fmt.Sprintf("Unable to load NVML management library %s: %s", libPath, C.GoString(resp.err)))
+			C.free(unsafe.Pointer(resp.err))
+		} else {
+			return &resp.ch, libPath
+		}
+	}
+	return nil, ""
+}
+
 func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
 	var resp C.oneapi_init_resp_t
+	num_devices := 0
 	resp.oh.verbose = getVerboseState()
 	for _, libPath := range oneapiLibPaths {
 		lib := C.CString(libPath)
@@ -402,7 +601,10 @@ func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
 			slog.Debug("Unable to load oneAPI management library", "library", libPath, "error", C.GoString(resp.err))
 			C.free(unsafe.Pointer(resp.err))
 		} else {
-			return int(resp.num_devices), &resp.oh, libPath
+			for i := range resp.oh.num_drivers {
+				num_devices += int(C.oneapi_get_device_count(resp.oh, C.int(i)))
+			}
+			return num_devices, &resp.oh, libPath
 		}
 	}
 	return 0, nil, ""
@@ -24,7 +24,7 @@ func GetGPUInfo() GpuInfoList {
 	return []GpuInfo{
 		{
 			Library: "cpu",
-			Variant: GetCPUVariant(),
+			Variant: GetCPUCapability(),
 			memInfo: mem,
 		},
 	}
@@ -42,10 +42,22 @@ func GetGPUInfo() GpuInfoList {
 	return []GpuInfo{info}
 }

+func GetCPUInfo() GpuInfoList {
+	mem, _ := GetCPUMem()
+	return []GpuInfo{
+		{
+			Library: "cpu",
+			Variant: GetCPUCapability(),
+			memInfo: mem,
+		},
+	}
+}
+
 func GetCPUMem() (memInfo, error) {
 	return memInfo{
 		TotalMemory: uint64(C.getPhysicalMemory()),
-		FreeMemory:  0,
+		FreeMemory:  uint64(C.getFreeMemory()),
+		// FreeSwap omitted as Darwin uses dynamic paging
 	}, nil
 }
@@ -47,6 +47,7 @@ typedef struct mem_info {
   char gpu_name[GPU_NAME_LEN];
   uint64_t total;
   uint64_t free;
+  uint64_t used;

   // Compute Capability
   int major;
@@ -62,6 +63,7 @@ void cpu_check_ram(mem_info_t *resp);

 #include "gpu_info_cudart.h"
 #include "gpu_info_nvcuda.h"
+#include "gpu_info_nvml.h"
 #include "gpu_info_oneapi.h"

 #endif // __GPU_INFO_H__
@@ -1,45 +0,0 @@
-#include "gpu_info.h"
-// Fallbacks for CPU mode
-
-#ifdef _WIN32
-#include <sysinfoapi.h>
-void cpu_check_ram(mem_info_t *resp) {
-  resp->err = NULL;
-  MEMORYSTATUSEX info;
-  info.dwLength = sizeof(info);
-  if (GlobalMemoryStatusEx(&info) != 0) {
-    resp->total = info.ullTotalPhys;
-    resp->free = info.ullAvailPhys;
-    snprintf(&resp->gpu_id[0], GPU_ID_LEN, "0");
-  } else {
-    resp->err = LOAD_ERR();
-  }
-  return;
-}
-
-#elif __linux__
-#include <errno.h>
-#include <string.h>
-#include <sys/sysinfo.h>
-void cpu_check_ram(mem_info_t *resp) {
-  struct sysinfo info;
-  resp->err = NULL;
-  if (sysinfo(&info) != 0) {
-    resp->err = strdup(strerror(errno));
-  } else {
-    resp->total = info.totalram * info.mem_unit;
-    resp->free = info.freeram * info.mem_unit;
-    snprintf(&resp->gpu_id[0], GPU_ID_LEN, "0");
-  }
-  return;
-}
-
-#elif __APPLE__
-// TODO consider an Apple implementation that does something useful
-// mem_info_t cpu_check_ram() {
-//   mem_info_t resp = {0, 0, NULL};
-//   return resp;
-// }
-#else
-#error "Unsupported platform"
-#endif
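With this C fallback removed, system memory is read from Go per platform (the Darwin side appears in gpu_darwin.go above). For Linux the obvious source is /proc/meminfo; the sketch below is an illustration of that approach, not the diff's actual implementation:

// Hypothetical Linux-side reader, for illustration only.
package gpu

import (
	"bufio"
	"os"
	"strconv"
	"strings"
)

func linuxCPUMemSketch() (total, free uint64, err error) {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		return 0, 0, err
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text()) // e.g. "MemAvailable: 12345 kB"
		if len(fields) < 2 {
			continue
		}
		kb, _ := strconv.ParseUint(fields[1], 10, 64)
		switch fields[0] {
		case "MemTotal:":
			total = kb * 1024
		case "MemAvailable:":
			free = kb * 1024
		}
	}
	return total, free, s.Err()
}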
@@ -40,7 +40,7 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {

   for (i = 0; l[i].s != NULL; i++) {
     *l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
-    if (!l[i].p) {
+    if (!*(l[i].p)) {
       char *msg = LOAD_ERR();
       LOG(resp->ch.verbose, "dlerr: %s\n", msg);
       UNLOAD_LIBRARY(resp->ch.handle);
@@ -94,7 +94,7 @@ void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp) {
 }

-void cudart_check_vram(cudart_handle_t h, int i, mem_info_t *resp) {
+void cudart_bootstrap(cudart_handle_t h, int i, mem_info_t *resp) {
   resp->err = NULL;
   cudartMemory_t memInfo = {0,0,0};
   cudartReturn_t ret;
@@ -166,9 +166,11 @@ void cudart_bootstrap(cudart_handle_t h, int i, mem_info_t *resp) {

   resp->total = memInfo.total;
   resp->free = memInfo.free;
+  resp->used = memInfo.used;

   LOG(h.verbose, "[%s] CUDA totalMem %lu\n", resp->gpu_id, resp->total);
   LOG(h.verbose, "[%s] CUDA freeMem %lu\n", resp->gpu_id, resp->free);
+  LOG(h.verbose, "[%s] CUDA usedMem %lu\n", resp->gpu_id, resp->used);
   LOG(h.verbose, "[%s] Compute Capability %d.%d\n", resp->gpu_id, resp->major, resp->minor);
 }
@@ -140,7 +140,8 @@ typedef struct cudart_init_resp {
 } cudart_init_resp_t;

 void cudart_init(char *cudart_lib_path, cudart_init_resp_t *resp);
-void cudart_check_vram(cudart_handle_t ch, int device_id, mem_info_t *resp);
+void cudart_bootstrap(cudart_handle_t ch, int device_id, mem_info_t *resp);
+// TODO - if we keep this library longer term, add cudart_get_free
 void cudart_release(cudart_handle_t ch);

 #endif  // __GPU_INFO_CUDART_H__
@@ -2,3 +2,4 @@
 #include <stdint.h>
 uint64_t getRecommendedMaxVRAM();
 uint64_t getPhysicalMemory();
+uint64_t getFreeMemory();
@@ -1,4 +1,5 @@
-// go:build darwin
+#import <Foundation/Foundation.h>
+#import <mach/mach.h>
 #include "gpu_info_darwin.h"

 uint64_t getRecommendedMaxVRAM() {
@@ -8,6 +9,27 @@ uint64_t getRecommendedMaxVRAM() {
   return result;
 }

+// getPhysicalMemory returns the total physical memory in bytes
 uint64_t getPhysicalMemory() {
-  return [[NSProcessInfo processInfo] physicalMemory];
+  return [NSProcessInfo processInfo].physicalMemory;
+}
+
+// getFreeMemory returns the total free memory in bytes, including inactive
+// memory that can be reclaimed by the system.
+uint64_t getFreeMemory() {
+  mach_port_t host_port = mach_host_self();
+  mach_msg_type_number_t host_size = sizeof(vm_statistics64_data_t) / sizeof(integer_t);
+  vm_size_t pagesize;
+  vm_statistics64_data_t vm_stat;
+
+  host_page_size(host_port, &pagesize);
+  if (host_statistics64(host_port, HOST_VM_INFO64, (host_info64_t)&vm_stat, &host_size) != KERN_SUCCESS) {
+    return 0;
+  }
+
+  uint64_t free_memory = (uint64_t)vm_stat.free_count * pagesize;
+  free_memory += (uint64_t)vm_stat.speculative_count * pagesize;
+  free_memory += (uint64_t)vm_stat.inactive_count * pagesize;
+
+  return free_memory;
 }
@@ -7,6 +7,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
   CUresult ret;
   resp->err = NULL;
   resp->num_devices = 0;
+  resp->cudaErr = CUDA_SUCCESS;
   const int buflen = 256;
   char buf[buflen + 1];
   int i;
@@ -38,12 +39,13 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
             nvcuda_lib_path, msg);
     free(msg);
     resp->err = strdup(buf);
+    resp->cudaErr = -1;
     return;
   }

   for (i = 0; l[i].s != NULL; i++) {
     *l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
-    if (!*l[i].p) {
+    if (!*(l[i].p)) {
       char *msg = LOAD_ERR();
       LOG(resp->ch.verbose, "dlerr: %s\n", msg);
       UNLOAD_LIBRARY(resp->ch.handle);
@@ -52,6 +54,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
               msg);
       free(msg);
       resp->err = strdup(buf);
+      resp->cudaErr = -1;
       return;
     }
   }
@@ -61,12 +64,9 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
     LOG(resp->ch.verbose, "cuInit err: %d\n", ret);
     UNLOAD_LIBRARY(resp->ch.handle);
     resp->ch.handle = NULL;
-    if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) {
-      resp->err = strdup("your nvidia driver is too old or missing. If you have a CUDA GPU please upgrade to run ollama");
-      return;
-    }
-    snprintf(buf, buflen, "nvcuda init failure: %d", ret);
+    snprintf(buf, buflen, "cuda driver library init failure: %d", ret);
     resp->err = strdup(buf);
+    resp->cudaErr = ret;
     return;
   }

@@ -91,12 +91,13 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
     resp->ch.handle = NULL;
     snprintf(buf, buflen, "unable to get device count: %d", ret);
     resp->err = strdup(buf);
+    resp->cudaErr = ret;
     return;
   }
 }

 const int buflen = 256;
-void nvcuda_check_vram(nvcuda_handle_t h, int i, mem_info_t *resp) {
+void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {
   resp->err = NULL;
   nvcudaMemory_t memInfo = {0,0};
   CUresult ret;
@@ -106,13 +107,13 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {
   CUuuid uuid = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};

   if (h.handle == NULL) {
-    resp->err = strdup("nvcuda handle isn't initialized");
+    resp->err = strdup("cuda driver library handle isn't initialized");
     return;
   }

   ret = (*h.cuDeviceGet)(&device, i);
   if (ret != CUDA_SUCCESS) {
-    snprintf(buf, buflen, "nvcuda device failed to initialize");
+    snprintf(buf, buflen, "cuda driver library device failed to initialize");
     resp->err = strdup(buf);
     return;
   }
@@ -168,14 +169,14 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {
   // To get memory we have to set (and release) a context
   ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device);
   if (ret != CUDA_SUCCESS) {
-    snprintf(buf, buflen, "nvcuda failed to get primary device context %d", ret);
+    snprintf(buf, buflen, "cuda driver library failed to get device context %d", ret);
     resp->err = strdup(buf);
     return;
   }

   ret = (*h.cuMemGetInfo_v2)(&memInfo.free, &memInfo.total);
   if (ret != CUDA_SUCCESS) {
-    snprintf(buf, buflen, "nvcuda device memory info lookup failure %d", ret);
+    snprintf(buf, buflen, "cuda driver library device memory info lookup failure %d", ret);
     resp->err = strdup(buf);
     // Best effort on failure...
     (*h.cuCtxDestroy)(ctx);
@@ -193,12 +194,47 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {

   ret = (*h.cuCtxDestroy)(ctx);
   if (ret != CUDA_SUCCESS) {
-    LOG(1, "nvcuda failed to release primary device context %d", ret);
+    LOG(1, "cuda driver library failed to release device context %d", ret);
+  }
+}
+
+void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) {
+  CUresult ret;
+  CUcontext ctx = NULL;
+  CUdevice device = -1;
+  *free = 0;
+  *total = 0;
+
+  ret = (*h.cuDeviceGet)(&device, i);
+  if (ret != CUDA_SUCCESS) {
+    LOG(1, "cuda driver library device failed to initialize");
+    return;
+  }
+
+  // To get memory we have to set (and release) a context
+  ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device);
+  if (ret != CUDA_SUCCESS) {
+    LOG(1, "cuda driver library failed to get device context %d", ret);
+    return;
+  }
+
+  ret = (*h.cuMemGetInfo_v2)(free, total);
+  if (ret != CUDA_SUCCESS) {
+    LOG(1, "cuda driver library device memory info lookup failure %d", ret);
+    // Best effort on failure...
+    (*h.cuCtxDestroy)(ctx);
+    return;
+  }
+
+  ret = (*h.cuCtxDestroy)(ctx);
+  if (ret != CUDA_SUCCESS) {
+    LOG(1, "cuda driver library failed to release device context %d", ret);
   }
 }

 void nvcuda_release(nvcuda_handle_t h) {
-  LOG(h.verbose, "releasing nvcuda library\n");
+  LOG(h.verbose, "releasing cuda driver library\n");
   UNLOAD_LIBRARY(h.handle);
   // TODO and other context release logic?
   h.handle = NULL;
|
|||||||
typedef enum cudaError_enum {
|
typedef enum cudaError_enum {
|
||||||
CUDA_SUCCESS = 0,
|
CUDA_SUCCESS = 0,
|
||||||
CUDA_ERROR_INVALID_VALUE = 1,
|
CUDA_ERROR_INVALID_VALUE = 1,
|
||||||
CUDA_ERROR_MEMORY_ALLOCATION = 2,
|
CUDA_ERROR_OUT_OF_MEMORY = 2,
|
||||||
CUDA_ERROR_NOT_INITIALIZED = 3,
|
CUDA_ERROR_NOT_INITIALIZED = 3,
|
||||||
CUDA_ERROR_INSUFFICIENT_DRIVER = 35,
|
CUDA_ERROR_INSUFFICIENT_DRIVER = 35,
|
||||||
|
CUDA_ERROR_NO_DEVICE = 100,
|
||||||
|
CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803,
|
||||||
|
CUDA_ERROR_UNKNOWN = 999,
|
||||||
// Other values omitted for now...
|
// Other values omitted for now...
|
||||||
} CUresult;
|
} CUresult;
|
||||||
|
|
||||||
@@ -64,10 +67,12 @@ typedef struct nvcuda_init_resp {
|
|||||||
char *err; // If err is non-null handle is invalid
|
char *err; // If err is non-null handle is invalid
|
||||||
nvcuda_handle_t ch;
|
nvcuda_handle_t ch;
|
||||||
int num_devices;
|
int num_devices;
|
||||||
|
CUresult cudaErr;
|
||||||
} nvcuda_init_resp_t;
|
} nvcuda_init_resp_t;
|
||||||
|
|
||||||
void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp);
|
void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp);
|
||||||
void nvcuda_check_vram(nvcuda_handle_t ch, int device_id, mem_info_t *resp);
|
void nvcuda_bootstrap(nvcuda_handle_t ch, int device_id, mem_info_t *resp);
|
||||||
|
void nvcuda_get_free(nvcuda_handle_t ch, int device_id, uint64_t *free, uint64_t *total);
|
||||||
void nvcuda_release(nvcuda_handle_t ch);
|
void nvcuda_release(nvcuda_handle_t ch);
|
||||||
|
|
||||||
#endif // __GPU_INFO_NVCUDA_H__
|
#endif // __GPU_INFO_NVCUDA_H__
|
||||||
|
104	gpu/gpu_info_nvml.c (new file)
@@ -0,0 +1,104 @@
+#ifndef __APPLE__  // TODO - maybe consider nvidia support on intel macs?
+
+#include <string.h>
+
+#include "gpu_info_nvml.h"
+
+void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp) {
+  nvmlReturn_t ret;
+  resp->err = NULL;
+  const int buflen = 256;
+  char buf[buflen + 1];
+  int i;
+
+  struct lookup {
+    char *s;
+    void **p;
+  } l[] = {
+      {"nvmlInit_v2", (void *)&resp->ch.nvmlInit_v2},
+      {"nvmlShutdown", (void *)&resp->ch.nvmlShutdown},
+      {"nvmlDeviceGetHandleByIndex", (void *)&resp->ch.nvmlDeviceGetHandleByIndex},
+      {"nvmlDeviceGetMemoryInfo", (void *)&resp->ch.nvmlDeviceGetMemoryInfo},
+      {NULL, NULL},
+  };
+
+  resp->ch.handle = LOAD_LIBRARY(nvml_lib_path, RTLD_LAZY);
+  if (!resp->ch.handle) {
+    char *msg = LOAD_ERR();
+    LOG(resp->ch.verbose, "library %s load err: %s\n", nvml_lib_path, msg);
+    snprintf(buf, buflen,
+             "Unable to load %s library to query for Nvidia GPUs: %s",
+             nvml_lib_path, msg);
+    free(msg);
+    resp->err = strdup(buf);
+    return;
+  }
+
+  // TODO once we've squashed the remaining corner cases remove this log
+  // LOG(resp->ch.verbose, "wiring nvidia management library functions in %s\n", nvml_lib_path);
+
+  for (i = 0; l[i].s != NULL; i++) {
+    // TODO once we've squashed the remaining corner cases remove this log
+    // LOG(resp->ch.verbose, "dlsym: %s\n", l[i].s);
+
+    *l[i].p = LOAD_SYMBOL(resp->ch.handle, l[i].s);
+    if (!*(l[i].p)) {
+      resp->ch.handle = NULL;
+      char *msg = LOAD_ERR();
+      LOG(resp->ch.verbose, "dlerr: %s\n", msg);
+      UNLOAD_LIBRARY(resp->ch.handle);
+      snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s,
+               msg);
+      free(msg);
+      resp->err = strdup(buf);
+      return;
+    }
+  }
+
+  ret = (*resp->ch.nvmlInit_v2)();
+  if (ret != NVML_SUCCESS) {
+    LOG(resp->ch.verbose, "nvmlInit_v2 err: %d\n", ret);
+    UNLOAD_LIBRARY(resp->ch.handle);
+    resp->ch.handle = NULL;
+    snprintf(buf, buflen, "nvml vram init failure: %d", ret);
+    resp->err = strdup(buf);
+    return;
+  }
+}
+
+
+void nvml_get_free(nvml_handle_t h, int device_id, uint64_t *free, uint64_t *total, uint64_t *used) {
+  nvmlDevice_t device;
+  nvmlMemory_t memInfo = {0};
+  nvmlReturn_t ret;
+  ret = (*h.nvmlDeviceGetHandleByIndex)(device_id, &device);
+  if (ret != NVML_SUCCESS) {
+    LOG(1, "unable to get device handle %d: %d", device_id, ret);
+    *free = 0;
+    return;
+  }
+
+  ret = (*h.nvmlDeviceGetMemoryInfo)(device, &memInfo);
+  if (ret != NVML_SUCCESS) {
+    LOG(1, "device memory info lookup failure %d: %d", device_id, ret);
+    *free = 0;
+    return;
+  }
+  *free = memInfo.free;
+  *total = memInfo.total;
+  *used = memInfo.used;
+}
+
+
+void nvml_release(nvml_handle_t h) {
+  LOG(h.verbose, "releasing nvml library\n");
+  nvmlReturn_t ret;
+  ret = (*h.nvmlShutdown)();
+  if (ret != NVML_SUCCESS) {
+    LOG(1, "error during nvmlShutdown %d", ret);
+  }
+  UNLOAD_LIBRARY(h.handle);
+  h.handle = NULL;
+}
+
+#endif  // __APPLE__
48	gpu/gpu_info_nvml.h (new file)
@@ -0,0 +1,48 @@
+#ifndef __APPLE__
+#ifndef __GPU_INFO_NVML_H__
+#define __GPU_INFO_NVML_H__
+#include "gpu_info.h"
+
+// Just enough typedef's to dlopen/dlsym for memory information
+typedef enum nvmlReturn_enum {
+  NVML_SUCCESS = 0,
+  // Other values omitted for now...
+} nvmlReturn_t;
+typedef void *nvmlDevice_t;  // Opaque is sufficient
+typedef struct nvmlMemory_st {
+  unsigned long long total;
+  unsigned long long free;
+  unsigned long long used;
+} nvmlMemory_t;
+
+typedef enum nvmlBrandType_enum
+{
+    NVML_BRAND_UNKNOWN = 0,
+} nvmlBrandType_t;
+
+typedef struct nvml_handle {
+  void *handle;
+  uint16_t verbose;
+  nvmlReturn_t (*nvmlInit_v2)(void);
+  nvmlReturn_t (*nvmlShutdown)(void);
+  nvmlReturn_t (*nvmlDeviceGetHandleByIndex)(unsigned int, nvmlDevice_t *);
+  nvmlReturn_t (*nvmlDeviceGetMemoryInfo)(nvmlDevice_t, nvmlMemory_t *);
+} nvml_handle_t;
+
+typedef struct nvml_init_resp {
+  char *err;  // If err is non-null handle is invalid
+  nvml_handle_t ch;
+} nvml_init_resp_t;
+
+typedef struct nvml_compute_capability {
+  char *err;
+  int major;
+  int minor;
+} nvml_compute_capability_t;
+
+void nvml_init(char *nvml_lib_path, nvml_init_resp_t *resp);
+void nvml_get_free(nvml_handle_t ch, int device_id, uint64_t *free, uint64_t *total, uint64_t *used);
+void nvml_release(nvml_handle_t ch);
+
+#endif  // __GPU_INFO_NVML_H__
+#endif  // __APPLE__
@@ -4,15 +4,17 @@

 #include <string.h>

-void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp)
-{
+void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp) {
   ze_result_t ret;
   resp->err = NULL;
+  resp->oh.devices = NULL;
+  resp->oh.num_devices = NULL;
+  resp->oh.drivers = NULL;
+  resp->oh.num_drivers = 0;
   const int buflen = 256;
   char buf[buflen + 1];
-  int i;
-  struct lookup
-  {
+  int i, d;
+  struct lookup {
     char *s;
     void **p;
   } l[] = {
@@ -28,8 +30,7 @@ void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp) {
   };

   resp->oh.handle = LOAD_LIBRARY(oneapi_lib_path, RTLD_LAZY);
-  if (!resp->oh.handle)
-  {
+  if (!resp->oh.handle) {
     char *msg = LOAD_ERR();
     snprintf(buf, buflen,
              "Unable to load %s library to query for Intel GPUs: %s\n",
@@ -44,14 +45,12 @@ void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp) {
       "wiring Level-Zero management library functions in %s\n",
       oneapi_lib_path);

-  for (i = 0; l[i].s != NULL; i++)
-  {
+  for (i = 0; l[i].s != NULL; i++) {
     // TODO once we've squashed the remaining corner cases remove this log
     LOG(resp->oh.verbose, "dlsym: %s\n", l[i].s);

     *l[i].p = LOAD_SYMBOL(resp->oh.handle, l[i].s);
-    if (!l[i].p)
-    {
+    if (!*(l[i].p)) {
       resp->oh.handle = NULL;
       char *msg = LOAD_ERR();
       LOG(resp->oh.verbose, "dlerr: %s\n", msg);
@@ -63,23 +62,70 @@ void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp) {
     }
   }

+  LOG(resp->oh.verbose, "calling zesInit\n");
+
   ret = (*resp->oh.zesInit)(0);
-  if (ret != ZE_RESULT_SUCCESS)
-  {
-    LOG(resp->oh.verbose, "zesInit err: %d\n", ret);
-    UNLOAD_LIBRARY(resp->oh.handle);
-    resp->oh.handle = NULL;
-    snprintf(buf, buflen, "oneapi vram init failure: %d", ret);
+  if (ret != ZE_RESULT_SUCCESS) {
+    LOG(resp->oh.verbose, "zesInit err: %x\n", ret);
+    snprintf(buf, buflen, "oneapi vram init failure: %x", ret);
     resp->err = strdup(buf);
+    oneapi_release(resp->oh);
+    return;
   }

-  (*resp->oh.zesDriverGet)(&resp->num_devices, NULL);
+  LOG(resp->oh.verbose, "calling zesDriverGet\n");
+  ret = (*resp->oh.zesDriverGet)(&resp->oh.num_drivers, NULL);
+  if (ret != ZE_RESULT_SUCCESS) {
+    LOG(resp->oh.verbose, "zesDriverGet err: %x\n", ret);
+    snprintf(buf, buflen, "unable to get driver count: %x", ret);
+    resp->err = strdup(buf);
+    oneapi_release(resp->oh);
+    return;
+  }
+  LOG(resp->oh.verbose, "oneapi driver count: %d\n", resp->oh.num_drivers);
+  resp->oh.drivers = malloc(resp->oh.num_drivers * sizeof(zes_driver_handle_t));
+  resp->oh.num_devices = malloc(resp->oh.num_drivers * sizeof(uint32_t));
+  memset(&resp->oh.num_devices[0], 0, resp->oh.num_drivers * sizeof(uint32_t));
+  resp->oh.devices =
+      malloc(resp->oh.num_drivers * sizeof(zes_device_handle_t *));
+  ret = (*resp->oh.zesDriverGet)(&resp->oh.num_drivers, &resp->oh.drivers[0]);
+  if (ret != ZE_RESULT_SUCCESS) {
+    LOG(resp->oh.verbose, "zesDriverGet err: %x\n", ret);
+    snprintf(buf, buflen, "unable to get driver count: %x", ret);
+    resp->err = strdup(buf);
+    oneapi_release(resp->oh);
+    return;
+  }
+
+  for (d = 0; d < resp->oh.num_drivers; d++) {
+    LOG(resp->oh.verbose, "calling zesDeviceGet count %d: %p\n", d, resp->oh.drivers[d]);
+    ret = (*resp->oh.zesDeviceGet)(resp->oh.drivers[d],
+                                   &resp->oh.num_devices[d], NULL);
+    if (ret != ZE_RESULT_SUCCESS) {
+      LOG(resp->oh.verbose, "zesDeviceGet err: %x\n", ret);
+      snprintf(buf, buflen, "unable to get device count: %x", ret);
+      resp->err = strdup(buf);
+      oneapi_release(resp->oh);
+      return;
+    }
+    resp->oh.devices[d] =
+        malloc(resp->oh.num_devices[d] * sizeof(zes_device_handle_t));
+    ret = (*resp->oh.zesDeviceGet)(
+        resp->oh.drivers[d], &resp->oh.num_devices[d], resp->oh.devices[d]);
+    if (ret != ZE_RESULT_SUCCESS) {
+      LOG(resp->oh.verbose, "zesDeviceGet err: %x\n", ret);
+      snprintf(buf, buflen, "unable to get device count: %x", ret);
+      resp->err = strdup(buf);
+      oneapi_release(resp->oh);
+      return;
+    }
+  }
+
   return;
 }

-void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
+void oneapi_check_vram(oneapi_handle_t h, int driver, int device,
|
||||||
{
|
mem_info_t *resp) {
|
||||||
ze_result_t ret;
|
ze_result_t ret;
|
||||||
resp->err = NULL;
|
resp->err = NULL;
|
||||||
uint64_t totalMem = 0;
|
uint64_t totalMem = 0;
|
||||||
@@ -88,49 +134,19 @@ void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
|
|||||||
char buf[buflen + 1];
|
char buf[buflen + 1];
|
||||||
int i, d, m;
|
int i, d, m;
|
||||||
|
|
||||||
if (h.handle == NULL)
|
if (h.handle == NULL) {
|
||||||
{
|
|
||||||
resp->err = strdup("Level-Zero handle not initialized");
|
resp->err = strdup("Level-Zero handle not initialized");
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t driversCount = 0;
|
if (driver >= h.num_drivers || device >= h.num_devices[driver]) {
|
||||||
ret = (*h.zesDriverGet)(&driversCount, NULL);
|
resp->err = strdup("driver of device index out of bounds");
|
||||||
if (ret != ZE_RESULT_SUCCESS)
|
|
||||||
{
|
|
||||||
snprintf(buf, buflen, "unable to get driver count: %d", ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
LOG(h.verbose, "discovered %d Level-Zero drivers\n", driversCount);
|
|
||||||
|
|
||||||
zes_driver_handle_t *allDrivers =
|
|
||||||
malloc(driversCount * sizeof(zes_driver_handle_t));
|
|
||||||
(*h.zesDriverGet)(&driversCount, allDrivers);
|
|
||||||
|
|
||||||
resp->total = 0;
|
resp->total = 0;
|
||||||
resp->free = 0;
|
resp->free = 0;
|
||||||
|
|
||||||
for (d = 0; d < driversCount; d++)
|
|
||||||
{
|
|
||||||
uint32_t deviceCount = 0;
|
|
||||||
ret = (*h.zesDeviceGet)(allDrivers[d], &deviceCount, NULL);
|
|
||||||
if (ret != ZE_RESULT_SUCCESS)
|
|
||||||
{
|
|
||||||
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
|
||||||
resp->err = strdup(buf);
|
|
||||||
free(allDrivers);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
LOG(h.verbose, "discovered %d Level-Zero devices\n", deviceCount);
|
|
||||||
|
|
||||||
zes_device_handle_t *devices =
|
|
||||||
malloc(deviceCount * sizeof(zes_device_handle_t));
|
|
||||||
(*h.zesDeviceGet)(allDrivers[d], &deviceCount, devices);
|
|
||||||
|
|
||||||
for (i = 0; i < deviceCount; i++)
|
|
||||||
{
|
|
||||||
zes_device_ext_properties_t ext_props;
|
zes_device_ext_properties_t ext_props;
|
||||||
ext_props.stype = ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES;
|
ext_props.stype = ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES;
|
||||||
ext_props.pNext = NULL;
|
ext_props.pNext = NULL;
|
||||||
@@ -139,61 +155,61 @@ void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
|
|||||||
props.stype = ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES;
|
props.stype = ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES;
|
||||||
props.pNext = &ext_props;
|
props.pNext = &ext_props;
|
||||||
|
|
||||||
ret = (*h.zesDeviceGetProperties)(devices[i], &props);
|
ret = (*h.zesDeviceGetProperties)(h.devices[driver][device], &props);
|
||||||
if (ret != ZE_RESULT_SUCCESS)
|
if (ret != ZE_RESULT_SUCCESS) {
|
||||||
{
|
|
||||||
snprintf(buf, buflen, "unable to get device properties: %d", ret);
|
snprintf(buf, buflen, "unable to get device properties: %d", ret);
|
||||||
resp->err = strdup(buf);
|
resp->err = strdup(buf);
|
||||||
free(allDrivers);
|
|
||||||
free(devices);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (h.verbose)
|
snprintf(&resp->gpu_name[0], GPU_NAME_LEN, "%s", props.modelName);
|
||||||
{
|
|
||||||
|
// TODO this needs to map to ONEAPI_DEVICE_SELECTOR syntax
|
||||||
|
// (this is probably wrong...)
|
||||||
|
// TODO - the driver isn't included - what if there are multiple drivers?
|
||||||
|
snprintf(&resp->gpu_id[0], GPU_ID_LEN, "%d", device);
|
||||||
|
|
||||||
|
if (h.verbose) {
|
||||||
// When in verbose mode, report more information about
|
// When in verbose mode, report more information about
|
||||||
// the card we discover.
|
// the card we discover.
|
||||||
LOG(h.verbose, "[%d] oneAPI device name: %s\n", i,
|
LOG(h.verbose, "[%d:%d] oneAPI device name: %s\n", driver, device,
|
||||||
props.modelName);
|
props.modelName);
|
||||||
LOG(h.verbose, "[%d] oneAPI brand: %s\n", i,
|
LOG(h.verbose, "[%d:%d] oneAPI brand: %s\n", driver, device,
|
||||||
props.brandName);
|
props.brandName);
|
||||||
LOG(h.verbose, "[%d] oneAPI vendor: %s\n", i,
|
LOG(h.verbose, "[%d:%d] oneAPI vendor: %s\n", driver, device,
|
||||||
props.vendorName);
|
props.vendorName);
|
||||||
LOG(h.verbose, "[%d] oneAPI S/N: %s\n", i,
|
LOG(h.verbose, "[%d:%d] oneAPI S/N: %s\n", driver, device,
|
||||||
props.serialNumber);
|
props.serialNumber);
|
||||||
LOG(h.verbose, "[%d] oneAPI board number: %s\n", i,
|
LOG(h.verbose, "[%d:%d] oneAPI board number: %s\n", driver, device,
|
||||||
props.boardNumber);
|
props.boardNumber);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO
|
||||||
|
// Compute Capability equivalent in resp->major, resp->minor, resp->patch
|
||||||
|
|
||||||
uint32_t memCount = 0;
|
uint32_t memCount = 0;
|
||||||
ret = (*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, NULL);
|
ret = (*h.zesDeviceEnumMemoryModules)(h.devices[driver][device], &memCount,
|
||||||
if (ret != ZE_RESULT_SUCCESS)
|
NULL);
|
||||||
{
|
if (ret != ZE_RESULT_SUCCESS) {
|
||||||
snprintf(buf, buflen,
|
snprintf(buf, buflen, "unable to enumerate Level-Zero memory modules: %x",
|
||||||
"unable to enumerate Level-Zero memory modules: %d", ret);
|
ret);
|
||||||
resp->err = strdup(buf);
|
resp->err = strdup(buf);
|
||||||
free(allDrivers);
|
|
||||||
free(devices);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG(h.verbose, "discovered %d Level-Zero memory modules\n", memCount);
|
LOG(h.verbose, "discovered %d Level-Zero memory modules\n", memCount);
|
||||||
|
|
||||||
zes_mem_handle_t *mems = malloc(memCount * sizeof(zes_mem_handle_t));
|
zes_mem_handle_t *mems = malloc(memCount * sizeof(zes_mem_handle_t));
|
||||||
(*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, mems);
|
(*h.zesDeviceEnumMemoryModules)(h.devices[driver][device], &memCount, mems);
|
||||||
|
|
||||||
for (m = 0; m < memCount; m++)
|
for (m = 0; m < memCount; m++) {
|
||||||
{
|
|
||||||
zes_mem_state_t state;
|
zes_mem_state_t state;
|
||||||
state.stype = ZES_STRUCTURE_TYPE_MEM_STATE;
|
state.stype = ZES_STRUCTURE_TYPE_MEM_STATE;
|
||||||
state.pNext = NULL;
|
state.pNext = NULL;
|
||||||
ret = (*h.zesMemoryGetState)(mems[m], &state);
|
ret = (*h.zesMemoryGetState)(mems[m], &state);
|
||||||
if (ret != ZE_RESULT_SUCCESS)
|
if (ret != ZE_RESULT_SUCCESS) {
|
||||||
{
|
snprintf(buf, buflen, "unable to get memory state: %x", ret);
|
||||||
snprintf(buf, buflen, "unable to get memory state: %d", ret);
|
|
||||||
resp->err = strdup(buf);
|
resp->err = strdup(buf);
|
||||||
free(allDrivers);
|
|
||||||
free(devices);
|
|
||||||
free(mems);
|
free(mems);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -205,10 +221,39 @@ void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
|
|||||||
free(mems);
|
free(mems);
|
||||||
}
|
}
|
||||||
|
|
||||||
free(devices);
|
void oneapi_release(oneapi_handle_t h) {
|
||||||
|
int d;
|
||||||
|
LOG(h.verbose, "releasing oneapi library\n");
|
||||||
|
for (d = 0; d < h.num_drivers; d++) {
|
||||||
|
if (h.devices != NULL && h.devices[d] != NULL) {
|
||||||
|
free(h.devices[d]);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (h.devices != NULL) {
|
||||||
|
free(h.devices);
|
||||||
|
h.devices = NULL;
|
||||||
|
}
|
||||||
|
if (h.num_devices != NULL) {
|
||||||
|
free(h.num_devices);
|
||||||
|
h.num_devices = NULL;
|
||||||
|
}
|
||||||
|
if (h.drivers != NULL) {
|
||||||
|
free(h.drivers);
|
||||||
|
h.drivers = NULL;
|
||||||
|
}
|
||||||
|
h.num_drivers = 0;
|
||||||
|
UNLOAD_LIBRARY(h.handle);
|
||||||
|
h.handle = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
free(allDrivers);
|
int oneapi_get_device_count(oneapi_handle_t h, int driver) {
|
||||||
|
if (h.handle == NULL || h.num_devices == NULL) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
if (driver > h.num_drivers) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return (int)h.num_devices[driver];
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif // __APPLE__
|
#endif // __APPLE__
|
||||||
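With this change the handle tracks a device list per Level-Zero driver rather than one flat list, so callers loop over drivers first and then over that driver's devices (oneapi_get_device_count reports the per-driver count). A small Go sketch of that jagged [driver][device] indexing, purely illustrative and not part of the patch:

package main

import "fmt"

// vram models the free/total pair oneapi_check_vram reports per device.
type vram struct{ free, total uint64 }

// totalFree walks every driver's device list and sums the free VRAM,
// the same driver-then-device iteration order the C code now uses.
func totalFree(devicesByDriver [][]vram) uint64 {
    var sum uint64
    for driver := range devicesByDriver {
        for device := range devicesByDriver[driver] {
            sum += devicesByDriver[driver][device].free
        }
    }
    return sum
}

func main() {
    gpus := [][]vram{
        {{free: 8 << 30, total: 16 << 30}},                                 // driver 0: one GPU
        {{free: 4 << 30, total: 8 << 30}, {free: 2 << 30, total: 4 << 30}}, // driver 1: two GPUs
    }
    fmt.Printf("%d bytes free\n", totalFree(gpus)) // 15032385536 (14 GiB)
}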
|
@@ -9,8 +9,7 @@
|
|||||||
#define ZE_BIT(_i) (1 << _i)
|
#define ZE_BIT(_i) (1 << _i)
|
||||||
|
|
||||||
// Just enough typedef's to dlopen/dlsym for memory information
|
// Just enough typedef's to dlopen/dlsym for memory information
|
||||||
typedef enum ze_result_t
|
typedef enum ze_result_t {
|
||||||
{
|
|
||||||
ZE_RESULT_SUCCESS = 0,
|
ZE_RESULT_SUCCESS = 0,
|
||||||
// Other values omitted for now...
|
// Other values omitted for now...
|
||||||
} ze_result_t;
|
} ze_result_t;
|
||||||
@@ -20,13 +19,11 @@ typedef struct _zes_driver_handle_t *zes_driver_handle_t;
|
|||||||
typedef struct _zes_device_handle_t *zes_device_handle_t;
|
typedef struct _zes_device_handle_t *zes_device_handle_t;
|
||||||
typedef struct _zes_mem_handle_t *zes_mem_handle_t;
|
typedef struct _zes_mem_handle_t *zes_mem_handle_t;
|
||||||
|
|
||||||
typedef enum _ze_structure_type_t
|
typedef enum _ze_structure_type_t {
|
||||||
{
|
|
||||||
ZE_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
ZE_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
} ze_structure_type_t;
|
} ze_structure_type_t;
|
||||||
|
|
||||||
typedef enum _zes_structure_type_t
|
typedef enum _zes_structure_type_t {
|
||||||
{
|
|
||||||
ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES = 0x1,
|
ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES = 0x1,
|
||||||
ZES_STRUCTURE_TYPE_MEM_PROPERTIES = 0xb,
|
ZES_STRUCTURE_TYPE_MEM_PROPERTIES = 0xb,
|
||||||
ZES_STRUCTURE_TYPE_MEM_STATE = 0x1e,
|
ZES_STRUCTURE_TYPE_MEM_STATE = 0x1e,
|
||||||
@@ -34,35 +31,29 @@ typedef enum _zes_structure_type_t
|
|||||||
ZES_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
ZES_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
} zes_structure_type_t;
|
} zes_structure_type_t;
|
||||||
|
|
||||||
typedef enum _zes_mem_type_t
|
typedef enum _zes_mem_type_t {
|
||||||
{
|
|
||||||
ZES_MEM_TYPE_FORCE_UINT32 = 0x7fffffff
|
ZES_MEM_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
} zes_mem_type_t;
|
} zes_mem_type_t;
|
||||||
|
|
||||||
typedef enum _zes_mem_loc_t
|
typedef enum _zes_mem_loc_t {
|
||||||
{
|
|
||||||
ZES_MEM_LOC_SYSTEM = 0,
|
ZES_MEM_LOC_SYSTEM = 0,
|
||||||
ZES_MEM_LOC_DEVICE = 1,
|
ZES_MEM_LOC_DEVICE = 1,
|
||||||
ZES_MEM_LOC_FORCE_UINT32 = 0x7fffffff
|
ZES_MEM_LOC_FORCE_UINT32 = 0x7fffffff
|
||||||
} zes_mem_loc_t;
|
} zes_mem_loc_t;
|
||||||
|
|
||||||
typedef enum _zes_mem_health_t
|
typedef enum _zes_mem_health_t {
|
||||||
{
|
|
||||||
ZES_MEM_HEALTH_FORCE_UINT32 = 0x7fffffff
|
ZES_MEM_HEALTH_FORCE_UINT32 = 0x7fffffff
|
||||||
} zes_mem_health_t;
|
} zes_mem_health_t;
|
||||||
|
|
||||||
typedef struct _ze_device_uuid_t
|
typedef struct _ze_device_uuid_t {
|
||||||
{
|
|
||||||
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||||
} ze_device_uuid_t;
|
} ze_device_uuid_t;
|
||||||
|
|
||||||
typedef struct _zes_uuid_t
|
typedef struct _zes_uuid_t {
|
||||||
{
|
|
||||||
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||||
} zes_uuid_t;
|
} zes_uuid_t;
|
||||||
|
|
||||||
typedef enum _ze_device_type_t
|
typedef enum _ze_device_type_t {
|
||||||
{
|
|
||||||
ZE_DEVICE_TYPE_GPU = 1,
|
ZE_DEVICE_TYPE_GPU = 1,
|
||||||
ZE_DEVICE_TYPE_CPU = 2,
|
ZE_DEVICE_TYPE_CPU = 2,
|
||||||
ZE_DEVICE_TYPE_FPGA = 3,
|
ZE_DEVICE_TYPE_FPGA = 3,
|
||||||
@@ -71,8 +62,7 @@ typedef enum _ze_device_type_t
|
|||||||
ZE_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff
|
ZE_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||||
} ze_device_type_t;
|
} ze_device_type_t;
|
||||||
|
|
||||||
typedef enum _zes_device_type_t
|
typedef enum _zes_device_type_t {
|
||||||
{
|
|
||||||
ZES_DEVICE_TYPE_GPU = 1,
|
ZES_DEVICE_TYPE_GPU = 1,
|
||||||
ZES_DEVICE_TYPE_CPU = 2,
|
ZES_DEVICE_TYPE_CPU = 2,
|
||||||
ZES_DEVICE_TYPE_FPGA = 3,
|
ZES_DEVICE_TYPE_FPGA = 3,
|
||||||
@@ -82,8 +72,7 @@ typedef enum _zes_device_type_t
|
|||||||
} zes_device_type_t;
|
} zes_device_type_t;
|
||||||
|
|
||||||
typedef uint32_t ze_device_property_flags_t;
|
typedef uint32_t ze_device_property_flags_t;
|
||||||
typedef enum _ze_device_property_flag_t
|
typedef enum _ze_device_property_flag_t {
|
||||||
{
|
|
||||||
ZE_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
ZE_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||||
ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||||
ZE_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
ZE_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||||
@@ -92,8 +81,7 @@ typedef enum _ze_device_property_flag_t
|
|||||||
} ze_device_property_flag_t;
|
} ze_device_property_flag_t;
|
||||||
|
|
||||||
typedef uint32_t zes_device_property_flags_t;
|
typedef uint32_t zes_device_property_flags_t;
|
||||||
typedef enum _zes_device_property_flag_t
|
typedef enum _zes_device_property_flag_t {
|
||||||
{
|
|
||||||
ZES_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
ZES_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||||
ZES_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
ZES_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||||
ZES_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
ZES_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||||
@@ -101,8 +89,7 @@ typedef enum _zes_device_property_flag_t
|
|||||||
ZES_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff
|
ZES_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff
|
||||||
} zes_device_property_flag_t;
|
} zes_device_property_flag_t;
|
||||||
|
|
||||||
typedef struct _ze_device_properties_t
|
typedef struct _ze_device_properties_t {
|
||||||
{
|
|
||||||
ze_structure_type_t stype;
|
ze_structure_type_t stype;
|
||||||
void *pNext;
|
void *pNext;
|
||||||
ze_device_type_t type;
|
ze_device_type_t type;
|
||||||
@@ -126,8 +113,7 @@ typedef struct _ze_device_properties_t
|
|||||||
char name[ZE_MAX_DEVICE_NAME];
|
char name[ZE_MAX_DEVICE_NAME];
|
||||||
} ze_device_properties_t;
|
} ze_device_properties_t;
|
||||||
|
|
||||||
typedef struct _zes_device_properties_t
|
typedef struct _zes_device_properties_t {
|
||||||
{
|
|
||||||
zes_structure_type_t stype;
|
zes_structure_type_t stype;
|
||||||
void *pNext;
|
void *pNext;
|
||||||
ze_device_properties_t core;
|
ze_device_properties_t core;
|
||||||
@@ -140,8 +126,7 @@ typedef struct _zes_device_properties_t
|
|||||||
char driverVersion[ZES_STRING_PROPERTY_SIZE];
|
char driverVersion[ZES_STRING_PROPERTY_SIZE];
|
||||||
} zes_device_properties_t;
|
} zes_device_properties_t;
|
||||||
|
|
||||||
typedef struct _zes_device_ext_properties_t
|
typedef struct _zes_device_ext_properties_t {
|
||||||
{
|
|
||||||
zes_structure_type_t stype;
|
zes_structure_type_t stype;
|
||||||
void *pNext;
|
void *pNext;
|
||||||
zes_uuid_t uuid;
|
zes_uuid_t uuid;
|
||||||
@@ -149,8 +134,7 @@ typedef struct _zes_device_ext_properties_t
|
|||||||
zes_device_property_flags_t flags;
|
zes_device_property_flags_t flags;
|
||||||
} zes_device_ext_properties_t;
|
} zes_device_ext_properties_t;
|
||||||
|
|
||||||
typedef struct _zes_mem_properties_t
|
typedef struct _zes_mem_properties_t {
|
||||||
{
|
|
||||||
zes_structure_type_t stype;
|
zes_structure_type_t stype;
|
||||||
void *pNext;
|
void *pNext;
|
||||||
zes_mem_type_t type;
|
zes_mem_type_t type;
|
||||||
@@ -162,8 +146,7 @@ typedef struct _zes_mem_properties_t
|
|||||||
int32_t numChannels;
|
int32_t numChannels;
|
||||||
} zes_mem_properties_t;
|
} zes_mem_properties_t;
|
||||||
|
|
||||||
typedef struct _zes_mem_state_t
|
typedef struct _zes_mem_state_t {
|
||||||
{
|
|
||||||
zes_structure_type_t stype;
|
zes_structure_type_t stype;
|
||||||
const void *pNext;
|
const void *pNext;
|
||||||
zes_mem_health_t health;
|
zes_mem_health_t health;
|
||||||
@@ -171,10 +154,19 @@ typedef struct _zes_mem_state_t
|
|||||||
uint64_t size;
|
uint64_t size;
|
||||||
} zes_mem_state_t;
|
} zes_mem_state_t;
|
||||||
|
|
||||||
typedef struct oneapi_handle
|
typedef struct oneapi_handle {
|
||||||
{
|
|
||||||
void *handle;
|
void *handle;
|
||||||
uint16_t verbose;
|
uint16_t verbose;
|
||||||
|
|
||||||
|
uint32_t num_drivers;
|
||||||
|
zes_driver_handle_t *drivers;
|
||||||
|
uint32_t *num_devices;
|
||||||
|
zes_device_handle_t **devices;
|
||||||
|
|
||||||
|
// TODO Driver major, minor information
|
||||||
|
// int driver_major;
|
||||||
|
// int driver_minor;
|
||||||
|
|
||||||
ze_result_t (*zesInit)(int);
|
ze_result_t (*zesInit)(int);
|
||||||
ze_result_t (*zesDriverGet)(uint32_t *pCount, zes_driver_handle_t *phDrivers);
|
ze_result_t (*zesDriverGet)(uint32_t *pCount, zes_driver_handle_t *phDrivers);
|
||||||
ze_result_t (*zesDeviceGet)(zes_driver_handle_t hDriver, uint32_t *pCount,
|
ze_result_t (*zesDeviceGet)(zes_driver_handle_t hDriver, uint32_t *pCount,
|
||||||
@@ -191,21 +183,21 @@ typedef struct oneapi_handle
|
|||||||
|
|
||||||
} oneapi_handle_t;
|
} oneapi_handle_t;
|
||||||
|
|
||||||
typedef struct oneapi_init_resp
|
typedef struct oneapi_init_resp {
|
||||||
{
|
|
||||||
char *err; // If err is non-null handle is invalid
|
char *err; // If err is non-null handle is invalid
|
||||||
int num_devices;
|
|
||||||
oneapi_handle_t oh;
|
oneapi_handle_t oh;
|
||||||
} oneapi_init_resp_t;
|
} oneapi_init_resp_t;
|
||||||
|
|
||||||
typedef struct oneapi_version_resp
|
typedef struct oneapi_version_resp {
|
||||||
{
|
|
||||||
ze_result_t status;
|
ze_result_t status;
|
||||||
char *str; // Contains version or error string if status != 0
|
char *str; // Contains version or error string if status != 0
|
||||||
} oneapi_version_resp_t;
|
} oneapi_version_resp_t;
|
||||||
|
|
||||||
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp);
|
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp);
|
||||||
void oneapi_check_vram(oneapi_handle_t rh, mem_info_t *resp);
|
void oneapi_check_vram(oneapi_handle_t h, int driver, int device,
|
||||||
|
mem_info_t *resp);
|
||||||
|
void oneapi_release(oneapi_handle_t h);
|
||||||
|
int oneapi_get_device_count(oneapi_handle_t h, int driver);
|
||||||
|
|
||||||
#endif // __GPU_INFO_INTEL_H__
|
#endif // __GPU_INFO_INTEL_H__
|
||||||
#endif // __APPLE__
|
#endif // __APPLE__
|
||||||
|
gpu/gpu_linux.go (new file, 90 lines)
@@ -0,0 +1,90 @@
package gpu

import (
    "bufio"
    "fmt"
    "os"
    "strings"

    "github.com/ollama/ollama/format"
)

var CudartGlobs = []string{
    "/usr/local/cuda/lib64/libcudart.so*",
    "/usr/lib/x86_64-linux-gnu/nvidia/current/libcudart.so*",
    "/usr/lib/x86_64-linux-gnu/libcudart.so*",
    "/usr/lib/wsl/lib/libcudart.so*",
    "/usr/lib/wsl/drivers/*/libcudart.so*",
    "/opt/cuda/lib64/libcudart.so*",
    "/usr/local/cuda*/targets/aarch64-linux/lib/libcudart.so*",
    "/usr/lib/aarch64-linux-gnu/nvidia/current/libcudart.so*",
    "/usr/lib/aarch64-linux-gnu/libcudart.so*",
    "/usr/local/cuda/lib*/libcudart.so*",
    "/usr/lib*/libcudart.so*",
    "/usr/local/lib*/libcudart.so*",
}

var NvmlGlobs = []string{}

var NvcudaGlobs = []string{
    "/usr/local/cuda*/targets/*/lib/libcuda.so*",
    "/usr/lib/*-linux-gnu/nvidia/current/libcuda.so*",
    "/usr/lib/*-linux-gnu/libcuda.so*",
    "/usr/lib/wsl/lib/libcuda.so*",
    "/usr/lib/wsl/drivers/*/libcuda.so*",
    "/opt/cuda/lib*/libcuda.so*",
    "/usr/local/cuda/lib*/libcuda.so*",
    "/usr/lib*/libcuda.so*",
    "/usr/local/lib*/libcuda.so*",
}

var OneapiGlobs = []string{
    "/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
    "/usr/lib*/libze_intel_gpu.so*",
}

var CudartMgmtName = "libcudart.so*"
var NvcudaMgmtName = "libcuda.so*"
var NvmlMgmtName = "" // not currently wired on linux
var OneapiMgmtName = "libze_intel_gpu.so"

func GetCPUMem() (memInfo, error) {
    var mem memInfo
    var total, available, free, buffers, cached, freeSwap uint64
    f, err := os.Open("/proc/meminfo")
    if err != nil {
        return mem, err
    }
    defer f.Close()
    s := bufio.NewScanner(f)
    for s.Scan() {
        line := s.Text()
        switch {
        case strings.HasPrefix(line, "MemTotal:"):
            _, err = fmt.Sscanf(line, "MemTotal:%d", &total)
        case strings.HasPrefix(line, "MemAvailable:"):
            _, err = fmt.Sscanf(line, "MemAvailable:%d", &available)
        case strings.HasPrefix(line, "MemFree:"):
            _, err = fmt.Sscanf(line, "MemFree:%d", &free)
        case strings.HasPrefix(line, "Buffers:"):
            _, err = fmt.Sscanf(line, "Buffers:%d", &buffers)
        case strings.HasPrefix(line, "Cached:"):
            _, err = fmt.Sscanf(line, "Cached:%d", &cached)
        case strings.HasPrefix(line, "SwapFree:"):
            _, err = fmt.Sscanf(line, "SwapFree:%d", &freeSwap)
        default:
            continue
        }
        if err != nil {
            return mem, err
        }
    }
    mem.TotalMemory = total * format.KibiByte
    mem.FreeSwap = freeSwap * format.KibiByte
    if available > 0 {
        mem.FreeMemory = available * format.KibiByte
    } else {
        mem.FreeMemory = (free + buffers + cached) * format.KibiByte
    }
    return mem, nil
}
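GetCPUMem relies on fmt.Sscanf skipping the whitespace between the label and the value, and on format.KibiByte to turn the kB figures that /proc/meminfo reports into bytes. A minimal, self-contained sketch of that parsing step (the sample line is hard-coded, and the 1024 multiplier is an assumption, since format.KibiByte itself is not shown in this diff):

package main

import "fmt"

const kibiByte = 1024 // assumed value of format.KibiByte

func main() {
    // A typical /proc/meminfo line; Sscanf skips the padding spaces and
    // stops before the trailing "kB" unit.
    line := "MemTotal:       16309972 kB"

    var totalKiB uint64
    if _, err := fmt.Sscanf(line, "MemTotal:%d", &totalKiB); err != nil {
        panic(err)
    }

    // Convert kibibytes to bytes, as GetCPUMem does for every field.
    fmt.Println("total bytes:", totalKiB*kibiByte) // 16701411328
}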
@@ -5,11 +5,12 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestBasicGetGPUInfo(t *testing.T) {
|
func TestBasicGetGPUInfo(t *testing.T) {
|
||||||
info := GetGPUInfo()
|
info := GetGPUInfo()
|
||||||
assert.Greater(t, len(info), 0)
|
assert.NotEmpty(t, len(info))
|
||||||
assert.Contains(t, "cuda rocm cpu metal", info[0].Library)
|
assert.Contains(t, "cuda rocm cpu metal", info[0].Library)
|
||||||
if info[0].Library != "cpu" {
|
if info[0].Library != "cpu" {
|
||||||
assert.Greater(t, info[0].TotalMemory, uint64(0))
|
assert.Greater(t, info[0].TotalMemory, uint64(0))
|
||||||
@@ -19,7 +20,7 @@ func TestBasicGetGPUInfo(t *testing.T) {
|
|||||||
|
|
||||||
func TestCPUMemInfo(t *testing.T) {
|
func TestCPUMemInfo(t *testing.T) {
|
||||||
info, err := GetCPUMem()
|
info, err := GetCPUMem()
|
||||||
assert.NoError(t, err)
|
require.NoError(t, err)
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
case "darwin":
|
case "darwin":
|
||||||
t.Skip("CPU memory not populated on darwin")
|
t.Skip("CPU memory not populated on darwin")
|
||||||
|
gpu/gpu_windows.go (new file, 55 lines)
@@ -0,0 +1,55 @@
package gpu

import (
    "fmt"
    "syscall"
    "unsafe"
)

type MEMORYSTATUSEX struct {
    length               uint32
    MemoryLoad           uint32
    TotalPhys            uint64
    AvailPhys            uint64
    TotalPageFile        uint64
    AvailPageFile        uint64
    TotalVirtual         uint64
    AvailVirtual         uint64
    AvailExtendedVirtual uint64
}

var (
    k32                      = syscall.NewLazyDLL("kernel32.dll")
    globalMemoryStatusExProc = k32.NewProc("GlobalMemoryStatusEx")
    sizeofMemoryStatusEx     = uint32(unsafe.Sizeof(MEMORYSTATUSEX{}))
)

var CudartGlobs = []string{
    "c:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\bin\\cudart64_*.dll",
}

var NvmlGlobs = []string{
    "c:\\Windows\\System32\\nvml.dll",
}

var NvcudaGlobs = []string{
    "c:\\windows\\system*\\nvcuda.dll",
}

var OneapiGlobs = []string{
    "c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
}

var CudartMgmtName = "cudart64_*.dll"
var NvcudaMgmtName = "nvcuda.dll"
var NvmlMgmtName = "nvml.dll"
var OneapiMgmtName = "ze_intel_gpu64.dll"

func GetCPUMem() (memInfo, error) {
    memStatus := MEMORYSTATUSEX{length: sizeofMemoryStatusEx}
    r1, _, err := globalMemoryStatusExProc.Call(uintptr(unsafe.Pointer(&memStatus)))
    if r1 == 0 {
        return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err)
    }
    return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys, FreeSwap: memStatus.AvailPageFile}, nil
}
gpu/types.go (63 changed lines)
@@ -10,6 +10,7 @@ import (
|
|||||||
type memInfo struct {
|
type memInfo struct {
|
||||||
TotalMemory uint64 `json:"total_memory,omitempty"`
|
TotalMemory uint64 `json:"total_memory,omitempty"`
|
||||||
FreeMemory uint64 `json:"free_memory,omitempty"`
|
FreeMemory uint64 `json:"free_memory,omitempty"`
|
||||||
|
FreeSwap uint64 `json:"free_swap,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Beginning of an `ollama info` command
|
// Beginning of an `ollama info` command
|
||||||
@@ -18,7 +19,7 @@ type GpuInfo struct {
|
|||||||
Library string `json:"library,omitempty"`
|
Library string `json:"library,omitempty"`
|
||||||
|
|
||||||
// Optional variant to select (e.g. versions, cpu feature flags)
|
// Optional variant to select (e.g. versions, cpu feature flags)
|
||||||
Variant string `json:"variant,omitempty"`
|
Variant CPUCapability `json:"variant"`
|
||||||
|
|
||||||
// MinimumMemory represents the minimum memory required to use the GPU
|
// MinimumMemory represents the minimum memory required to use the GPU
|
||||||
MinimumMemory uint64 `json:"-"`
|
MinimumMemory uint64 `json:"-"`
|
||||||
@@ -26,6 +27,14 @@ type GpuInfo struct {
|
|||||||
// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
|
// Any extra PATH/LD_LIBRARY_PATH dependencies required for the Library to operate properly
|
||||||
DependencyPath string `json:"lib_path,omitempty"`
|
DependencyPath string `json:"lib_path,omitempty"`
|
||||||
|
|
||||||
|
// Extra environment variables specific to the GPU as list of [key,value]
|
||||||
|
EnvWorkarounds [][2]string `json:"envs,omitempty"`
|
||||||
|
|
||||||
|
// Set to true if we can NOT reliably discover FreeMemory. A value of true indicates
|
||||||
|
// the FreeMemory is best effort, and may over or under report actual memory usage
|
||||||
|
// False indicates FreeMemory can generally be trusted on this GPU
|
||||||
|
UnreliableFreeMemory bool
|
||||||
|
|
||||||
// GPU information
|
// GPU information
|
||||||
ID string `json:"gpu_id"` // string to use for selection of this specific GPU
|
ID string `json:"gpu_id"` // string to use for selection of this specific GPU
|
||||||
Name string `json:"name"` // user friendly name if available
|
Name string `json:"name"` // user friendly name if available
|
||||||
@@ -38,6 +47,31 @@ type GpuInfo struct {
|
|||||||
// TODO other performance capability info to help in scheduling decisions
|
// TODO other performance capability info to help in scheduling decisions
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CPUInfo struct {
|
||||||
|
GpuInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
type CudaGPUInfo struct {
|
||||||
|
GpuInfo
|
||||||
|
OSOverhead uint64 // Memory overhead between the driver library and management library
|
||||||
|
index int //nolint:unused,nolintlint
|
||||||
|
}
|
||||||
|
type CudaGPUInfoList []CudaGPUInfo
|
||||||
|
|
||||||
|
type RocmGPUInfo struct {
|
||||||
|
GpuInfo
|
||||||
|
usedFilepath string //nolint:unused,nolintlint
|
||||||
|
index int //nolint:unused,nolintlint
|
||||||
|
}
|
||||||
|
type RocmGPUInfoList []RocmGPUInfo
|
||||||
|
|
||||||
|
type OneapiGPUInfo struct {
|
||||||
|
GpuInfo
|
||||||
|
driverIndex int //nolint:unused,nolintlint
|
||||||
|
gpuIndex int //nolint:unused,nolintlint
|
||||||
|
}
|
||||||
|
type OneapiGPUInfoList []OneapiGPUInfo
|
||||||
|
|
||||||
type GpuInfoList []GpuInfo
|
type GpuInfoList []GpuInfo
|
||||||
|
|
||||||
// Split up the set of gpu info's by Library and variant
|
// Split up the set of gpu info's by Library and variant
|
||||||
@@ -47,8 +81,8 @@ func (l GpuInfoList) ByLibrary() []GpuInfoList {
|
|||||||
for _, info := range l {
|
for _, info := range l {
|
||||||
found := false
|
found := false
|
||||||
requested := info.Library
|
requested := info.Library
|
||||||
if info.Variant != "" {
|
if info.Variant != CPUCapabilityNone {
|
||||||
requested += "_" + info.Variant
|
requested += "_" + info.Variant.String()
|
||||||
}
|
}
|
||||||
for i, lib := range libs {
|
for i, lib := range libs {
|
||||||
if lib == requested {
|
if lib == requested {
|
||||||
@@ -86,3 +120,26 @@ type ByFreeMemory []GpuInfo
func (a ByFreeMemory) Len() int           { return len(a) }
func (a ByFreeMemory) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByFreeMemory) Less(i, j int) bool { return a[i].FreeMemory < a[j].FreeMemory }

type CPUCapability uint32

// Override at build time when building base GPU runners
var GPURunnerCPUCapability = CPUCapabilityAVX

const (
    CPUCapabilityNone CPUCapability = iota
    CPUCapabilityAVX
    CPUCapabilityAVX2
    // TODO AVX512
)

func (c CPUCapability) String() string {
    switch c {
    case CPUCapabilityAVX:
        return "avx"
    case CPUCapabilityAVX2:
        return "avx2"
    default:
        return "no vector extensions"
    }
}
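The CPUCapability values above feed the grouping key used by GpuInfoList.ByLibrary earlier in this file: the variant suffix is appended only when a capability is set. A short, self-contained Go sketch of how that key composition behaves (illustrative; libraryKey is a made-up helper, not part of the patch):

package main

import "fmt"

// CPUCapability mirrors the type added to gpu/types.go above.
type CPUCapability uint32

const (
    CPUCapabilityNone CPUCapability = iota
    CPUCapabilityAVX
    CPUCapabilityAVX2
)

func (c CPUCapability) String() string {
    switch c {
    case CPUCapabilityAVX:
        return "avx"
    case CPUCapabilityAVX2:
        return "avx2"
    default:
        return "no vector extensions"
    }
}

// libraryKey shows how ByLibrary composes its grouping key: the variant
// suffix is only appended when a capability other than None is set.
func libraryKey(library string, variant CPUCapability) string {
    if variant != CPUCapabilityNone {
        return library + "_" + variant.String()
    }
    return library
}

func main() {
    fmt.Println(libraryKey("cpu", CPUCapabilityAVX2))  // cpu_avx2
    fmt.Println(libraryKey("cuda", CPUCapabilityNone)) // cuda
}

So a CPU entry built with AVX2 runners groups under "cpu_avx2", while a plain CUDA entry stays "cuda".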
|
@@ -22,6 +22,7 @@ func TestMultiModelConcurrency(t *testing.T) {
|
|||||||
Model: "orca-mini",
|
Model: "orca-mini",
|
||||||
Prompt: "why is the ocean blue?",
|
Prompt: "why is the ocean blue?",
|
||||||
Stream: &stream,
|
Stream: &stream,
|
||||||
|
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||||
Options: map[string]interface{}{
|
Options: map[string]interface{}{
|
||||||
"seed": 42,
|
"seed": 42,
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
@@ -30,6 +31,7 @@ func TestMultiModelConcurrency(t *testing.T) {
|
|||||||
Model: "tinydolphin",
|
Model: "tinydolphin",
|
||||||
Prompt: "what is the origin of the us thanksgiving holiday?",
|
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||||
Stream: &stream,
|
Stream: &stream,
|
||||||
|
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||||
Options: map[string]interface{}{
|
Options: map[string]interface{}{
|
||||||
"seed": 42,
|
"seed": 42,
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
@@ -38,42 +40,64 @@ func TestMultiModelConcurrency(t *testing.T) {
|
|||||||
}
|
}
|
||||||
resp = [2][]string{
|
resp = [2][]string{
|
||||||
[]string{"sunlight"},
|
[]string{"sunlight"},
|
||||||
[]string{"england", "english", "massachusetts", "pilgrims"},
|
[]string{"england", "english", "massachusetts", "pilgrims", "british"},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(len(req))
|
wg.Add(len(req))
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second*240)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
client, _, cleanup := InitServerConnection(ctx, t)
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
for i := 0; i < len(req); i++ {
|
||||||
|
require.NoError(t, PullIfMissing(ctx, client, req[i].Model))
|
||||||
|
}
|
||||||
|
|
||||||
for i := 0; i < len(req); i++ {
|
for i := 0; i < len(req); i++ {
|
||||||
go func(i int) {
|
go func(i int) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
GenerateTestHelper(ctx, t, req[i], resp[i])
|
DoGenerate(ctx, t, client, req[i], resp[i], 60*time.Second, 10*time.Second)
|
||||||
}(i)
|
}(i)
|
||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
|
func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) // GTX 750 2G card takes ~9 minutes
|
req, resp := GenerateRequests()
|
||||||
|
reqLimit := len(req)
|
||||||
|
iterLimit := 5
|
||||||
|
|
||||||
|
vram := os.Getenv("OLLAMA_MAX_VRAM")
|
||||||
|
if vram != "" {
|
||||||
|
max, err := strconv.ParseUint(vram, 10, 64)
|
||||||
|
require.NoError(t, err)
|
||||||
|
// Don't hammer on small VRAM cards...
|
||||||
|
if max < 4*1024*1024*1024 {
|
||||||
|
reqLimit = min(reqLimit, 2)
|
||||||
|
iterLimit = 2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 9*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
client, _, cleanup := InitServerConnection(ctx, t)
|
client, _, cleanup := InitServerConnection(ctx, t)
|
||||||
defer cleanup()
|
defer cleanup()
|
||||||
|
|
||||||
req, resp := GenerateRequests()
|
|
||||||
// Get the server running (if applicable) and warm the model up with a single initial request
|
||||||
DoGenerate(ctx, t, client, req[0], resp[0], 60*time.Second, 5*time.Second)
|
DoGenerate(ctx, t, client, req[0], resp[0], 60*time.Second, 10*time.Second)
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(len(req))
|
wg.Add(reqLimit)
|
||||||
for i := 0; i < len(req); i++ {
|
for i := 0; i < reqLimit; i++ {
|
||||||
go func(i int) {
|
go func(i int) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
for j := 0; j < 5; j++ {
|
for j := 0; j < iterLimit; j++ {
|
||||||
slog.Info("Starting", "req", i, "iter", j)
|
slog.Info("Starting", "req", i, "iter", j)
|
||||||
// On slower GPUs it can take a while to process the 4 concurrent requests
|
// On slower GPUs it can take a while to process the concurrent requests
|
||||||
// so we allow a much longer initial timeout
|
// so we allow a much longer initial timeout
|
||||||
DoGenerate(ctx, t, client, req[i], resp[i], 90*time.Second, 5*time.Second)
|
DoGenerate(ctx, t, client, req[i], resp[i], 120*time.Second, 20*time.Second)
|
||||||
}
|
}
|
||||||
}(i)
|
}(i)
|
||||||
}
|
}
|
||||||
@@ -221,5 +245,23 @@ func TestMultiModelStress(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}(i)
|
}(i)
|
||||||
}
|
}
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
models, err := client.ListRunning(ctx)
|
||||||
|
if err != nil {
|
||||||
|
slog.Warn("failed to list running models", "error", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, m := range models.Models {
|
||||||
|
slog.Info("loaded model snapshot", "model", m)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
@@ -11,7 +11,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func TestContextExhaustion(t *testing.T) {
|
func TestContextExhaustion(t *testing.T) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) // TODO maybe shorter?
|
// Longer needed for small footprint GPUs
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 6*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
// Set up the test data
|
// Set up the test data
|
||||||
req := api.GenerateRequest{
|
req := api.GenerateRequest{
|
||||||
|
@@ -32,7 +32,11 @@ func TestIntegrationMultimodal(t *testing.T) {
|
|||||||
resp := "the ollam"
|
resp := "the ollam"
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
|
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
GenerateTestHelper(ctx, t, req, []string{resp})
|
client, _, cleanup := InitServerConnection(ctx, t)
|
||||||
|
defer cleanup()
|
||||||
|
require.NoError(t, PullIfMissing(ctx, client, req.Model))
|
||||||
|
// llava models on CPU can be quite slow to start,
|
||||||
|
DoGenerate(ctx, t, client, req, []string{resp}, 120*time.Second, 30*time.Second)
|
||||||
}
|
}
|
||||||
|
|
||||||
const imageEncoding = `iVBORw0KGgoAAAANSUhEUgAAANIAAAB4CAYAAACHHqzKAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEb
|
const imageEncoding = `iVBORw0KGgoAAAANSUhEUgAAANIAAAB4CAYAAACHHqzKAAAAAXNSR0IArs4c6QAAAIRlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEb
|
||||||
|
@@ -140,7 +140,7 @@ func PullIfMissing(ctx context.Context, client *api.Client, modelName string) er
|
|||||||
|
|
||||||
showCtx, cancel := context.WithDeadlineCause(
|
showCtx, cancel := context.WithDeadlineCause(
|
||||||
ctx,
|
ctx,
|
||||||
time.Now().Add(5*time.Second),
|
time.Now().Add(10*time.Second),
|
||||||
fmt.Errorf("show for existing model %s took too long", modelName),
|
fmt.Errorf("show for existing model %s took too long", modelName),
|
||||||
)
|
)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
@@ -290,6 +290,7 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
|||||||
Model: "orca-mini",
|
Model: "orca-mini",
|
||||||
Prompt: "why is the ocean blue?",
|
Prompt: "why is the ocean blue?",
|
||||||
Stream: &stream,
|
Stream: &stream,
|
||||||
|
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||||
Options: map[string]interface{}{
|
Options: map[string]interface{}{
|
||||||
"seed": 42,
|
"seed": 42,
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
@@ -298,6 +299,7 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
|||||||
Model: "orca-mini",
|
Model: "orca-mini",
|
||||||
Prompt: "why is the color of dirt brown?",
|
Prompt: "why is the color of dirt brown?",
|
||||||
Stream: &stream,
|
Stream: &stream,
|
||||||
|
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||||
Options: map[string]interface{}{
|
Options: map[string]interface{}{
|
||||||
"seed": 42,
|
"seed": 42,
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
@@ -306,6 +308,7 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
|||||||
Model: "orca-mini",
|
Model: "orca-mini",
|
||||||
Prompt: "what is the origin of the us thanksgiving holiday?",
|
Prompt: "what is the origin of the us thanksgiving holiday?",
|
||||||
Stream: &stream,
|
Stream: &stream,
|
||||||
|
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||||
Options: map[string]interface{}{
|
Options: map[string]interface{}{
|
||||||
"seed": 42,
|
"seed": 42,
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
@@ -314,6 +317,7 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
|||||||
Model: "orca-mini",
|
Model: "orca-mini",
|
||||||
Prompt: "what is the origin of independence day?",
|
Prompt: "what is the origin of independence day?",
|
||||||
Stream: &stream,
|
Stream: &stream,
|
||||||
|
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||||
Options: map[string]interface{}{
|
Options: map[string]interface{}{
|
||||||
"seed": 42,
|
"seed": 42,
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
@@ -322,6 +326,7 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
|||||||
Model: "orca-mini",
|
Model: "orca-mini",
|
||||||
Prompt: "what is the composition of air?",
|
Prompt: "what is the composition of air?",
|
||||||
Stream: &stream,
|
Stream: &stream,
|
||||||
|
KeepAlive: &api.Duration{Duration: 10 * time.Second},
|
||||||
Options: map[string]interface{}{
|
Options: map[string]interface{}{
|
||||||
"seed": 42,
|
"seed": 42,
|
||||||
"temperature": 0.0,
|
"temperature": 0.0,
|
||||||
@@ -331,7 +336,7 @@ func GenerateRequests() ([]api.GenerateRequest, [][]string) {
|
|||||||
[][]string{
|
[][]string{
|
||||||
[]string{"sunlight"},
|
[]string{"sunlight"},
|
||||||
[]string{"soil", "organic", "earth", "black", "tan"},
|
[]string{"soil", "organic", "earth", "black", "tan"},
|
||||||
[]string{"england", "english", "massachusetts", "pilgrims"},
|
[]string{"england", "english", "massachusetts", "pilgrims", "british"},
|
||||||
[]string{"fourth", "july", "declaration", "independence"},
|
[]string{"fourth", "july", "declaration", "independence"},
|
||||||
[]string{"nitrogen", "oxygen", "carbon", "dioxide"},
|
[]string{"nitrogen", "oxygen", "carbon", "dioxide"},
|
||||||
}
|
}
|
||||||
|
llm/ext_server/CMakeLists.txt (vendored, 3 changed lines)
@@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
set(TARGET ollama_llama_server)
|
set(TARGET ollama_llama_server)
|
||||||
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
|
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
|
||||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
|
||||||
@@ -7,7 +6,7 @@ install(TARGETS ${TARGET} RUNTIME)
|
|||||||
target_compile_definitions(${TARGET} PRIVATE
|
target_compile_definitions(${TARGET} PRIVATE
|
||||||
SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
|
SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
|
||||||
)
|
)
|
||||||
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT})
|
||||||
if (WIN32)
|
if (WIN32)
|
||||||
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
|
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
|
||||||
endif()
|
endif()
|
||||||
|
llm/ext_server/server.cpp (vendored, 200 changed lines)
@@ -56,7 +56,6 @@ struct server_params {
|
|||||||
std::string hostname = "127.0.0.1";
|
std::string hostname = "127.0.0.1";
|
||||||
std::vector<std::string> api_keys;
|
std::vector<std::string> api_keys;
|
||||||
std::string public_path = "examples/server/public";
|
std::string public_path = "examples/server/public";
|
||||||
std::string chat_template = "";
|
|
||||||
int32_t port = 8080;
|
int32_t port = 8080;
|
||||||
int32_t read_timeout = 600;
|
int32_t read_timeout = 600;
|
||||||
int32_t write_timeout = 600;
|
int32_t write_timeout = 600;
|
||||||
@@ -140,7 +139,6 @@ struct server_slot {
|
|||||||
std::vector<llama_token> cache_tokens;
|
std::vector<llama_token> cache_tokens;
|
||||||
std::vector<completion_token_output> generated_token_probs;
|
std::vector<completion_token_output> generated_token_probs;
|
||||||
|
|
||||||
bool infill = false;
|
|
||||||
bool embedding = false;
|
bool embedding = false;
|
||||||
bool has_next_token = true;
|
bool has_next_token = true;
|
||||||
bool truncated = false;
|
bool truncated = false;
|
||||||
@@ -187,7 +185,6 @@ struct server_slot {
|
|||||||
n_past = 0;
|
n_past = 0;
|
||||||
n_sent_text = 0;
|
n_sent_text = 0;
|
||||||
n_sent_token_probs = 0;
|
n_sent_token_probs = 0;
|
||||||
infill = false;
|
|
||||||
ga_i = 0;
|
ga_i = 0;
|
||||||
n_past_se = 0;
|
n_past_se = 0;
|
||||||
|
|
||||||
@@ -361,7 +358,6 @@ struct llama_server_context
|
|||||||
|
|
||||||
// slots / clients
|
// slots / clients
|
||||||
std::vector<server_slot> slots;
|
std::vector<server_slot> slots;
|
||||||
json default_generation_settings_for_props;
|
|
||||||
|
|
||||||
llama_server_queue queue_tasks;
|
llama_server_queue queue_tasks;
|
||||||
llama_server_response queue_results;
|
llama_server_response queue_results;
|
||||||
@@ -430,16 +426,6 @@ struct llama_server_context
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void validate_model_chat_template(server_params & sparams) {
|
|
||||||
llama_chat_message chat[] = {{"user", "test"}};
|
|
||||||
std::vector<char> buf(1);
|
|
||||||
int res = llama_chat_apply_template(model, nullptr, chat, 1, true, buf.data(), buf.size());
|
|
||||||
if (res < 0) {
|
|
||||||
LOG_ERROR("The chat template comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
|
|
||||||
sparams.chat_template = "chatml";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void initialize() {
|
void initialize() {
|
||||||
// create slots
|
// create slots
|
||||||
all_slots_are_idle = true;
|
all_slots_are_idle = true;
|
||||||
@@ -485,9 +471,6 @@ struct llama_server_context
|
|||||||
slots.push_back(slot);
|
slots.push_back(slot);
|
||||||
}
|
}
|
||||||
|
|
||||||
default_generation_settings_for_props = get_formated_generation(slots.front());
|
|
||||||
default_generation_settings_for_props["seed"] = -1;
|
|
||||||
|
|
||||||
batch = llama_batch_init(n_ctx, 0, params.n_parallel);
|
batch = llama_batch_init(n_ctx, 0, params.n_parallel);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -586,7 +569,7 @@ struct llama_server_context
|
|||||||
slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
|
slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
|
||||||
slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
|
slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
|
||||||
slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep);
|
slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep);
|
||||||
slot->params.seed = json_value(data, "seed", default_params.seed);
|
slot->sparams.seed = json_value(data, "seed", default_params.seed);
|
||||||
slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
|
slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
|
||||||
slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
|
slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
|
||||||
slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
|
slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
|
||||||
@@ -600,16 +583,6 @@ struct llama_server_context
|
|||||||
slot->params.n_predict = slot->n_predict;
|
slot->params.n_predict = slot->n_predict;
|
||||||
}
|
}
|
||||||
|
|
||||||
// infill
|
|
||||||
if (data.count("input_prefix") != 0)
|
|
||||||
{
|
|
||||||
slot->params.input_prefix = data["input_prefix"];
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
slot->params.input_prefix = "";
|
|
||||||
}
|
|
||||||
|
|
||||||
if (data.count("input_suffix") != 0)
|
if (data.count("input_suffix") != 0)
|
||||||
{
|
{
|
||||||
slot->params.input_suffix = data["input_suffix"];
|
slot->params.input_suffix = data["input_suffix"];
|
||||||
@@ -823,7 +796,6 @@ struct llama_server_context
|
|||||||
llama_sampling_free(slot->ctx_sampling);
|
llama_sampling_free(slot->ctx_sampling);
|
||||||
}
|
}
|
||||||
slot->ctx_sampling = llama_sampling_init(slot->sparams);
|
slot->ctx_sampling = llama_sampling_init(slot->sparams);
|
||||||
llama_set_rng_seed(ctx, slot->params.seed);
|
|
||||||
slot->command = LOAD_PROMPT;
|
slot->command = LOAD_PROMPT;
|
||||||
|
|
||||||
all_slots_are_idle = false;
|
all_slots_are_idle = false;
|
||||||
@@ -847,7 +819,7 @@ struct llama_server_context
|
|||||||
system_tokens.clear();
|
system_tokens.clear();
|
||||||
|
|
||||||
if (!system_prompt.empty()) {
|
if (!system_prompt.empty()) {
|
||||||
system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);
|
system_tokens = ::llama_tokenize(ctx, system_prompt, true);
|
||||||
|
|
||||||
llama_batch_clear(batch);
|
llama_batch_clear(batch);
|
||||||
|
|
||||||
@@ -897,15 +869,6 @@ struct llama_server_context
|
|||||||
system_need_update = true;
|
system_need_update = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void system_prompt_process(const json &sys_props) {
|
|
||||||
system_prompt = sys_props.value("prompt", "");
|
|
||||||
name_user = sys_props.value("anti_prompt", "");
|
|
||||||
name_assistant = sys_props.value("assistant_name", "");
|
|
||||||
|
|
||||||
|
|
||||||
system_prompt_notify();
|
|
||||||
}
|
|
||||||
|
|
||||||
static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
|
static size_t find_stopping_strings(const std::string &text, const size_t last_token_size,
|
||||||
const stop_type type, server_slot &slot)
|
const stop_type type, server_slot &slot)
|
||||||
{
|
{
|
||||||
@@ -1263,13 +1226,12 @@ struct llama_server_context
|
|||||||
queue_results.send(res);
|
queue_results.send(res);
|
||||||
}
|
}
|
||||||
|
|
||||||
void request_completion(int task_id, json data, bool infill, bool embedding, int multitask_id)
|
void request_completion(int task_id, json data, bool embedding, int multitask_id)
|
||||||
{
|
{
|
||||||
task_server task;
|
task_server task;
|
||||||
task.id = task_id;
|
task.id = task_id;
|
||||||
task.target_id = 0;
|
task.target_id = 0;
|
||||||
task.data = std::move(data);
|
task.data = std::move(data);
|
||||||
task.infill_mode = infill;
|
|
||||||
task.embedding_mode = embedding;
|
task.embedding_mode = embedding;
|
||||||
task.type = TASK_TYPE_COMPLETION;
|
task.type = TASK_TYPE_COMPLETION;
|
||||||
task.multitask_id = multitask_id;
|
task.multitask_id = multitask_id;
|
||||||
@@ -1415,17 +1377,55 @@ struct llama_server_context
|
|||||||
json subtask_data = multiprompt_task.data;
|
json subtask_data = multiprompt_task.data;
|
||||||
subtask_data["prompt"] = subtask_data["prompt"][i];
|
subtask_data["prompt"] = subtask_data["prompt"][i];
|
||||||
|
|
||||||
// subtasks inherit everything else (infill mode, embedding mode, etc.)
|
// subtasks inherit everything else (embedding mode, etc.)
|
||||||
request_completion(subtask_ids[i], subtask_data, multiprompt_task.infill_mode, multiprompt_task.embedding_mode, multitask_id);
|
request_completion(subtask_ids[i], subtask_data, multiprompt_task.embedding_mode, multitask_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
std::string common_prefix(const std::string& str1, const std::string& str2) {
|
||||||
|
auto mismatch_pair = std::mismatch(str1.begin(), str1.end(), str2.begin());
|
||||||
|
return std::string(str1.begin(), mismatch_pair.first);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find the slot that has the greatest common prefix
|
||||||
|
server_slot *prefix_slot(const json &prompt) {
|
||||||
|
if (!prompt.is_string()) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string prompt_str = prompt.get<std::string>();
|
||||||
|
server_slot *slot = nullptr;
|
||||||
|
size_t longest = 0;
|
||||||
|
|
||||||
|
for (server_slot &s : slots) {
|
||||||
|
if (s.available() && s.prompt.is_string()) {
|
||||||
|
std::string s_prompt = s.prompt.get<std::string>();
|
||||||
|
std::string prefix = common_prefix(s_prompt, prompt_str);
|
||||||
|
|
||||||
|
if (prefix.size() > longest) {
|
||||||
|
slot = &s;
|
||||||
|
longest = prefix.size();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!slot) {
|
||||||
|
return get_slot(-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_DEBUG("slot with common prefix found", {{
|
||||||
|
"slot_id", slot->id,
|
||||||
|
"characters", longest
|
||||||
|
}});
|
||||||
|
return slot;
|
||||||
|
}
|
||||||
|
|
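prefix_slot picks whichever available slot already holds the longest shared prompt prefix, so that slot's cached context can be reused instead of reprocessing the prompt from scratch; get_slot(-1) remains the fallback when nothing matches. A rough Go sketch of the same selection logic (the names here are invented for illustration and are not part of the server):

package main

import "fmt"

// commonPrefixLen returns the length of the shared leading substring.
func commonPrefixLen(a, b string) int {
    n := 0
    for n < len(a) && n < len(b) && a[n] == b[n] {
        n++
    }
    return n
}

type slot struct {
    id     int
    prompt string // prompt the slot processed last
    idle   bool
}

// pickSlot chooses the idle slot whose cached prompt shares the longest
// prefix with the incoming prompt; -1 means "fall back to any slot".
func pickSlot(slots []slot, prompt string) int {
    best, longest := -1, 0
    for _, s := range slots {
        if !s.idle {
            continue
        }
        if l := commonPrefixLen(s.prompt, prompt); l > longest {
            best, longest = s.id, l
        }
    }
    return best
}

func main() {
    slots := []slot{
        {id: 0, prompt: "why is the sky blue?", idle: true},
        {id: 1, prompt: "why is the ocean blue?", idle: true},
    }
    fmt.Println(pickSlot(slots, "why is the ocean salty?")) // 1
}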
||||||
void process_single_task(task_server& task)
|
void process_single_task(task_server& task)
|
||||||
{
|
{
|
||||||
switch (task.type)
|
switch (task.type)
|
||||||
{
|
{
|
||||||
case TASK_TYPE_COMPLETION: {
|
case TASK_TYPE_COMPLETION: {
|
||||||
server_slot *slot = get_slot(json_value(task.data, "slot_id", -1));
|
server_slot *slot = prefix_slot(task.data["prompt"]);
|
||||||
if (slot == nullptr)
|
if (slot == nullptr)
|
||||||
{
|
{
|
||||||
// if no slot is available, we defer this task for processing later
|
// if no slot is available, we defer this task for processing later
|
||||||
@@ -1434,26 +1434,8 @@ struct llama_server_context
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (task.data.contains("system_prompt"))
|
|
||||||
{
|
|
||||||
if (!all_slots_are_idle) {
|
|
||||||
send_error(task, "system prompt can only be updated when all slots are idle");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
system_prompt_process(task.data["system_prompt"]);
|
|
||||||
|
|
||||||
// reset cache_tokens for all slots
|
|
||||||
for (server_slot &slot : slots)
|
|
||||||
{
|
|
||||||
slot.cache_tokens.clear();
|
|
||||||
slot.n_past = 0;
|
|
||||||
slot.n_past_se = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
slot->reset();
|
slot->reset();
|
||||||
|
|
||||||
slot->infill = task.infill_mode;
|
|
||||||
slot->embedding = task.embedding_mode;
|
slot->embedding = task.embedding_mode;
|
||||||
slot->task_id = task.id;
|
slot->task_id = task.id;
|
||||||
slot->multitask_id = task.multitask_id;
|
slot->multitask_id = task.multitask_id;
|
||||||
@@ -1679,8 +1661,7 @@ struct llama_server_context
|
|||||||
const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();
|
const bool has_prompt = slot.prompt.is_array() || (slot.prompt.is_string() && !slot.prompt.get<std::string>().empty()) || !slot.images.empty();
|
||||||
|
|
||||||
// empty prompt passed -> release the slot and send empty response
|
// empty prompt passed -> release the slot and send empty response
|
||||||
// note: infill mode allows empty prompt
|
if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt)
|
||||||
if (slot.state == IDLE && slot.command == LOAD_PROMPT && !has_prompt && !slot.infill)
|
|
||||||
{
|
{
|
||||||
slot.release();
|
slot.release();
|
||||||
slot.print_timings();
|
slot.print_timings();
|
||||||
@@ -1697,33 +1678,7 @@ struct llama_server_context
|
|||||||
slot.t_start_process_prompt = ggml_time_us();
|
slot.t_start_process_prompt = ggml_time_us();
|
||||||
slot.t_start_genereration = 0;
|
slot.t_start_genereration = 0;
|
||||||
|
|
||||||
if (slot.infill)
|
prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt
|
||||||
{
|
|
||||||
bool suff_rm_leading_spc = true;
|
|
||||||
if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1)
|
|
||||||
{
|
|
||||||
params.input_suffix.erase(0, 1);
|
|
||||||
suff_rm_leading_spc = false;
|
|
||||||
}
|
|
||||||
auto prefix_tokens = tokenize(slot.params.input_prefix, false);
|
|
||||||
auto suffix_tokens = tokenize(slot.params.input_suffix, false);
|
|
||||||
|
|
||||||
const int space_token = 29871; // TODO: this should not be hardcoded
|
|
||||||
if (suff_rm_leading_spc && !suffix_tokens.empty() && suffix_tokens[0] == space_token) {
|
|
||||||
suffix_tokens.erase(suffix_tokens.begin());
|
|
||||||
}
|
|
||||||
|
|
||||||
prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
|
|
||||||
prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
|
|
||||||
prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
|
|
||||||
prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
|
|
||||||
prefix_tokens.push_back(llama_token_middle(model));
|
|
||||||
prompt_tokens = prefix_tokens;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS if there isn't system prompt
|
|
||||||
}
|
|
||||||
|
|
||||||
slot.n_prompt_tokens = prompt_tokens.size();
|
slot.n_prompt_tokens = prompt_tokens.size();
|
||||||
|
|
||||||
@@ -1737,22 +1692,23 @@ struct llama_server_context
|
|||||||
if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
|
if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx)
|
||||||
{
|
{
|
||||||
const int n_left = slot.n_ctx - slot.params.n_keep;
|
const int n_left = slot.n_ctx - slot.params.n_keep;
|
||||||
const int n_block_size = n_left / 2;
|
const int n_shift = n_left / 2;
|
||||||
const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size;
|
const int n_erase = slot.n_prompt_tokens - slot.params.n_keep - n_shift;
|
||||||
|
|
||||||
std::vector<llama_token> new_tokens(
|
std::vector<llama_token> new_tokens(
|
||||||
prompt_tokens.begin(),
|
prompt_tokens.begin(),
|
||||||
prompt_tokens.begin() + slot.params.n_keep);
|
prompt_tokens.begin() + slot.params.n_keep);
|
||||||
new_tokens.insert(
|
new_tokens.insert(
|
||||||
new_tokens.end(),
|
new_tokens.end(),
|
||||||
prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size,
|
prompt_tokens.begin() + slot.params.n_keep + n_erase,
|
||||||
prompt_tokens.end());
|
prompt_tokens.end());
|
||||||
|
|
||||||
LOG_VERBOSE("input truncated", {
|
LOG_INFO("input truncated", {
|
||||||
{"n_ctx", slot.n_ctx},
|
{"n_ctx", slot.n_ctx},
|
||||||
{"n_keep", slot.params.n_keep},
|
{"n_keep", slot.params.n_keep},
|
||||||
{"n_left", n_left},
|
{"n_left", n_left},
|
||||||
{"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())},
|
{"n_shift", n_shift},
|
||||||
|
{"n_erase", n_erase},
|
||||||
});
|
});
|
||||||
slot.truncated = true;
|
slot.truncated = true;
|
||||||
prompt_tokens = new_tokens;
|
prompt_tokens = new_tokens;
|
||||||
@@ -1787,7 +1743,7 @@ struct llama_server_context
|
|||||||
slot.n_past -= 1;
|
slot.n_past -= 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
slot.n_prompt_tokens_processed = slot.n_prompt_tokens - slot.n_past;
|
slot.n_prompt_tokens_processed = slot.n_prompt_tokens;
|
||||||
|
|
||||||
if (slot.ga_n != 1)
|
if (slot.ga_n != 1)
|
||||||
{
|
{
|
||||||
@@ -2130,8 +2086,7 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms,
|
|||||||
printf("\n");
|
printf("\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
static void server_params_parse(int argc, char **argv, server_params &sparams,
|
static void server_params_parse(int argc, char **argv, server_params &sparams, gpt_params ¶ms)
|
||||||
gpt_params ¶ms, llama_server_context& llama)
|
|
||||||
{
|
{
|
||||||
gpt_params default_params;
|
gpt_params default_params;
|
||||||
server_params default_sparams;
|
server_params default_sparams;
|
||||||
@@ -2408,9 +2363,9 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#ifndef GGML_USE_CUBLAS
|
#ifndef GGML_USE_CUDA
|
||||||
fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n");
|
fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
|
||||||
#endif // GGML_USE_CUBLAS
|
#endif // GGML_USE_CUDA
|
||||||
}
|
}
|
||||||
else if (arg == "--tensor-split" || arg == "-ts")
|
else if (arg == "--tensor-split" || arg == "-ts")
|
||||||
{
|
{
|
||||||
@@ -2419,7 +2374,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
|
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
|
||||||
std::string arg_next = argv[i];
|
std::string arg_next = argv[i];
|
||||||
|
|
||||||
// split string by , and /
|
// split string by , and /
|
||||||
@@ -2440,8 +2395,8 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n", {});
|
LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n", {});
|
||||||
#endif // GGML_USE_CUBLAS
|
#endif // GGML_USE_CUDA
|
||||||
}
|
}
|
||||||
else if (arg == "--main-gpu" || arg == "-mg")
|
else if (arg == "--main-gpu" || arg == "-mg")
|
||||||
{
|
{
|
||||||
@@ -2450,7 +2405,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_SYCL)
|
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
|
||||||
params.main_gpu = std::stoi(argv[i]);
|
params.main_gpu = std::stoi(argv[i]);
|
||||||
#else
|
#else
|
||||||
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
|
LOG_WARNING("llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.", {});
|
||||||
@@ -2546,27 +2501,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
}
|
}
|
||||||
params.n_predict = std::stoi(argv[i]);
|
params.n_predict = std::stoi(argv[i]);
|
||||||
}
|
}
|
||||||
else if (arg == "-spf" || arg == "--system-prompt-file")
|
|
||||||
{
|
|
||||||
if (++i >= argc)
|
|
||||||
{
|
|
||||||
invalid_param = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
std::ifstream file(argv[i]);
|
|
||||||
if (!file) {
|
|
||||||
fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
|
|
||||||
invalid_param = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
std::string systm_content;
|
|
||||||
std::copy(
|
|
||||||
std::istreambuf_iterator<char>(file),
|
|
||||||
std::istreambuf_iterator<char>(),
|
|
||||||
std::back_inserter(systm_content)
|
|
||||||
);
|
|
||||||
llama.system_prompt_process(json::parse(systm_content));
|
|
||||||
}
|
|
||||||
else if (arg == "-ctk" || arg == "--cache-type-k") {
|
else if (arg == "-ctk" || arg == "--cache-type-k") {
|
||||||
params.cache_type_k = argv[++i];
|
params.cache_type_k = argv[++i];
|
||||||
}
|
}
|
||||||
@@ -2629,7 +2563,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
|
|||||||
invalid_param = true;
|
invalid_param = true;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
sparams.chat_template = argv[i];
|
|
||||||
}
|
}
|
||||||
else if (arg == "--override-kv")
|
else if (arg == "--override-kv")
|
||||||
{
|
{
|
||||||
@@ -2818,7 +2751,7 @@ int main(int argc, char **argv) {
|
|||||||
// struct that contains llama context and inference
|
// struct that contains llama context and inference
|
||||||
llama_server_context llama;
|
llama_server_context llama;
|
||||||
|
|
||||||
server_params_parse(argc, argv, sparams, params, llama);
|
server_params_parse(argc, argv, sparams, params);
|
||||||
|
|
||||||
if (params.model_alias == "unknown")
|
if (params.model_alias == "unknown")
|
||||||
{
|
{
|
||||||
@@ -3102,11 +3035,6 @@ int main(int argc, char **argv) {
|
|||||||
}
|
}
|
||||||
const auto model_meta = llama.model_meta();
|
const auto model_meta = llama.model_meta();
|
||||||
|
|
||||||
if (sparams.chat_template.empty()) { // custom chat template is not supplied
|
|
||||||
// check if the template comes with the model is supported by us
|
|
||||||
llama.validate_model_chat_template(sparams);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Middleware for API key validation
|
// Middleware for API key validation
|
||||||
auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
|
auto validate_api_key = [&sparams](const httplib::Request &req, httplib::Response &res) -> bool {
|
||||||
// If API key is not set, skip validation
|
// If API key is not set, skip validation
|
||||||
@@ -3150,7 +3078,7 @@ int main(int argc, char **argv) {
|
|||||||
json data = json::parse(req.body);
|
json data = json::parse(req.body);
|
||||||
const int task_id = llama.queue_tasks.get_new_id();
|
const int task_id = llama.queue_tasks.get_new_id();
|
||||||
llama.queue_results.add_waiting_task_id(task_id);
|
llama.queue_results.add_waiting_task_id(task_id);
|
||||||
llama.request_completion(task_id, data, false, false, -1);
|
llama.request_completion(task_id, data, false, -1);
|
||||||
if (!json_value(data, "stream", false)) {
|
if (!json_value(data, "stream", false)) {
|
||||||
std::string completion_text;
|
std::string completion_text;
|
||||||
task_result result = llama.queue_results.recv(task_id);
|
task_result result = llama.queue_results.recv(task_id);
|
||||||
@@ -3272,7 +3200,7 @@ int main(int argc, char **argv) {
|
|||||||
// create and queue the task
|
// create and queue the task
|
||||||
const int task_id = llama.queue_tasks.get_new_id();
|
const int task_id = llama.queue_tasks.get_new_id();
|
||||||
llama.queue_results.add_waiting_task_id(task_id);
|
llama.queue_results.add_waiting_task_id(task_id);
|
||||||
llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
|
llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, true, -1);
|
||||||
|
|
||||||
// get the result
|
// get the result
|
||||||
task_result result = llama.queue_results.recv(task_id);
|
task_result result = llama.queue_results.recv(task_id);
|
||||||
|
@@ -18,26 +18,26 @@ sign() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DLLAMA_METAL_EMBED_LIBRARY=on"
|
COMMON_DARWIN_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off"
|
||||||
|
|
||||||
case "${GOARCH}" in
|
case "${GOARCH}" in
|
||||||
"amd64")
|
"amd64")
|
||||||
COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_NATIVE=off"
|
COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DGGML_METAL=off -DGGML_NATIVE=off"
|
||||||
|
|
||||||
# Static build for linking into the Go binary
|
# Static build for linking into the Go binary
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_TARGETS="--target llama --target ggml"
|
CMAKE_TARGETS="--target llama --target ggml"
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}_static"
|
BUILD_DIR="../build/darwin/${ARCH}_static"
|
||||||
echo "Building static library"
|
echo "Building static library"
|
||||||
build
|
build
|
||||||
|
|
||||||
|
if [ -z "$OLLAMA_SKIP_CPU_GENERATE" ]; then
|
||||||
#
|
#
|
||||||
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}/cpu"
|
BUILD_DIR="../build/darwin/${ARCH}/cpu"
|
||||||
echo "Building LCD CPU"
|
echo "Building LCD CPU"
|
||||||
build
|
build
|
||||||
@@ -49,7 +49,7 @@ case "${GOARCH}" in
|
|||||||
# Approximately 400% faster than LCD on same CPU
|
# Approximately 400% faster than LCD on same CPU
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx"
|
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx"
|
||||||
echo "Building AVX CPU"
|
echo "Building AVX CPU"
|
||||||
build
|
build
|
||||||
@@ -61,31 +61,34 @@ case "${GOARCH}" in
|
|||||||
# Approximately 10% faster than AVX on same CPU
|
# Approximately 10% faster than AVX on same CPU
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=on -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=on -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2"
|
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2"
|
||||||
echo "Building AVX2 CPU"
|
echo "Building AVX2 CPU"
|
||||||
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation"
|
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation"
|
||||||
build
|
build
|
||||||
sign ${BUILD_DIR}/bin/ollama_llama_server
|
sign ${BUILD_DIR}/bin/ollama_llama_server
|
||||||
compress
|
compress
|
||||||
|
fi
|
||||||
;;
|
;;
|
||||||
"arm64")
|
"arm64")
|
||||||
|
|
||||||
# Static build for linking into the Go binary
|
# Static build for linking into the Go binary
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_TARGETS="--target llama --target ggml"
|
CMAKE_TARGETS="--target llama --target ggml"
|
||||||
CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}_static"
|
BUILD_DIR="../build/darwin/${ARCH}_static"
|
||||||
echo "Building static library"
|
echo "Building static library"
|
||||||
build
|
build
|
||||||
|
|
||||||
|
if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DLLAMA_ACCELERATE=on -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=on ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}/metal"
|
BUILD_DIR="../build/darwin/${ARCH}/metal"
|
||||||
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders"
|
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders"
|
||||||
build
|
build
|
||||||
sign ${BUILD_DIR}/bin/ollama_llama_server
|
sign ${BUILD_DIR}/bin/ollama_llama_server
|
||||||
compress
|
compress
|
||||||
|
fi
|
||||||
;;
|
;;
|
||||||
*)
|
*)
|
||||||
echo "GOARCH must be set"
|
echo "GOARCH must be set"
|
||||||
|
@@ -51,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then
|
|||||||
export CUDACXX=$(command -v nvcc)
|
export CUDACXX=$(command -v nvcc)
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off"
|
COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
|
||||||
source $(dirname $0)/gen_common.sh
|
source $(dirname $0)/gen_common.sh
|
||||||
init_vars
|
init_vars
|
||||||
git_module_setup
|
git_module_setup
|
||||||
@@ -64,7 +64,7 @@ if [ -z "${OLLAMA_SKIP_STATIC_GENERATE}" -o "${OLLAMA_CPU_TARGET}" = "static" ];
|
|||||||
# Static build for linking into the Go binary
|
# Static build for linking into the Go binary
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_TARGETS="--target llama --target ggml"
|
CMAKE_TARGETS="--target llama --target ggml"
|
||||||
CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DGGML_NATIVE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/linux/${ARCH}_static"
|
BUILD_DIR="../build/linux/${ARCH}_static"
|
||||||
echo "Building static library"
|
echo "Building static library"
|
||||||
build
|
build
|
||||||
@@ -77,29 +77,29 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
|
|||||||
if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then
|
if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then
|
||||||
init_vars
|
init_vars
|
||||||
echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
|
echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\""
|
||||||
CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
|
CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/linux/${ARCH}/cpu"
|
BUILD_DIR="../build/linux/${ARCH}/cpu"
|
||||||
echo "Building custom CPU"
|
echo "Building custom CPU"
|
||||||
build
|
build
|
||||||
compress
|
compress
|
||||||
else
|
else
|
||||||
# Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512
|
# Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512
|
||||||
# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
||||||
# -DLLAMA_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
|
# -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
|
||||||
# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
||||||
# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
||||||
# Note: the following seem to yield slower results than AVX2 - ymmv
|
# Note: the following seem to yield slower results than AVX2 - ymmv
|
||||||
# -DLLAMA_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
|
# -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
|
||||||
# -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake
|
# -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
|
||||||
# -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
|
# -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
|
||||||
|
|
||||||
COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off"
|
COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
|
||||||
if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
|
if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
|
||||||
#
|
#
|
||||||
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/linux/${ARCH}/cpu"
|
BUILD_DIR="../build/linux/${ARCH}/cpu"
|
||||||
echo "Building LCD CPU"
|
echo "Building LCD CPU"
|
||||||
build
|
build
|
||||||
@@ -116,7 +116,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
|
|||||||
# Approximately 400% faster than LCD on same CPU
|
# Approximately 400% faster than LCD on same CPU
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/linux/${ARCH}/cpu_avx"
|
BUILD_DIR="../build/linux/${ARCH}/cpu_avx"
|
||||||
echo "Building AVX CPU"
|
echo "Building AVX CPU"
|
||||||
build
|
build
|
||||||
@@ -129,7 +129,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
|
|||||||
# Approximately 10% faster than AVX on same CPU
|
# Approximately 10% faster than AVX on same CPU
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/linux/${ARCH}/cpu_avx2"
|
BUILD_DIR="../build/linux/${ARCH}/cpu_avx2"
|
||||||
echo "Building AVX2 CPU"
|
echo "Building AVX2 CPU"
|
||||||
build
|
build
|
||||||
@@ -170,15 +170,15 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then
|
|||||||
#
|
#
|
||||||
# CUDA compute < 6.0 lacks proper FP16 support on ARM.
|
# CUDA compute < 6.0 lacks proper FP16 support on ARM.
|
||||||
# Disabling has minimal performance effect while maintaining compatibility.
|
# Disabling has minimal performance effect while maintaining compatibility.
|
||||||
ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off"
|
ARM64_DEFS="-DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_CUDA_F16=off"
|
||||||
fi
|
fi
|
||||||
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
|
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
|
||||||
if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then
|
if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then
|
||||||
echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\""
|
echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\""
|
||||||
CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
|
CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
|
||||||
echo "Building custom CUDA GPU"
|
echo "Building custom CUDA GPU"
|
||||||
else
|
else
|
||||||
CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}"
|
CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}"
|
||||||
fi
|
fi
|
||||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}"
|
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}"
|
||||||
BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}"
|
BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}"
|
||||||
@@ -211,12 +211,12 @@ if [ -z "${ONEAPI_ROOT}" ]; then
|
|||||||
ONEAPI_ROOT=/opt/intel/oneapi
|
ONEAPI_ROOT=/opt/intel/oneapi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ -d "${ONEAPI_ROOT}" ]; then
|
if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then
|
||||||
echo "OneAPI libraries detected - building dynamic OneAPI library"
|
echo "OneAPI libraries detected - building dynamic OneAPI library"
|
||||||
init_vars
|
init_vars
|
||||||
source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI
|
source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI
|
||||||
CC=icx
|
CC=icx
|
||||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL=ON -DLLAMA_SYCL_F16=OFF"
|
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DGGML_SYCL_F16=OFF"
|
||||||
BUILD_DIR="../build/linux/${ARCH}/oneapi"
|
BUILD_DIR="../build/linux/${ARCH}/oneapi"
|
||||||
EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb"
|
EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb"
|
||||||
DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it
|
DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it
|
||||||
@@ -254,7 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
|
|||||||
ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true)
|
ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true)
|
||||||
fi
|
fi
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
|
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DLLAMA_CUDA_NO_PEER_COPY=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
|
||||||
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
|
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
|
||||||
if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then
|
if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then
|
||||||
echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\""
|
echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\""
|
||||||
|
@@ -6,18 +6,9 @@ function amdGPUs {
|
|||||||
if ($env:AMDGPU_TARGETS) {
|
if ($env:AMDGPU_TARGETS) {
|
||||||
return $env:AMDGPU_TARGETS
|
return $env:AMDGPU_TARGETS
|
||||||
}
|
}
|
||||||
# TODO - load from some common data file for linux + windows build consistency
|
# Current supported rocblas list from ROCm v6.1.2 on windows
|
||||||
$GPU_LIST = @(
|
$GPU_LIST = @(
|
||||||
"gfx900"
|
|
||||||
"gfx906:xnack-"
|
"gfx906:xnack-"
|
||||||
"gfx908:xnack-"
|
|
||||||
"gfx90a:xnack+"
|
|
||||||
"gfx90a:xnack-"
|
|
||||||
"gfx940"
|
|
||||||
"gfx941"
|
|
||||||
"gfx942"
|
|
||||||
"gfx1010"
|
|
||||||
"gfx1012"
|
|
||||||
"gfx1030"
|
"gfx1030"
|
||||||
"gfx1100"
|
"gfx1100"
|
||||||
"gfx1101"
|
"gfx1101"
|
||||||
@@ -39,7 +30,8 @@ function init_vars {
|
|||||||
}
|
}
|
||||||
$script:cmakeDefs = @(
|
$script:cmakeDefs = @(
|
||||||
"-DBUILD_SHARED_LIBS=on",
|
"-DBUILD_SHARED_LIBS=on",
|
||||||
"-DLLAMA_NATIVE=off"
|
"-DGGML_NATIVE=off",
|
||||||
|
"-DGGML_OPENMP=off"
|
||||||
)
|
)
|
||||||
$script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on")
|
$script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on")
|
||||||
$script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower()
|
$script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower()
|
||||||
@@ -122,8 +114,13 @@ function build {
|
|||||||
& cmake --version
|
& cmake --version
|
||||||
& cmake -S "${script:llamacppDir}" -B $script:buildDir $script:cmakeDefs
|
& cmake -S "${script:llamacppDir}" -B $script:buildDir $script:cmakeDefs
|
||||||
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
|
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
|
||||||
write-host "building with: cmake --build $script:buildDir --config $script:config $($script:cmakeTargets | ForEach-Object { `"--target`", $_ })"
|
if ($cmakeDefs -contains "-G") {
|
||||||
& cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ })
|
$extra=@("-j8")
|
||||||
|
} else {
|
||||||
|
$extra= @("--", "/p:CL_MPcount=8")
|
||||||
|
}
|
||||||
|
write-host "building with: cmake --build $script:buildDir --config $script:config $($script:cmakeTargets | ForEach-Object { `"--target`", $_ }) $extra"
|
||||||
|
& cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ }) $extra
|
||||||
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
|
if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
|
||||||
# Rearrange output to be consistent between different generators
|
# Rearrange output to be consistent between different generators
|
||||||
if ($null -ne ${script:config} -And (test-path -path "${script:buildDir}/bin/${script:config}" ) ) {
|
if ($null -ne ${script:config} -And (test-path -path "${script:buildDir}/bin/${script:config}" ) ) {
|
||||||
@@ -176,9 +173,9 @@ function cleanup {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
|
||||||
# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
|
||||||
# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
|
||||||
|
|
||||||
|
|
||||||
function build_static() {
|
function build_static() {
|
||||||
@@ -198,12 +195,13 @@ function build_static() {
|
|||||||
"-DCMAKE_C_COMPILER=gcc.exe",
|
"-DCMAKE_C_COMPILER=gcc.exe",
|
||||||
"-DCMAKE_CXX_COMPILER=g++.exe",
|
"-DCMAKE_CXX_COMPILER=g++.exe",
|
||||||
"-DBUILD_SHARED_LIBS=off",
|
"-DBUILD_SHARED_LIBS=off",
|
||||||
"-DLLAMA_NATIVE=off",
|
"-DGGML_NATIVE=off",
|
||||||
"-DLLAMA_AVX=off",
|
"-DGGML_AVX=off",
|
||||||
"-DLLAMA_AVX2=off",
|
"-DGGML_AVX2=off",
|
||||||
"-DLLAMA_AVX512=off",
|
"-DGGML_AVX512=off",
|
||||||
"-DLLAMA_F16C=off",
|
"-DGGML_F16C=off",
|
||||||
"-DLLAMA_FMA=off")
|
"-DGGML_FMA=off",
|
||||||
|
"-DGGML_OPENMP=off")
|
||||||
$script:buildDir="../build/windows/${script:ARCH}_static"
|
$script:buildDir="../build/windows/${script:ARCH}_static"
|
||||||
write-host "Building static library"
|
write-host "Building static library"
|
||||||
build
|
build
|
||||||
@@ -217,7 +215,7 @@ function build_cpu($gen_arch) {
|
|||||||
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) {
|
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) {
|
||||||
# remaining llama.cpp builds use MSVC
|
# remaining llama.cpp builds use MSVC
|
||||||
init_vars
|
init_vars
|
||||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DLLAMA_AVX=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs
|
$script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
|
||||||
$script:buildDir="../build/windows/${script:ARCH}/cpu"
|
$script:buildDir="../build/windows/${script:ARCH}/cpu"
|
||||||
$script:distDir="$script:DIST_BASE\cpu"
|
$script:distDir="$script:DIST_BASE\cpu"
|
||||||
write-host "Building LCD CPU"
|
write-host "Building LCD CPU"
|
||||||
@@ -232,7 +230,7 @@ function build_cpu($gen_arch) {
|
|||||||
function build_cpu_avx() {
|
function build_cpu_avx() {
|
||||||
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) {
|
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) {
|
||||||
init_vars
|
init_vars
|
||||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs
|
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
|
||||||
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx"
|
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx"
|
||||||
$script:distDir="$script:DIST_BASE\cpu_avx"
|
$script:distDir="$script:DIST_BASE\cpu_avx"
|
||||||
write-host "Building AVX CPU"
|
write-host "Building AVX CPU"
|
||||||
@@ -247,7 +245,7 @@ function build_cpu_avx() {
|
|||||||
function build_cpu_avx2() {
|
function build_cpu_avx2() {
|
||||||
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) {
|
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) {
|
||||||
init_vars
|
init_vars
|
||||||
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=on", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=on", "-DLLAMA_F16C=on") + $script:cmakeDefs
|
$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=on", "-DGGML_AVX512=off", "-DGGML_FMA=on", "-DGGML_F16C=on") + $script:cmakeDefs
|
||||||
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx2"
|
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx2"
|
||||||
$script:distDir="$script:DIST_BASE\cpu_avx2"
|
$script:distDir="$script:DIST_BASE\cpu_avx2"
|
||||||
write-host "Building AVX2 CPU"
|
write-host "Building AVX2 CPU"
|
||||||
@@ -270,7 +268,15 @@ function build_cuda() {
|
|||||||
init_vars
|
init_vars
|
||||||
$script:buildDir="../build/windows/${script:ARCH}/cuda$script:CUDA_VARIANT"
|
$script:buildDir="../build/windows/${script:ARCH}/cuda$script:CUDA_VARIANT"
|
||||||
$script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT"
|
$script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT"
|
||||||
$script:cmakeDefs += @("-A", "x64", "-DLLAMA_CUDA=ON", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR", "-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}")
|
$script:cmakeDefs += @(
|
||||||
|
"-A", "x64",
|
||||||
|
"-DGGML_CUDA=ON",
|
||||||
|
"-DGGML_AVX=on",
|
||||||
|
"-DGGML_AVX2=off",
|
||||||
|
"-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR",
|
||||||
|
"-DCMAKE_CUDA_FLAGS=-t8",
|
||||||
|
"-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}"
|
||||||
|
)
|
||||||
if ($null -ne $env:OLLAMA_CUSTOM_CUDA_DEFS) {
|
if ($null -ne $env:OLLAMA_CUSTOM_CUDA_DEFS) {
|
||||||
write-host "OLLAMA_CUSTOM_CUDA_DEFS=`"${env:OLLAMA_CUSTOM_CUDA_DEFS}`""
|
write-host "OLLAMA_CUSTOM_CUDA_DEFS=`"${env:OLLAMA_CUSTOM_CUDA_DEFS}`""
|
||||||
$script:cmakeDefs +=@("${env:OLLAMA_CUSTOM_CUDA_DEFS}")
|
$script:cmakeDefs +=@("${env:OLLAMA_CUSTOM_CUDA_DEFS}")
|
||||||
@@ -280,17 +286,19 @@ function build_cuda() {
|
|||||||
sign
|
sign
|
||||||
install
|
install
|
||||||
|
|
||||||
write-host "copying CUDA dependencies to ${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||||
cp "${script:CUDA_LIB_DIR}\cudart64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
md "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\" -ea 0 > $null
|
||||||
cp "${script:CUDA_LIB_DIR}\cublas64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
write-host "copying CUDA dependencies to ${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||||
cp "${script:CUDA_LIB_DIR}\cublasLt64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\"
|
cp "${script:CUDA_LIB_DIR}\cudart64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||||
|
cp "${script:CUDA_LIB_DIR}\cublas64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||||
|
cp "${script:CUDA_LIB_DIR}\cublasLt64_*.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\cuda\"
|
||||||
} else {
|
} else {
|
||||||
write-host "Skipping CUDA generation step"
|
write-host "Skipping CUDA generation step"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
function build_oneapi() {
|
function build_oneapi() {
|
||||||
if ((-not "${env:OLLAMA_SKIP_CUDA_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
|
if ((-not "${env:OLLAMA_SKIP_ONEAPI_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
|
||||||
# Get oneAPI version
|
# Get oneAPI version
|
||||||
$script:ONEAPI_VERSION = icpx --version
|
$script:ONEAPI_VERSION = icpx --version
|
||||||
$script:ONEAPI_VERSION = [regex]::Match($script:ONEAPI_VERSION, '(?<=oneAPI DPC\+\+/C\+\+ Compiler )(?<version>\d+\.\d+\.\d+)').Value
|
$script:ONEAPI_VERSION = [regex]::Match($script:ONEAPI_VERSION, '(?<=oneAPI DPC\+\+/C\+\+ Compiler )(?<version>\d+\.\d+\.\d+)').Value
|
||||||
@@ -302,7 +310,7 @@ function build_oneapi() {
|
|||||||
$script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT"
|
$script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT"
|
||||||
$script:cmakeDefs += @(
|
$script:cmakeDefs += @(
|
||||||
"-G", "MinGW Makefiles",
|
"-G", "MinGW Makefiles",
|
||||||
"-DLLAMA_SYCL=ON",
|
"-DGGML_SYCL=ON",
|
||||||
"-DCMAKE_C_COMPILER=icx",
|
"-DCMAKE_C_COMPILER=icx",
|
||||||
"-DCMAKE_CXX_COMPILER=icx",
|
"-DCMAKE_CXX_COMPILER=icx",
|
||||||
"-DCMAKE_BUILD_TYPE=Release"
|
"-DCMAKE_BUILD_TYPE=Release"
|
||||||
@@ -317,16 +325,18 @@ function build_oneapi() {
|
|||||||
sign
|
sign
|
||||||
install
|
install
|
||||||
|
|
||||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libirngmd.dll" "${script:distDir}"
|
rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libmmd.dll" "${script:distDir}"
|
md "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\" -ea 0 > $null
|
||||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_level_zero.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libirngmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_unified_runtime.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libmmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_win_proxy_loader.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_level_zero.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\svml_dispmd.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_unified_runtime.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\sycl7.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_win_proxy_loader.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_core.2.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\svml_dispmd.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_sycl_blas.4.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\sycl7.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_tbb_thread.2.dll" "${script:distDir}"
|
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_core.2.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
|
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_sycl_blas.4.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
|
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_tbb_thread.2.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\oneapi\"
|
||||||
} else {
|
} else {
|
||||||
Write-Host "Skipping oneAPI generation step"
|
Write-Host "Skipping oneAPI generation step"
|
||||||
}
|
}
|
||||||
@@ -346,10 +356,11 @@ function build_rocm() {
|
|||||||
"-G", "Ninja",
|
"-G", "Ninja",
|
||||||
"-DCMAKE_C_COMPILER=clang.exe",
|
"-DCMAKE_C_COMPILER=clang.exe",
|
||||||
"-DCMAKE_CXX_COMPILER=clang++.exe",
|
"-DCMAKE_CXX_COMPILER=clang++.exe",
|
||||||
"-DLLAMA_HIPBLAS=on",
|
"-DGGML_HIPBLAS=on",
|
||||||
|
"-DLLAMA_CUDA_NO_PEER_COPY=on",
|
||||||
"-DHIP_PLATFORM=amd",
|
"-DHIP_PLATFORM=amd",
|
||||||
"-DLLAMA_AVX=on",
|
"-DGGML_AVX=on",
|
||||||
"-DLLAMA_AVX2=off",
|
"-DGGML_AVX2=off",
|
||||||
"-DCMAKE_POSITION_INDEPENDENT_CODE=on",
|
"-DCMAKE_POSITION_INDEPENDENT_CODE=on",
|
||||||
"-DAMDGPU_TARGETS=$(amdGPUs)",
|
"-DAMDGPU_TARGETS=$(amdGPUs)",
|
||||||
"-DGPU_TARGETS=$(amdGPUs)"
|
"-DGPU_TARGETS=$(amdGPUs)"
|
||||||
@@ -375,7 +386,6 @@ function build_rocm() {
|
|||||||
sign
|
sign
|
||||||
install
|
install
|
||||||
|
|
||||||
# Assumes v5.7, may need adjustments for v6
|
|
||||||
rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\"
|
rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\"
|
||||||
md "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\rocblas\library\" -ea 0 > $null
|
md "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\rocblas\library\" -ea 0 > $null
|
||||||
cp "${env:HIP_PATH}\bin\hipblas.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\"
|
cp "${env:HIP_PATH}\bin\hipblas.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\"
|
||||||
|
13
llm/ggla.go
13
llm/ggla.go
@@ -53,7 +53,7 @@ func (llm *ggla) Tensors() Tensors {
|
|||||||
return llm.tensors
|
return llm.tensors
|
||||||
}
|
}
|
||||||
|
|
||||||
func (llm *ggla) decode(rs io.ReadSeeker) error {
|
func (llm *ggla) decode(rs io.ReadSeeker) (retErr error) {
|
||||||
var r uint32
|
var r uint32
|
||||||
if err := binary.Read(rs, binary.LittleEndian, &r); err != nil {
|
if err := binary.Read(rs, binary.LittleEndian, &r); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -69,9 +69,18 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
|
|||||||
for {
|
for {
|
||||||
var dims uint32
|
var dims uint32
|
||||||
if err := binary.Read(rs, binary.LittleEndian, &dims); err != nil {
|
if err := binary.Read(rs, binary.LittleEndian, &dims); err != nil {
|
||||||
|
if errors.Is(err, io.EOF) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if errors.Is(retErr, io.EOF) {
|
||||||
|
retErr = io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
var namesize uint32
|
var namesize uint32
|
||||||
if err := binary.Read(rs, binary.LittleEndian, &namesize); err != nil {
|
if err := binary.Read(rs, binary.LittleEndian, &namesize); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -108,7 +117,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := rs.Seek((offset+31)&-32, io.SeekStart); err != nil {
|
if _, err := rs.Seek((offset+31)&-32-offset, io.SeekCurrent); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
117
llm/ggml.go
117
llm/ggml.go
@@ -6,6 +6,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/util/bufioutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
type GGML struct {
|
type GGML struct {
|
||||||
@@ -69,6 +71,30 @@ func (kv KV) HeadCountKV() uint64 {
|
|||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (kv KV) EmbeddingHeadCount() uint64 {
|
||||||
|
if heads := kv.HeadCount(); heads > 0 {
|
||||||
|
return kv.EmbeddingLength() / kv.HeadCount()
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv KV) EmbeddingHeadCountK() uint64 {
|
||||||
|
if k := kv.u64(fmt.Sprintf("%s.attention.key_length", kv.Architecture())); k > 0 {
|
||||||
|
return k
|
||||||
|
}
|
||||||
|
|
||||||
|
return kv.EmbeddingHeadCount()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (kv KV) EmbeddingHeadCountV() uint64 {
|
||||||
|
if v := kv.u64(fmt.Sprintf("%s.attention.value_length", kv.Architecture())); v > 0 {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
return kv.EmbeddingHeadCount()
|
||||||
|
}
|
||||||
|
|
||||||
func (kv KV) GQA() uint64 {
|
func (kv KV) GQA() uint64 {
|
||||||
return kv.HeadCount() / kv.HeadCountKV()
|
return kv.HeadCount() / kv.HeadCountKV()
|
||||||
}
|
}
|
||||||
@@ -81,6 +107,11 @@ func (kv KV) ContextLength() uint64 {
|
|||||||
return kv.u64(fmt.Sprintf("%s.context_length", kv.Architecture()))
|
return kv.u64(fmt.Sprintf("%s.context_length", kv.Architecture()))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (kv KV) ChatTemplate() string {
|
||||||
|
s, _ := kv["tokenizer.chat_template"].(string)
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
type Tensors []*Tensor
|
type Tensors []*Tensor
|
||||||
|
|
||||||
func (ts Tensors) Layers() map[string]Layer {
|
func (ts Tensors) Layers() map[string]Layer {
|
||||||
@@ -249,7 +280,18 @@ func DetectGGMLType(b []byte) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) {
|
// DecodeGGML decodes a GGML model from the given reader.
|
||||||
|
//
|
||||||
|
// It collects array values for arrays with a size less than or equal to
|
||||||
|
// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
|
||||||
|
// the maxArraySize is negative, all arrays are collected.
|
||||||
|
func DecodeGGML(rs io.ReadSeeker, maxArraySize int) (*GGML, int64, error) {
|
||||||
|
if maxArraySize == 0 {
|
||||||
|
maxArraySize = 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
rs = bufioutil.NewBufferedSeeker(rs, 32<<10)
|
||||||
|
|
||||||
var magic uint32
|
var magic uint32
|
||||||
if err := binary.Read(rs, binary.LittleEndian, &magic); err != nil {
|
if err := binary.Read(rs, binary.LittleEndian, &magic); err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
@@ -262,17 +304,15 @@ func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) {
|
|||||||
case FILE_MAGIC_GGLA:
|
case FILE_MAGIC_GGLA:
|
||||||
c = &containerGGLA{}
|
c = &containerGGLA{}
|
||||||
case FILE_MAGIC_GGUF_LE:
|
case FILE_MAGIC_GGUF_LE:
|
||||||
c = &containerGGUF{ByteOrder: binary.LittleEndian}
|
c = &containerGGUF{ByteOrder: binary.LittleEndian, maxArraySize: maxArraySize}
|
||||||
case FILE_MAGIC_GGUF_BE:
|
case FILE_MAGIC_GGUF_BE:
|
||||||
c = &containerGGUF{ByteOrder: binary.BigEndian}
|
c = &containerGGUF{ByteOrder: binary.BigEndian, maxArraySize: maxArraySize}
|
||||||
default:
|
default:
|
||||||
return nil, 0, errors.New("invalid file magic")
|
return nil, 0, errors.New("invalid file magic")
|
||||||
}
|
}
|
||||||
|
|
||||||
model, err := c.Decode(rs)
|
model, err := c.Decode(rs)
|
||||||
if errors.Is(err, io.EOF) {
|
if err != nil {
|
||||||
// noop
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -292,7 +332,10 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
|||||||
embedding := llm.KV().EmbeddingLength()
|
embedding := llm.KV().EmbeddingLength()
|
||||||
heads := llm.KV().HeadCount()
|
heads := llm.KV().HeadCount()
|
||||||
headsKV := llm.KV().HeadCountKV()
|
headsKV := llm.KV().HeadCountKV()
|
||||||
vocab := uint64(len(llm.KV()["tokenizer.ggml.tokens"].([]any)))
|
vocab := uint64(llm.KV()["tokenizer.ggml.tokens"].(*array).size)
|
||||||
|
|
||||||
|
embeddingHeads := llm.KV().EmbeddingHeadCount()
|
||||||
|
embeddingHeadsK := llm.KV().EmbeddingHeadCountK()
|
||||||
|
|
||||||
layers := llm.Tensors().Layers()
|
layers := llm.Tensors().Layers()
|
||||||
|
|
||||||
@@ -302,7 +345,8 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
|||||||
|
|
||||||
partialOffload = 4 * batch * embedding
|
partialOffload = 4 * batch * embedding
|
||||||
partialOffload += max(
|
partialOffload += max(
|
||||||
4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embedding/heads*headsKV),
|
// 4*batch*(4+6*embedding+context*(2*heads)+llm.KV().GQA()),
|
||||||
|
4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embeddingHeads*headsKV),
|
||||||
4*batch*(embedding+vocab)+embedding*vocab*105/128,
|
4*batch*(embedding+vocab)+embedding*vocab*105/128,
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -310,21 +354,30 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
|||||||
// mixtral 8x22b
|
// mixtral 8x22b
|
||||||
ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
|
ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32))
|
||||||
partialOffload = max(
|
partialOffload = max(
|
||||||
3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV),
|
3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embeddingHeads*headsKV),
|
||||||
4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch),
|
4*(context*batch*heads+context*embeddingHeads*headsKV+batch*1024+embeddingHeads*headsKV*batch),
|
||||||
)
|
)
|
||||||
} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
|
} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
|
||||||
// mixtral 8x7b
|
// mixtral 8x7b
|
||||||
ffnGateWeight1 := ffnGateWeight.Shape[1]
|
ffnGateWeight1 := ffnGateWeight.Shape[1]
|
||||||
fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1)
|
fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1)
|
||||||
partialOffload = max(
|
partialOffload = max(
|
||||||
4*batch*(3+embedding/heads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16,
|
4*batch*(3+embeddingHeads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16,
|
||||||
4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16),
|
4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
case "gemma":
|
case "gemma", "gemma2":
|
||||||
fullOffload = 4 * batch * (embedding + vocab)
|
fullOffload = max(
|
||||||
partialOffload = 4*batch*(2*embedding+vocab+1) + embedding*vocab*105/128
|
4*batch*(embedding+vocab),
|
||||||
|
4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads),
|
||||||
|
)
|
||||||
|
|
||||||
|
partialOffload = max(
|
||||||
|
4*embedding*batch+embedding*vocab*105/128+4*vocab*batch,
|
||||||
|
4*batch*(2*embedding+1+2*embeddingHeadsK*heads+context+context*heads)+
|
||||||
|
4*embeddingHeadsK*context*8+
|
||||||
|
embedding*embeddingHeadsK*heads*9/16,
|
||||||
|
)
|
||||||
case "command-r":
|
case "command-r":
|
||||||
fullOffload = max(
|
fullOffload = max(
|
||||||
4*batch*(embedding+vocab),
|
4*batch*(embedding+vocab),
|
||||||
@@ -361,6 +414,42 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui
|
|||||||
4*batch*(vocab+2*embedding),
|
4*batch*(vocab+2*embedding),
|
||||||
fullOffload,
|
fullOffload,
|
||||||
)
|
)
|
||||||
|
case "deepseek2":
|
||||||
|
fullOffload = max(
|
||||||
|
4*batch*(3*embedding+vocab),
|
||||||
|
4*batch*(3*embedding+2+context*(1+headsKV)+2*embeddingHeadsK*headsKV),
|
||||||
|
)
|
||||||
|
|
||||||
|
partialOffload = max(
|
||||||
|
4*batch*(3*embedding+vocab)+embedding*vocab*105/128,
|
||||||
|
4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16,
|
||||||
|
)
|
||||||
|
case "chatglm":
|
||||||
|
fullOffload = 4 * batch * (embedding + vocab)
|
||||||
|
partialOffload = 4*batch*(embedding+vocab) + embedding*vocab*105/128
|
||||||
|
if qkvBias, ok := layers["blk.0"]["attn_qkv.bias"]; ok {
|
||||||
|
fullOffload = max(
|
||||||
|
fullOffload,
|
||||||
|
4*batch*(2+
|
||||||
|
2*embedding+
|
||||||
|
context+
|
||||||
|
context*heads+
|
||||||
|
embeddingHeadsK*heads+
|
||||||
|
qkvBias.Shape[0]),
|
||||||
|
)
|
||||||
|
|
||||||
|
partialOffload = max(
|
||||||
|
partialOffload,
|
||||||
|
4*batch*(1+
|
||||||
|
2*embedding+
|
||||||
|
embeddingHeadsK*heads+
|
||||||
|
context+
|
||||||
|
context*heads)+
|
||||||
|
4*embeddingHeadsK*context+
|
||||||
|
4*context*embeddingHeadsK+
|
||||||
|
4*qkvBias.Shape[0],
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
|
1
llm/ggml_test.go
Normal file
1
llm/ggml_test.go
Normal file
@@ -0,0 +1 @@
|
|||||||
|
package llm
|
144
llm/gguf.go
144
llm/gguf.go
@@ -3,11 +3,10 @@ package llm
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"log/slog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type containerGGUF struct {
|
type containerGGUF struct {
|
||||||
@@ -29,6 +28,12 @@ type containerGGUF struct {
|
|||||||
NumTensor uint64
|
NumTensor uint64
|
||||||
NumKV uint64
|
NumKV uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
maxArraySize int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *containerGGUF) canCollectArray(size int) bool {
|
||||||
|
return c.maxArraySize < 0 || size <= c.maxArraySize
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *containerGGUF) Name() string {
|
func (c *containerGGUF) Name() string {
|
||||||
@@ -54,7 +59,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) {
 	}

 	model := newGGUF(c)
-	slog.Debug(fmt.Sprintf("model = %#v", model))
 	if err := model.Decode(rs); err != nil {
 		return nil, err
 	}
@@ -85,6 +89,8 @@ type gguf struct {
 	tensors []*Tensor

 	parameters uint64
+
+	scratch [16 << 10]byte
 }

 func newGGUF(container *containerGGUF) *gguf {
@@ -181,34 +187,34 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
 	}

 	// decode tensors
-	for i := 0; uint64(i) < llm.numTensor(); i++ {
+	for range llm.numTensor() {
 		name, err := readGGUFString(llm, rs)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to read tensor name: %w", err)
 		}

 		// dims is the number of dimensions in the tensor
 		dims, err := readGGUF[uint32](llm, rs)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to read tensor dimensions: %w", err)
 		}

 		shape := [4]uint64{1, 1, 1, 1}
 		for i := 0; uint32(i) < dims; i++ {
 			shape[i], err = readGGUF[uint64](llm, rs)
 			if err != nil {
-				return err
+				return fmt.Errorf("failed to read tensor shape: %w", err)
 			}
 		}

 		kind, err := readGGUF[uint32](llm, rs)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to read tensor kind: %w", err)
 		}

 		offset, err := readGGUF[uint64](llm, rs)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to read tensor offset: %w", err)
 		}

 		tensor := Tensor{
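The switch from a bare "return err" to fmt.Errorf with %w keeps the original error in the chain, so callers still see the underlying cause while getting a message that says which field failed to decode. A minimal illustration of inspecting such a wrapped error upstream (the helper and message here are invented for the example):

    package main

    import (
        "errors"
        "fmt"
        "io"
    )

    func readTensorName() error {
        // Stand-in for a short read while decoding a tensor name.
        return fmt.Errorf("failed to read tensor name: %w", io.ErrUnexpectedEOF)
    }

    func main() {
        err := readTensorName()
        fmt.Println(err)                                 // contextual message
        fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true: the cause survives wrapping
    }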
@@ -230,24 +236,19 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
 		alignment = 32
 	}

+	for _, tensor := range llm.tensors {
 		offset, err := rs.Seek(0, io.SeekCurrent)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to get current offset: %w", err)
 		}

 		padding := llm.padding(offset, int64(alignment))
 		if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
-			return err
+			return fmt.Errorf("failed to seek to init padding: %w", err)
 		}

-	for _, tensor := range llm.tensors {
 		if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil {
-			return err
-		}
-
-		padding := llm.padding(int64(tensor.Size()), int64(alignment))
-		if _, err := rs.Seek(padding, io.SeekCurrent); err != nil {
-			return err
+			return fmt.Errorf("failed to seek to tensor: %w", err)
 		}
 	}

@@ -285,22 +286,48 @@ func readGGUFV1String(llm *gguf, r io.Reader) (string, error) {
 	return b.String(), nil
 }

+func discardGGUFString(llm *gguf, r io.Reader) error {
+	buf := llm.scratch[:8]
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
+		return err
+	}
+
+	size := int(llm.ByteOrder.Uint64(buf))
+	for size > 0 {
+		n, err := r.Read(llm.scratch[:min(size, cap(llm.scratch))])
+		if err != nil {
+			return err
+		}
+		size -= n
+	}
+	return nil
+}
+
 func readGGUFString(llm *gguf, r io.Reader) (string, error) {
 	if llm.Version == 1 {
 		return readGGUFV1String(llm, r)
 	}

-	var length uint64
-	if err := binary.Read(r, llm.ByteOrder, &length); err != nil {
+	buf := llm.scratch[:8]
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
 		return "", err
 	}

-	var b bytes.Buffer
-	if _, err := io.CopyN(&b, r, int64(length)); err != nil {
+	length := int(llm.ByteOrder.Uint64(buf))
+	if length > len(llm.scratch) {
+		buf = make([]byte, length)
+	} else {
+		buf = llm.scratch[:length]
+	}
+	clear(buf)
+
+	_, err = io.ReadFull(r, buf)
+	if err != nil {
 		return "", err
 	}
-
-	return b.String(), nil
+	return string(buf), nil
 }

 func writeGGUFString(llm *gguf, w io.Writer, s string) error {
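For reference, a GGUF v2/v3 string on disk is a uint64 length followed by that many raw bytes, which is what both the scratch-buffer fast path and discardGGUFString above rely on. A minimal standalone reader, assuming little-endian byte order:

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
        "io"
    )

    // readGGUFStyleString reads a length-prefixed string: an 8-byte
    // little-endian length, then the bytes themselves.
    func readGGUFStyleString(r io.Reader) (string, error) {
        var hdr [8]byte
        if _, err := io.ReadFull(r, hdr[:]); err != nil {
            return "", err
        }
        n := binary.LittleEndian.Uint64(hdr[:])

        buf := make([]byte, n)
        if _, err := io.ReadFull(r, buf); err != nil {
            return "", err
        }
        return string(buf), nil
    }

    func main() {
        // Encode "llama" the same way and read it back.
        var b bytes.Buffer
        binary.Write(&b, binary.LittleEndian, uint64(5))
        b.WriteString("llama")

        s, err := readGGUFStyleString(&b)
        fmt.Println(s, err) // llama <nil>
    }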
@@ -316,7 +343,16 @@ func writeGGUFString(llm *gguf, w io.Writer, s string) error {
 	return err
 }

-func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
+type array struct {
+	size   int
+	values []any
+}
+
+func (a *array) MarshalJSON() ([]byte, error) {
+	return json.Marshal(a.values)
+}
+
+func readGGUFV1Array(llm *gguf, r io.Reader) (*array, error) {
 	t, err := readGGUF[uint32](llm, r)
 	if err != nil {
 		return nil, err
@@ -327,7 +363,12 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
 		return nil, err
 	}

-	for i := 0; uint32(i) < n; i++ {
+	a := &array{size: int(n)}
+	if llm.canCollectArray(int(n)) {
+		a.values = make([]any, 0, int(n))
+	}
+
+	for i := range n {
 		var e any
 		switch t {
 		case ggufTypeUint8:
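Because the new array type wraps its values, the MarshalJSON method above keeps KV metadata printable as a plain JSON list even though the Go type changed. A quick illustration (the values are invented):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type array struct {
        size   int
        values []any
    }

    func (a *array) MarshalJSON() ([]byte, error) {
        return json.Marshal(a.values)
    }

    func main() {
        a := &array{size: 3, values: []any{"q4_0", "q4_1", "q8_0"}}
        b, _ := json.Marshal(a)
        fmt.Println(string(b)) // ["q4_0","q4_1","q8_0"]
    }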
@@ -361,13 +402,15 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) {
 			return nil, err
 		}

-		a = append(a, e)
+		if a.values != nil {
+			a.values[i] = e
+		}
 	}

-	return
+	return a, nil
 }

-func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
+func readGGUFArray(llm *gguf, r io.Reader) (*array, error) {
 	if llm.Version == 1 {
 		return readGGUFV1Array(llm, r)
 	}
@@ -382,7 +425,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
 		return nil, err
 	}

-	for i := 0; uint64(i) < n; i++ {
+	a := &array{size: int(n)}
+	if llm.canCollectArray(int(n)) {
+		a.values = make([]any, int(n))
+	}
+
+	for i := range n {
 		var e any
 		switch t {
 		case ggufTypeUint8:
@@ -408,7 +456,11 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
 		case ggufTypeBool:
 			e, err = readGGUF[bool](llm, r)
 		case ggufTypeString:
+			if a.values != nil {
 				e, err = readGGUFString(llm, r)
+			} else {
+				err = discardGGUFString(llm, r)
+			}
 		default:
 			return nil, fmt.Errorf("invalid array type: %d", t)
 		}
@@ -416,10 +468,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) {
 			return nil, err
 		}

-		a = append(a, e)
+		if a.values != nil {
+			a.values[i] = e
+		}
 	}

-	return
+	return a, nil
 }

 func writeGGUFArray[S ~[]E, E any](llm *gguf, w io.Writer, t uint32, s S) error {
@@ -592,8 +646,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 			return err
 		}

-		dims := 0
-		for cnt := 0; cnt < len(tensor.Shape); cnt++ {
+		var dims int
+		for cnt := range len(tensor.Shape) {
 			if tensor.Shape[cnt] > 0 {
 				dims++
 			}
@@ -603,8 +657,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 			return err
 		}

-		for i := 0; i < dims; i++ {
-			if err := binary.Write(ws, llm.ByteOrder, uint64(tensor.Shape[dims-1-i])); err != nil {
+		for i := range dims {
+			if err := binary.Write(ws, llm.ByteOrder, tensor.Shape[dims-1-i]); err != nil {
 				return err
 			}
 		}
@@ -618,22 +672,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 		}
 	}

-	offset, err := ws.Seek(0, io.SeekCurrent)
-	if err != nil {
-		return err
-	}
-
 	var alignment int64 = 32
-	padding := llm.padding(offset, alignment)
-	if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil {
-		return err
-	}
-
 	for _, tensor := range tensors {
-		if _, err := tensor.WriteTo(ws); err != nil {
-			return err
-		}
-
 		offset, err := ws.Seek(0, io.SeekCurrent)
 		if err != nil {
 			return err
@@ -643,6 +683,10 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 		if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil {
 			return err
 		}
+
+		if _, err := tensor.WriteTo(ws); err != nil {
+			return err
+		}
 	}

 	return nil
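Both the decode and encode paths above advance to the next alignment boundary before touching tensor data. The padding helper itself is not shown in these hunks; a sketch of the usual computation, assuming that is what llm.padding does, looks like this:

    package main

    import "fmt"

    // padding returns how many bytes are needed to move offset up to the next
    // multiple of align. This mirrors the typical GGUF alignment rule; it is an
    // assumption here, not a copy of llm.padding.
    func padding(offset, align int64) int64 {
        return (align - offset%align) % align
    }

    func main() {
        const alignment = 32
        for _, off := range []int64{0, 1, 31, 32, 100} {
            fmt.Printf("offset %3d -> pad %2d bytes\n", off, padding(off, alignment))
        }
    }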
Submodule llm/llama.cpp updated: 74f33adf5f...a8db2a9ce6
llm/llm.go (17 lines changed)
@@ -1,12 +1,13 @@
 package llm

-// #cgo CFLAGS: -Illama.cpp
-// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/libllama.a -lstdc++
-// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/libllama.a -lstdc++
-// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/libllama.a -static -lstdc++
-// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/libllama.a -static -lstdc++
-// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/libllama.a -lstdc++
-// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/libllama.a -lstdc++
+// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include
+// #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread
+// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal
+// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src
+// #cgo windows,amd64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src
+// #cgo windows,arm64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src
+// #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src
+// #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
 // #include <stdlib.h>
 // #include "llama.h"
 import "C"
@@ -32,7 +33,7 @@ func Quantize(infile, outfile string, ftype fileType) error {
 	params.ftype = ftype.Value()

 	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
-		return fmt.Errorf("llama_model_quantize: %d", rc)
+		return fmt.Errorf("failed to quantize model. This model architecture may not be supported, or you may need to upgrade Ollama to the latest version")
 	}

 	return nil
llm/memory.go (315 lines changed)
@@ -3,11 +3,12 @@ package llm
 import (
 	"fmt"
 	"log/slog"
+	"strconv"
+	"strings"

 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/format"
 	"github.com/ollama/ollama/gpu"
-	"github.com/ollama/ollama/envconfig"
 )

 // This algorithm looks for a complete fit to determine if we need to unload other models
@@ -16,7 +17,8 @@ func PredictServerFit(allGpus gpu.GpuInfoList, ggml *GGML, adapters, projectors
 	var estimatedVRAM uint64
 	for _, gpus := range allGpus.ByLibrary() {
 		var layerCount int
-		layerCount, estimatedVRAM, _ = EstimateGPULayers(gpus, ggml, projectors, opts)
+		estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
+		layerCount, estimatedVRAM = estimate.Layers, estimate.VRAMSize
 		if opts.NumGPU < 0 {
 			if layerCount > 0 && layerCount >= int(ggml.KV().BlockCount()+1) {
 				return true, estimatedVRAM
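The fit test itself is unchanged: a set of GPUs can hold the model outright only when the estimated layer count covers every transformer block plus one extra layer for the output tensors. A tiny runnable restatement of that check:

    package main

    import "fmt"

    // fits mirrors the condition in PredictServerFit: a model "fits" when the
    // estimated offloadable layers cover every block plus the output layer.
    func fits(estimatedLayers, blockCount int) bool {
        return estimatedLayers > 0 && estimatedLayers >= blockCount+1
    }

    func main() {
        fmt.Println(fits(33, 32)) // true: all 32 blocks plus the output layer
        fmt.Println(fits(20, 32)) // false: partial offload only
    }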
@@ -30,24 +32,76 @@ func PredictServerFit(allGpus gpu.GpuInfoList, ggml *GGML, adapters, projectors
 	return false, estimatedVRAM
 }

+type MemoryEstimate struct {
+	// How many layers we predict we can load
+	Layers int
+
+	// The size of the graph which occupies the main GPU
+	Graph uint64
+
+	// How much VRAM will be allocated given the number of layers we predict
+	VRAMSize uint64
+
+	// The total size of the model if loaded into VRAM. If all layers are loaded, VRAMSize == TotalSize
+	TotalSize uint64
+
+	// For multi-GPU scenarios, this provides the tensor split parameter
+	TensorSplit string
+
+	// For multi-GPU scenarios, this is the size in bytes per GPU
+	GPUSizes []uint64
+
+	// internal fields for logging purposes
+	inferenceLibrary    string
+	layersRequested     int
+	layersModel         int
+	availableList       []string
+	kv                  uint64
+	allocationsList     []string
+	memoryWeights       uint64
+	memoryLayerOutput   uint64
+	graphFullOffload    uint64
+	graphPartialOffload uint64
+}
+
 // Given a model and one or more GPU targets, predict how many layers and bytes we can load, and the total size
 // The GPUs provided must all be the same Library
-func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts api.Options) (int, uint64, uint64) {
-	var memoryAvailable uint64
-	for _, info := range gpus {
-		memoryAvailable += info.FreeMemory
-	}
-	if envconfig.MaxVRAM > 0 {
-		memoryAvailable = envconfig.MaxVRAM
-	}
-
-	slog.Debug("evaluating", "library", gpus[0].Library, "gpu_count", len(gpus), "available", format.HumanBytes2(memoryAvailable))
-
-	// TODO - this is probably wrong, first GPU vs secondaries will have different overheads
-	memoryMinimum := gpus[0].MinimumMemory
+func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts api.Options) MemoryEstimate {
+	// Graph size for a partial offload, applies to all GPUs
+	var graphPartialOffload uint64
+
+	// Graph size when all layers are offloaded, applies to all GPUs
+	var graphFullOffload uint64
+
+	// Final graph offload once we know full or partial
+	var graphOffload uint64
+
+	// Projectors loaded into GPU0 only
+	var projectorSize uint64
+
+	// Conditional output size on GPU 0
+	var memoryLayerOutput uint64
+
+	// The sizes of a layer
+	var layerSize uint64
+
+	// The sum of all the layer sizes (just for logging)
+	var memoryWeights uint64
+
+	// True if all the layers are loaded
+	var fullyLoaded bool
+
+	// Overflow that didn't fit into the GPU
+	var overflow uint64
+
+	availableList := make([]string, len(gpus))
+	for i, gpu := range gpus {
+		availableList[i] = format.HumanBytes2(gpu.FreeMemory)
+	}
+	slog.Debug("evaluating", "library", gpus[0].Library, "gpu_count", len(gpus), "available", availableList)

 	for _, projector := range projectors {
-		memoryMinimum += projectorMemoryRequirements(projector)
+		projectorSize += projectorMemoryRequirements(projector)

 		// multimodal models require at least 2048 context
 		opts.NumCtx = max(opts.NumCtx, 2048)
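The hunk that follows replaces the fp16 KV-cache estimate: instead of deriving the head dimension from embedding_length divided by head_count, it sums the K and V head dimensions directly and multiplies by the KV head count, context length, layer count, and 2 bytes per fp16 value. A worked example with illustrative llama-style dimensions (not taken from a real model file):

    package main

    import "fmt"

    func main() {
        // Illustrative dimensions only.
        var (
            numCtx         uint64 = 2048
            blockCount     uint64 = 32
            embeddingHeadK uint64 = 128 // n_embd_head_k
            embeddingHeadV uint64 = 128 // n_embd_head_v
            headCountKV    uint64 = 8   // grouped-query attention
        )

        // fp16 k,v = sizeof(float16) * n_ctx * n_layer * (n_embd_head_k + n_embd_head_v) * n_head_kv
        kv := 2 * numCtx * blockCount * (embeddingHeadK + embeddingHeadV) * headCountKV
        fmt.Printf("estimated fp16 KV cache: %d bytes (%.2f GiB)\n", kv, float64(kv)/(1<<30))
    }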
@@ -56,127 +110,246 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
|
|||||||
layers := ggml.Tensors().Layers()
|
layers := ggml.Tensors().Layers()
|
||||||
// add one layer worth of memory as a buffer
|
// add one layer worth of memory as a buffer
|
||||||
if blk0, ok := layers["blk.0"]; ok {
|
if blk0, ok := layers["blk.0"]; ok {
|
||||||
memoryMinimum += blk0.size()
|
layerSize = blk0.size()
|
||||||
|
} else {
|
||||||
|
slog.Warn("model missing blk.0 layer size")
|
||||||
}
|
}
|
||||||
|
|
||||||
// fp16 k,v = (1 (k) + 1 (v)) * sizeof(float16) * n_ctx * n_layer * n_embd / n_head * n_head_kv
|
// fp16 k,v = sizeof(float16) * n_ctx * n_layer * (n_embd_head_k + n_embd_head_v) * n_head_kv
|
||||||
var kv uint64 = 2 * 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * ggml.KV().EmbeddingLength() / ggml.KV().HeadCount() * ggml.KV().HeadCountKV()
|
var kv uint64 = 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * (ggml.KV().EmbeddingHeadCountK() + ggml.KV().EmbeddingHeadCountV()) * ggml.KV().HeadCountKV()
|
||||||
|
|
||||||
graphPartialOffload, graphFullOffload := ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))
|
// KV is proportional to the number of layers
|
||||||
|
layerSize += kv / ggml.KV().BlockCount()
|
||||||
|
|
||||||
|
graphPartialOffload, graphFullOffload = ggml.GraphSize(uint64(opts.NumCtx), uint64(min(opts.NumCtx, opts.NumBatch)))
|
||||||
if graphPartialOffload == 0 {
|
if graphPartialOffload == 0 {
|
||||||
graphPartialOffload = ggml.KV().GQA() * kv / 6
|
graphPartialOffload = ggml.KV().GQA() * kv / 6
|
||||||
}
|
}
|
||||||
|
|
||||||
if graphFullOffload == 0 {
|
if graphFullOffload == 0 {
|
||||||
graphFullOffload = graphPartialOffload
|
graphFullOffload = graphPartialOffload
|
||||||
}
|
}
|
||||||
|
|
||||||
graphFullOffload *= uint64(len(gpus))
|
|
||||||
graphPartialOffload *= uint64(len(gpus))
|
|
||||||
|
|
||||||
// on metal there's no partial offload overhead
|
// on metal there's no partial offload overhead
|
||||||
if gpus[0].Library == "metal" {
|
if gpus[0].Library == "metal" {
|
||||||
graphPartialOffload = graphFullOffload
|
graphPartialOffload = graphFullOffload
|
||||||
|
} else if len(gpus) > 1 {
|
||||||
|
// multigpu should always use the partial graph size
|
||||||
|
graphFullOffload = graphPartialOffload
|
||||||
}
|
}
|
||||||
|
|
||||||
// memoryRequiredTotal represents the memory required for full GPU offloading (all layers)
|
|
||||||
memoryRequiredTotal := memoryMinimum + graphFullOffload
|
|
||||||
|
|
||||||
// memoryRequiredPartial represents the memory required for partial GPU offloading (n > 0, n < layers)
|
|
||||||
memoryRequiredPartial := memoryMinimum + graphPartialOffload
|
|
||||||
|
|
||||||
var memoryLayerOutput uint64
|
|
||||||
if layer, ok := layers["output_norm"]; ok {
|
if layer, ok := layers["output_norm"]; ok {
|
||||||
memoryLayerOutput += layer.size()
|
memoryLayerOutput += layer.size()
|
||||||
}
|
}
|
||||||
|
|
||||||
if layer, ok := layers["output"]; ok {
|
if layer, ok := layers["output"]; ok {
|
||||||
memoryLayerOutput += layer.size()
|
memoryLayerOutput += layer.size()
|
||||||
} else if layer, ok := layers["token_embd"]; ok {
|
} else if layer, ok := layers["token_embd"]; ok {
|
||||||
memoryLayerOutput += layer.size()
|
memoryLayerOutput += layer.size()
|
||||||
}
|
}
|
||||||
|
|
||||||
if gpus[0].Library == "metal" && opts.UseMMap {
|
// Output layer handled at the end if we have space
|
||||||
// memory is preallocated for output tensors
|
gpuZeroOverhead := projectorSize
|
||||||
memoryRequiredTotal += memoryLayerOutput
|
|
||||||
memoryRequiredPartial += memoryLayerOutput
|
|
||||||
}
|
|
||||||
|
|
||||||
|
// Reduce set of GPUs to only those that have sufficient space to fit overhead and at least one layer
|
||||||
var layerCount int
|
var layerCount int
|
||||||
for i := 0; i < int(ggml.KV().BlockCount()); i++ {
|
layerCounts := make([]int, len(gpus))
|
||||||
|
gpuAllocations := make([]uint64, len(gpus))
|
||||||
|
type gs struct {
|
||||||
|
i int
|
||||||
|
g *gpu.GpuInfo
|
||||||
|
}
|
||||||
|
gpusWithSpace := []gs{}
|
||||||
|
for i := range gpus {
|
||||||
|
var gzo uint64
|
||||||
|
if len(gpusWithSpace) == 0 {
|
||||||
|
gzo = gpuZeroOverhead
|
||||||
|
}
|
||||||
|
// Only include GPUs that can fit the graph, gpu minimum, the layer buffer and at least more layer
|
||||||
|
if gpus[i].FreeMemory < gzo+max(graphPartialOffload, graphFullOffload)+gpus[i].MinimumMemory+2*layerSize {
|
||||||
|
slog.Debug("gpu has too little memory to allocate any layers", "gpu", gpus[i])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
gpusWithSpace = append(gpusWithSpace, gs{i, &gpus[i]})
|
||||||
|
gpuAllocations[i] += gpus[i].MinimumMemory + layerSize // We hold off on graph until we know partial vs. full
|
||||||
|
}
|
||||||
|
|
||||||
|
var gpuZeroID int
|
||||||
|
if len(gpusWithSpace) > 0 {
|
||||||
|
gpuZeroID = gpusWithSpace[0].i
|
||||||
|
gpuAllocations[gpuZeroID] += gpuZeroOverhead
|
||||||
|
}
|
||||||
|
|
||||||
|
// For all the layers, find where they can fit on the GPU(s)
|
||||||
|
for i := range int(ggml.KV().BlockCount()) {
|
||||||
|
// Some models have inconsistent layer sizes
|
||||||
if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
|
if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
|
||||||
memoryLayer := blk.size()
|
layerSize = blk.size()
|
||||||
|
layerSize += kv / ggml.KV().BlockCount()
|
||||||
|
}
|
||||||
|
memoryWeights += layerSize
|
||||||
|
|
||||||
// KV is proportional to the number of layers
|
if opts.NumGPU >= 0 && layerCount >= opts.NumGPU {
|
||||||
memoryLayer += kv / ggml.KV().BlockCount()
|
// Stop allocating on GPU(s) once we hit the users target NumGPU
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
memoryRequiredTotal += memoryLayer
|
// distribute the layers across the GPU(s) that have space
|
||||||
if (opts.NumGPU >= 0 && layerCount+1 <= opts.NumGPU) || (opts.NumGPU < 0 && memoryAvailable > memoryRequiredPartial+memoryLayer) {
|
for j := len(gpusWithSpace); j > 0; j-- {
|
||||||
memoryRequiredPartial += memoryLayer
|
g := gpusWithSpace[i%j]
|
||||||
|
used := gpuAllocations[g.i] + max(graphPartialOffload, graphFullOffload)
|
||||||
|
if g.g.FreeMemory > used+layerSize {
|
||||||
|
gpuAllocations[g.i] += layerSize
|
||||||
|
layerCounts[g.i]++
|
||||||
layerCount++
|
layerCount++
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
gpusWithSpace = append(gpusWithSpace[:i%j], gpusWithSpace[i%j+1:]...)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
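The placement loop above deals layers out round-robin across the GPUs that still have room, dropping a GPU from the candidate list as soon as the next layer plus the pending graph allocation would no longer fit. A simplified, self-contained sketch of that strategy with made-up sizes:

    package main

    import "fmt"

    func main() {
        // Hypothetical free memory per GPU and a uniform layer size, in bytes.
        free := []uint64{6 << 30, 3 << 30}
        var layerSize uint64 = 512 << 20
        var graphReserve uint64 = 1 << 30 // stand-in for max(partial, full) graph size

        alloc := make([]uint64, len(free))
        counts := make([]int, len(free))
        candidates := []int{0, 1} // indexes of GPUs that still have space

        const blockCount = 32
        for i := 0; i < blockCount && len(candidates) > 0; i++ {
            for j := len(candidates); j > 0; j-- {
                g := candidates[i%j]
                if free[g] > alloc[g]+graphReserve+layerSize {
                    alloc[g] += layerSize
                    counts[g]++
                    break
                }
                // This GPU is full; stop considering it.
                candidates = append(candidates[:i%j], candidates[i%j+1:]...)
            }
        }
        fmt.Println("layers per GPU:", counts)
    }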
|
if layerCount >= int(ggml.KV().BlockCount()) {
|
||||||
|
fullyLoaded = true
|
||||||
|
} else {
|
||||||
|
for i := layerCount; i < int(ggml.KV().BlockCount()); i++ {
|
||||||
|
overflow += layerSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if gpus[0].Library != "metal" || !opts.UseMMap {
|
// Determine if we need to consider output then find where it fits
|
||||||
// memory was not preallocated for output tensors
|
if memoryLayerOutput > 0 && (opts.NumGPU < 0 || layerCount < opts.NumGPU) {
|
||||||
memoryRequiredTotal += memoryLayerOutput
|
for j := len(gpusWithSpace); j > 0; j-- {
|
||||||
|
g := gpusWithSpace[layerCount%j]
|
||||||
|
used := gpuAllocations[g.i] + max(graphPartialOffload, graphFullOffload)
|
||||||
|
if g.g.FreeMemory > used+memoryLayerOutput {
|
||||||
|
gpuAllocations[g.i] += memoryLayerOutput
|
||||||
|
layerCounts[g.i]++
|
||||||
|
layerCount++
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (opts.NumGPU >= 0 && layerCount+1 <= opts.NumGPU) || (opts.NumGPU < 0 && memoryAvailable > memoryRequiredTotal) {
|
if layerCount < int(ggml.KV().BlockCount())+1 {
|
||||||
layerCount = int(ggml.KV().BlockCount()) + 1
|
fullyLoaded = false
|
||||||
memoryRequiredPartial = memoryRequiredTotal
|
overflow += memoryLayerOutput
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
memoryWeights := memoryRequiredTotal - memoryMinimum - graphFullOffload - kv
|
// Add the applicable (full or partial) graph allocations
|
||||||
|
for i := range gpus {
|
||||||
|
if layerCounts[i] <= 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if fullyLoaded {
|
||||||
|
gpuAllocations[i] += graphFullOffload
|
||||||
|
} else {
|
||||||
|
gpuAllocations[i] += graphPartialOffload
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if fullyLoaded {
|
||||||
|
graphOffload = graphFullOffload
|
||||||
|
} else {
|
||||||
|
graphOffload = graphPartialOffload
|
||||||
|
}
|
||||||
|
|
||||||
|
// Summaries for the log
|
||||||
|
var memoryRequiredPartial, memoryRequiredTotal uint64
|
||||||
|
for i := range gpuAllocations {
|
||||||
|
memoryRequiredPartial += gpuAllocations[i]
|
||||||
|
}
|
||||||
|
memoryRequiredTotal = memoryRequiredPartial + overflow
|
||||||
|
|
||||||
|
tensorSplit := ""
|
||||||
|
if len(gpus) > 1 {
|
||||||
|
splits := make([]string, len(gpus))
|
||||||
|
for i, count := range layerCounts {
|
||||||
|
splits[i] = strconv.Itoa(count)
|
||||||
|
}
|
||||||
|
tensorSplit = strings.Join(splits, ",")
|
||||||
|
}
|
||||||
|
allocationsList := []string{}
|
||||||
|
for _, a := range gpuAllocations {
|
||||||
|
allocationsList = append(allocationsList, format.HumanBytes2(a))
|
||||||
|
}
|
||||||
|
|
||||||
|
estimate := MemoryEstimate{
|
||||||
|
TotalSize: memoryRequiredTotal,
|
||||||
|
Layers: 0,
|
||||||
|
Graph: 0,
|
||||||
|
VRAMSize: 0,
|
||||||
|
GPUSizes: []uint64{},
|
||||||
|
|
||||||
|
inferenceLibrary: gpus[0].Library,
|
||||||
|
layersRequested: opts.NumGPU,
|
||||||
|
layersModel: int(ggml.KV().BlockCount()) + 1,
|
||||||
|
availableList: availableList,
|
||||||
|
kv: kv,
|
||||||
|
allocationsList: allocationsList,
|
||||||
|
memoryWeights: memoryWeights,
|
||||||
|
memoryLayerOutput: memoryLayerOutput,
|
||||||
|
graphFullOffload: graphFullOffload,
|
||||||
|
graphPartialOffload: graphPartialOffload,
|
||||||
|
}
|
||||||
|
|
||||||
|
if gpus[0].Library == "cpu" {
|
||||||
|
return estimate
|
||||||
|
}
|
||||||
|
if layerCount == 0 {
|
||||||
|
slog.Debug("insufficient VRAM to load any model layers")
|
||||||
|
return estimate
|
||||||
|
}
|
||||||
|
estimate.Layers = layerCount
|
||||||
|
estimate.Graph = graphOffload
|
||||||
|
estimate.VRAMSize = memoryRequiredPartial
|
||||||
|
estimate.TotalSize = memoryRequiredTotal
|
||||||
|
estimate.TensorSplit = tensorSplit
|
||||||
|
estimate.GPUSizes = gpuAllocations
|
||||||
|
return estimate
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m MemoryEstimate) log() {
|
||||||
slog.Info(
|
slog.Info(
|
||||||
"offload to gpu",
|
"offload to "+m.inferenceLibrary,
|
||||||
slog.Group(
|
slog.Group(
|
||||||
"layers",
|
"layers",
|
||||||
// requested number of layers to offload
|
// requested number of layers to offload
|
||||||
"requested", opts.NumGPU,
|
"requested", m.layersRequested,
|
||||||
|
// The number of layers the model has (including output)
|
||||||
|
"model", m.layersModel,
|
||||||
// estimated number of layers that can be offloaded
|
// estimated number of layers that can be offloaded
|
||||||
"real", layerCount,
|
"offload", m.Layers,
|
||||||
|
// multi-gpu split for tensors
|
||||||
|
"split", m.TensorSplit,
|
||||||
),
|
),
|
||||||
slog.Group(
|
slog.Group(
|
||||||
"memory",
|
"memory",
|
||||||
// memory available for offloading
|
// memory available by GPU for offloading
|
||||||
"available", format.HumanBytes2(memoryAvailable),
|
"available", m.availableList,
|
||||||
slog.Group(
|
slog.Group(
|
||||||
"required",
|
"required",
|
||||||
// memory required for full offloading
|
// memory required for full offloading
|
||||||
"full", format.HumanBytes2(memoryRequiredTotal),
|
"full", format.HumanBytes2(m.TotalSize),
|
||||||
// memory required to offload layers.estimate layers
|
// memory required to offload layers.estimate layers
|
||||||
"partial", format.HumanBytes2(memoryRequiredPartial),
|
"partial", format.HumanBytes2(m.VRAMSize),
|
||||||
// memory of KV cache
|
// memory of KV cache
|
||||||
"kv", format.HumanBytes2(kv),
|
"kv", format.HumanBytes2(m.kv),
|
||||||
|
// Allocations across the GPUs
|
||||||
|
"allocations", m.allocationsList,
|
||||||
),
|
),
|
||||||
slog.Group(
|
slog.Group(
|
||||||
"weights",
|
"weights",
|
||||||
// memory of the weights
|
// memory of the weights
|
||||||
"total", format.HumanBytes2(memoryWeights),
|
"total", format.HumanBytes2(m.memoryWeights),
|
||||||
// memory of repeating layers
|
// memory of repeating layers
|
||||||
"repeating", format.HumanBytes2(memoryWeights-memoryLayerOutput),
|
"repeating", format.HumanBytes2(m.memoryWeights-m.memoryLayerOutput),
|
||||||
// memory of non-repeating layers
|
// memory of non-repeating layers
|
||||||
"nonrepeating", format.HumanBytes2(memoryLayerOutput),
|
"nonrepeating", format.HumanBytes2(m.memoryLayerOutput),
|
||||||
),
|
),
|
||||||
slog.Group(
|
slog.Group(
|
||||||
"graph",
|
"graph",
|
||||||
// memory of graph when fully offloaded
|
// memory of graph when fully offloaded
|
||||||
"full", format.HumanBytes2(graphFullOffload),
|
"full", format.HumanBytes2(m.graphFullOffload),
|
||||||
// memory of graph when not fully offloaded
|
// memory of graph when not fully offloaded
|
||||||
"partial", format.HumanBytes2(graphPartialOffload),
|
"partial", format.HumanBytes2(m.graphPartialOffload),
|
||||||
),
|
),
|
||||||
),
|
),
|
||||||
)
|
)
|
||||||
if gpus[0].Library == "cpu" {
|
|
||||||
return 0, 0, memoryRequiredTotal
|
|
||||||
}
|
|
||||||
if memoryRequiredPartial > memoryAvailable {
|
|
||||||
slog.Debug("insufficient VRAM to load any model layers")
|
|
||||||
return 0, 0, memoryRequiredTotal
|
|
||||||
}
|
|
||||||
|
|
||||||
return layerCount, memoryRequiredPartial, memoryRequiredTotal
|
|
||||||
}
|
}
|
||||||
|
llm/memory_test.go (new file, 130 lines)
@@ -0,0 +1,130 @@
|
|||||||
|
package llm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/ollama/ollama/api"
|
||||||
|
"github.com/ollama/ollama/envconfig"
|
||||||
|
"github.com/ollama/ollama/gpu"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEstimateGPULayers(t *testing.T) {
|
||||||
|
envconfig.Debug = true
|
||||||
|
modelName := "dummy"
|
||||||
|
f, err := os.CreateTemp(t.TempDir(), modelName)
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer f.Close()
|
||||||
|
gguf := NewGGUFV3(binary.LittleEndian)
|
||||||
|
inputLayerCount := 5
|
||||||
|
|
||||||
|
tensors := []Tensor{
|
||||||
|
{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||||
|
{Name: "blk.1.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||||
|
{Name: "blk.2.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||||
|
{Name: "blk.3.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||||
|
{Name: "blk.4.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||||
|
{Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))},
|
||||||
|
}
|
||||||
|
assert.Len(t, tensors, inputLayerCount+1)
|
||||||
|
err = gguf.Encode(f, KV{
|
||||||
|
"general.architecture": "llama",
|
||||||
|
"general.name": "name",
|
||||||
|
"llama.context_length": uint32(32),
|
||||||
|
"llama.embedding_length": uint32(4096),
|
||||||
|
"llama.block_count": uint32(inputLayerCount),
|
||||||
|
"llama.attention.head_count": uint32(32),
|
||||||
|
"llama.attention.head_count_kv": uint32(32),
|
||||||
|
"tokenizer.ggml.tokens": []string{" "},
|
||||||
|
"tokenizer.ggml.scores": []float32{0},
|
||||||
|
"tokenizer.ggml.token_type": []int32{0},
|
||||||
|
}, tensors)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
ggml, err := LoadModel(f.Name(), 0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simple CPU scenario
|
||||||
|
gpus := []gpu.GpuInfo{
|
||||||
|
{
|
||||||
|
Library: "cpu",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
projectors := []string{}
|
||||||
|
opts := api.DefaultOptions()
|
||||||
|
t.Run("cpu", func(t *testing.T) {
|
||||||
|
estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||||
|
assert.Equal(t, 0, estimate.Layers)
|
||||||
|
assert.Equal(t, uint64(0), estimate.Graph)
|
||||||
|
})
|
||||||
|
|
||||||
|
// derived from the dummy ggml file above
|
||||||
|
graphPartialOffload := uint64(202377216)
|
||||||
|
graphFullOffload := uint64(171968512)
|
||||||
|
layerSize := uint64(33554436)
|
||||||
|
projectorSize := uint64(0)
|
||||||
|
memoryLayerOutput := uint64(4)
|
||||||
|
|
||||||
|
// Dual CUDA scenario with assymetry
|
||||||
|
gpuMinimumMemory := uint64(2048)
|
||||||
|
gpus = []gpu.GpuInfo{
|
||||||
|
{
|
||||||
|
Library: "cuda",
|
||||||
|
MinimumMemory: gpuMinimumMemory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Library: "cuda",
|
||||||
|
MinimumMemory: gpuMinimumMemory,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// Nested array: GPU0 layer space, GPU1 layer space, expected gpu0, expected gpu1
|
||||||
|
for i, s := range []struct {
|
||||||
|
layer0, layer1 uint64
|
||||||
|
expect0, expect1 uint64
|
||||||
|
}{
|
||||||
|
{1, 1, 1, 1},
|
||||||
|
{2, 1, 2, 1},
|
||||||
|
{2, 2, 2, 2},
|
||||||
|
{1, 2, 1, 2},
|
||||||
|
{3, 3, 3, 3},
|
||||||
|
{4, 4, 3, 3},
|
||||||
|
{6, 6, 3, 3},
|
||||||
|
{0, 3, 0, 3},
|
||||||
|
} {
|
||||||
|
t.Run(fmt.Sprintf("%v", s), func(t *testing.T) {
|
||||||
|
gpus[0].FreeMemory = 0
|
||||||
|
gpus[1].FreeMemory = 0
|
||||||
|
gpus[0].FreeMemory += projectorSize
|
||||||
|
if s.layer0 > 0 {
|
||||||
|
gpus[0].FreeMemory += memoryLayerOutput
|
||||||
|
} else {
|
||||||
|
gpus[1].FreeMemory += memoryLayerOutput
|
||||||
|
}
|
||||||
|
gpus[0].FreeMemory += gpuMinimumMemory + layerSize + s.layer0*layerSize + 1
|
||||||
|
gpus[1].FreeMemory += gpuMinimumMemory + layerSize + s.layer1*layerSize + 1
|
||||||
|
gpus[0].FreeMemory += max(graphFullOffload, graphPartialOffload)
|
||||||
|
gpus[1].FreeMemory += max(graphFullOffload, graphPartialOffload)
|
||||||
|
estimate := EstimateGPULayers(gpus, ggml, projectors, opts)
|
||||||
|
assert.Equal(t, int(s.expect0+s.expect1), estimate.Layers, "scenario %d: %v", i, s)
|
||||||
|
assert.Equal(t, fmt.Sprintf("%d,%d", s.expect0, s.expect1), estimate.TensorSplit, "scenario %d: %v", i, s)
|
||||||
|
var layerSums uint64
|
||||||
|
for _, b := range estimate.GPUSizes {
|
||||||
|
layerSums += b
|
||||||
|
}
|
||||||
|
if estimate.Layers < inputLayerCount+1 {
|
||||||
|
assert.Less(t, estimate.VRAMSize, estimate.TotalSize, "scenario %d: %v %+v", i, s, estimate)
|
||||||
|
assert.Equal(t, estimate.VRAMSize, layerSums, "scenario %d: %v %+v", i, s, estimate)
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, estimate.VRAMSize, estimate.TotalSize, "scenario %d: %v %+v", i, s, estimate)
|
||||||
|
assert.Equal(t, estimate.TotalSize, layerSums, "scenario %d: %v %+v", i, s, estimate)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
@@ -1,8 +1,8 @@
|
|||||||
diff --git a/common/common.cpp b/common/common.cpp
|
diff --git a/common/common.cpp b/common/common.cpp
|
||||||
index ba1ecf0e..cead57cc 100644
|
index 2c05a4d4..927f0e3d 100644
|
||||||
--- a/common/common.cpp
|
--- a/common/common.cpp
|
||||||
+++ b/common/common.cpp
|
+++ b/common/common.cpp
|
||||||
@@ -1836,6 +1836,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
|
@@ -2093,6 +2093,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
|
||||||
mparams.use_mmap = params.use_mmap;
|
mparams.use_mmap = params.use_mmap;
|
||||||
mparams.use_mlock = params.use_mlock;
|
mparams.use_mlock = params.use_mlock;
|
||||||
mparams.check_tensors = params.check_tensors;
|
mparams.check_tensors = params.check_tensors;
|
||||||
@@ -12,20 +12,20 @@ index ba1ecf0e..cead57cc 100644
|
|||||||
mparams.kv_overrides = NULL;
|
mparams.kv_overrides = NULL;
|
||||||
} else {
|
} else {
|
||||||
diff --git a/common/common.h b/common/common.h
|
diff --git a/common/common.h b/common/common.h
|
||||||
index d80344f2..71e84834 100644
|
index 65c0ef81..ebca2c77 100644
|
||||||
--- a/common/common.h
|
--- a/common/common.h
|
||||||
+++ b/common/common.h
|
+++ b/common/common.h
|
||||||
@@ -174,6 +174,13 @@ struct gpt_params {
|
@@ -184,6 +184,13 @@ struct gpt_params {
|
||||||
// multimodal models (see examples/llava)
|
|
||||||
std::string mmproj = ""; // path to multimodal projector
|
std::string mmproj = ""; // path to multimodal projector
|
||||||
std::vector<std::string> image; // path to image file(s)
|
std::vector<std::string> image; // path to image file(s)
|
||||||
+
|
|
||||||
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
|
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
|
||||||
+ // If the provided progress_callback returns true, model loading continues.
|
+ // If the provided progress_callback returns true, model loading continues.
|
||||||
+ // If it returns false, model loading is immediately aborted.
|
+ // If it returns false, model loading is immediately aborted.
|
||||||
+ llama_progress_callback progress_callback = NULL;
|
+ llama_progress_callback progress_callback = NULL;
|
||||||
+ // context pointer passed to the progress callback
|
+ // context pointer passed to the progress callback
|
||||||
+ void * progress_callback_user_data;
|
+ void * progress_callback_user_data;
|
||||||
};
|
+
|
||||||
|
// embedding
|
||||||
void gpt_params_handle_model_default(gpt_params & params);
|
bool embedding = false; // get only sentence embedding
|
||||||
|
int32_t embd_normalize = 2; // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
|
||||||
|
@@ -1,17 +1,8 @@
|
|||||||
From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001
|
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||||
From: Michael Yang <mxyng@pm.me>
|
index 73f52435..58a00fb1 100644
|
||||||
Date: Thu, 23 May 2024 11:18:45 -0700
|
--- a/src/llama.cpp
|
||||||
Subject: [PATCH] throw exception on load errors
|
+++ b/src/llama.cpp
|
||||||
|
@@ -7241,7 +7241,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
|
||||||
---
|
|
||||||
llama.cpp | 25 ++++++++++++++++---------
|
|
||||||
1 file changed, 16 insertions(+), 9 deletions(-)
|
|
||||||
|
|
||||||
diff --git a/llama.cpp b/llama.cpp
|
|
||||||
index 15c66077..8ba90b6a 100644
|
|
||||||
--- a/llama.cpp
|
|
||||||
+++ b/llama.cpp
|
|
||||||
@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
|
|
||||||
}
|
}
|
||||||
} catch (const std::exception & err) {
|
} catch (const std::exception & err) {
|
||||||
LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
|
LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
|
||||||
@@ -20,7 +11,7 @@ index 15c66077..8ba90b6a 100644
|
|||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file(
|
@@ -17564,16 +17564,23 @@ struct llama_model * llama_load_model_from_file(
|
||||||
}
|
}
|
||||||
model->rpc_servers.push_back(servers);
|
model->rpc_servers.push_back(servers);
|
||||||
}
|
}
|
||||||
@@ -52,6 +43,3 @@ index 15c66077..8ba90b6a 100644
|
|||||||
}
|
}
|
||||||
|
|
||||||
return model;
|
return model;
|
||||||
--
|
|
||||||
2.45.1
|
|
||||||
|
|
||||||
|
@@ -1,7 +1,7 @@
|
|||||||
diff --git a/ggml-metal.m b/ggml-metal.m
|
diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
|
||||||
index 0207b787..b5e9884b 100644
|
index 0207b787..b5e9884b 100644
|
||||||
--- a/ggml-metal.m
|
--- a/ggml/src/ggml-metal.m
|
||||||
+++ b/ggml-metal.m
|
+++ b/ggml/src/ggml-metal.m
|
||||||
@@ -1396,27 +1396,23 @@ static enum ggml_status ggml_metal_graph_compute(
|
@@ -1396,27 +1396,23 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
// to the matrix-vector kernel
|
// to the matrix-vector kernel
|
||||||
int ne11_mm_min = 1;
|
int ne11_mm_min = 1;
|
||||||
|
@@ -1,35 +1,32 @@
|
|||||||
From d02a06f3f45a09255ace8684a66590e06ce44605 Mon Sep 17 00:00:00 2001
|
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||||
From: Michael Yang <mxyng@pm.me>
|
index 2b9ace28..172640e2 100644
|
||||||
Date: Thu, 23 May 2024 11:33:20 -0700
|
--- a/src/llama.cpp
|
||||||
Subject: [PATCH] default pretokenizer on unrecognized type
|
+++ b/src/llama.cpp
|
||||||
|
@@ -5357,16 +5357,7 @@ static void llm_load_vocab(
|
||||||
---
|
if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
|
||||||
llama.cpp | 5 +----
|
vocab.tokenizer_add_space_prefix = false;
|
||||||
1 file changed, 1 insertion(+), 4 deletions(-)
|
vocab.tokenizer_clean_spaces = true;
|
||||||
|
- if (tokenizer_pre.empty()) {
|
||||||
diff --git a/llama.cpp b/llama.cpp
|
- LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
|
||||||
index 15c66077..af1aede3 100644
|
- LLAMA_LOG_WARN("%s: \n", __func__);
|
||||||
--- a/llama.cpp
|
- LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
|
||||||
+++ b/llama.cpp
|
- LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__);
|
||||||
@@ -4504,9 +4504,6 @@ static void llm_load_vocab(
|
- LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__);
|
||||||
LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
|
- LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
|
||||||
LLAMA_LOG_WARN("%s: \n", __func__);
|
- LLAMA_LOG_WARN("%s: \n", __func__);
|
||||||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
|
||||||
- } else if (
|
|
||||||
- tokenizer_pre == "default") {
|
|
||||||
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||||
|
- } else if (tokenizer_pre == "default") {
|
||||||
|
+ if (tokenizer_pre == "default") {
|
||||||
|
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||||
} else if (
|
} else if (
|
||||||
tokenizer_pre == "llama3" ||
|
tokenizer_pre == "llama3" ||
|
||||||
tokenizer_pre == "llama-v3" ||
|
@@ -5439,7 +5430,8 @@ static void llm_load_vocab(
|
||||||
@@ -4553,7 +4550,7 @@ static void llm_load_vocab(
|
tokenizer_pre == "jais") {
|
||||||
tokenizer_pre == "dbrx") {
|
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
|
||||||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
|
|
||||||
} else {
|
} else {
|
||||||
- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
|
- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
|
||||||
|
+ LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__);
|
||||||
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||||
}
|
}
|
||||||
} else {
|
} else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
|
||||||
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
|
||||||
--
|
|
||||||
2.45.1
|
|
||||||
|
|
||||||
|
llm/patches/06-qwen2.diff (new file, 13 lines)
@@ -0,0 +1,13 @@
|
|||||||
|
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||||
|
index 40d2ec2c..f34eb79a 100644
|
||||||
|
--- a/src/llama.cpp
|
||||||
|
+++ b/src/llama.cpp
|
||||||
|
@@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv(
|
||||||
|
struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
|
||||||
|
cb(kq, "kq", il);
|
||||||
|
|
||||||
|
- if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
|
||||||
|
+ if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
|
||||||
|
// for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
|
||||||
|
// ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
|
||||||
|
ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
|
llm/patches/07-embeddings.diff (new file, 45 lines)
@@ -0,0 +1,45 @@
|
|||||||
|
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||||
|
index 1fe2b9f7..a43312a7 100644
|
||||||
|
--- a/src/llama.cpp
|
||||||
|
+++ b/src/llama.cpp
|
||||||
|
@@ -13689,7 +13689,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
|
||||||
|
const auto n_embd = hparams.n_embd;
|
||||||
|
|
||||||
|
// TODO: use a per-batch flag for logits presence instead
|
||||||
|
- const bool has_logits = !cparams.embeddings;
|
||||||
|
+ const bool has_logits = cparams.causal_attn;
|
||||||
|
const bool has_embd = lctx.is_encoding || (cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE));
|
||||||
|
|
||||||
|
const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
|
||||||
|
@@ -13959,17 +13959,25 @@ static int llama_decode_internal(
|
||||||
|
// no output
|
||||||
|
res = nullptr;
|
||||||
|
embd = nullptr;
|
||||||
|
- } else if (cparams.embeddings) {
|
||||||
|
- res = nullptr; // do not extract logits for embedding case
|
||||||
|
- embd = gf->nodes[gf->n_nodes - 1];
|
||||||
|
- if (strcmp(embd->name, "result_embd_pooled") != 0) {
|
||||||
|
- embd = gf->nodes[gf->n_nodes - 2];
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ if (cparams.embeddings) {
|
||||||
|
+ for (int i = gf->n_nodes - 1; i >= 0; --i) {
|
||||||
|
+ embd = gf->nodes[i];
|
||||||
|
+ if (strcmp(embd->name, "result_embd_pooled") == 0) {
|
||||||
|
+ break;
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor");
|
||||||
|
- } else {
|
||||||
|
+ } else {
|
||||||
|
embd = nullptr; // do not extract embeddings when not needed
|
||||||
|
GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
|
||||||
|
}
|
||||||
|
+
|
||||||
|
+ if (!cparams.causal_attn) {
|
||||||
|
+ res = nullptr; // do not extract logits when not needed
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
// LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
|
||||||
|
|
||||||
|
ggml_backend_sched_alloc_graph(lctx.sched, gf);
|
llm/patches/08-clip-unicode.diff (new file, 42 lines)
@@ -0,0 +1,42 @@
|
|||||||
|
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
|
||||||
|
index 95fbe3d0..5a02a6ec 100644
|
||||||
|
--- a/examples/llava/clip.cpp
|
||||||
|
+++ b/examples/llava/clip.cpp
|
||||||
|
@@ -32,6 +33,14 @@
|
||||||
|
#include <cinttypes>
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
+#if defined(_WIN32)
|
||||||
|
+#define WIN32_LEAN_AND_MEAN
|
||||||
|
+#ifndef NOMINMAX
|
||||||
|
+ #define NOMINMAX
|
||||||
|
+#endif
|
||||||
|
+#include <windows.h>
|
||||||
|
+#endif
|
||||||
|
+
|
||||||
|
//#define CLIP_DEBUG_FUNCTIONS
|
||||||
|
|
||||||
|
// RGB uint8 image
|
||||||
|
@@ -1055,7 +1064,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
+#ifdef _WIN32
|
||||||
|
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0);
|
||||||
|
+ if (!wlen) {
|
||||||
|
+ return NULL;
|
||||||
|
+ }
|
||||||
|
+ wchar_t * wbuf = (wchar_t *) malloc(wlen * sizeof(wchar_t));
|
||||||
|
+ wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, wbuf, wlen);
|
||||||
|
+ if (!wlen) {
|
||||||
|
+ free(wbuf);
|
||||||
|
+ return NULL;
|
||||||
|
+ }
|
||||||
|
+ auto fin = std::ifstream(wbuf, std::ios::binary);
|
||||||
|
+ free(wbuf);
|
||||||
|
+#else
|
||||||
|
auto fin = std::ifstream(fname, std::ios::binary);
|
||||||
|
+#endif
|
||||||
|
if (!fin) {
|
||||||
|
LOG_TEE("cannot open model file for loading tensors\n");
|
||||||
|
clip_free(new_clip);
|
llm/patches/09-pooling.diff (new file, 60 lines)
@@ -0,0 +1,60 @@
|
|||||||
|
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||||
|
index 721b8f4e..cfe7ac40 100644
|
||||||
|
--- a/src/llama.cpp
|
||||||
|
+++ b/src/llama.cpp
|
||||||
|
@@ -8420,14 +8420,14 @@ struct llm_build_context {
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor * build_inp_mean() {
|
||||||
|
- lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
|
||||||
|
+ lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, cparams.n_seq_max);
|
||||||
|
cb(lctx.inp_mean, "inp_mean", -1);
|
||||||
|
ggml_set_input(lctx.inp_mean);
|
||||||
|
return lctx.inp_mean;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor * build_inp_cls() {
|
||||||
|
- lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
|
||||||
|
+ lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, cparams.n_seq_max);
|
||||||
|
cb(lctx.inp_cls, "inp_cls", -1);
|
||||||
|
ggml_set_input(lctx.inp_cls);
|
||||||
|
return lctx.inp_cls;
|
||||||
|
@@ -13847,19 +13847,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
|
||||||
|
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
|
||||||
|
|
||||||
|
float * data = (float *) lctx.inp_mean->data;
|
||||||
|
- memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
|
||||||
|
+ memset(lctx.inp_mean->data, 0, n_tokens * cparams.n_seq_max * ggml_element_size(lctx.inp_mean));
|
||||||
|
|
||||||
|
std::vector<uint64_t> sum(n_tokens, 0);
|
||||||
|
for (int i = 0; i < n_tokens; ++i) {
|
||||||
|
const llama_seq_id seq_id = batch.seq_id[i][0];
|
||||||
|
-
|
||||||
|
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
|
||||||
|
-
|
||||||
|
sum[seq_id] += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
- std::vector<float> div(n_tokens, 0.0f);
|
||||||
|
- for (int i = 0; i < n_tokens; ++i) {
|
||||||
|
+ std::vector<float> div(cparams.n_seq_max, 0.0f);
|
||||||
|
+ for (uint32_t i = 0; i < cparams.n_seq_max; ++i) {
|
||||||
|
const uint64_t s = sum[i];
|
||||||
|
if (s > 0) {
|
||||||
|
div[i] = 1.0f/float(s);
|
||||||
|
@@ -13879,14 +13876,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
|
||||||
|
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
|
||||||
|
|
||||||
|
uint32_t * data = (uint32_t *) lctx.inp_cls->data;
|
||||||
|
- memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
|
||||||
|
+ memset(lctx.inp_cls->data, 0, cparams.n_seq_max * ggml_element_size(lctx.inp_cls));
|
||||||
|
|
||||||
|
for (int i = 0; i < n_tokens; ++i) {
|
||||||
|
const llama_seq_id seq_id = batch.seq_id[i][0];
|
||||||
|
const llama_pos pos = batch.pos[i];
|
||||||
|
-
|
||||||
|
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS");
|
||||||
|
-
|
||||||
|
if (pos == 0) {
|
||||||
|
data[seq_id] = i;
|
||||||
|
}
|
@@ -10,9 +10,9 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"slices"
 	"strings"

-	"golang.org/x/exp/slices"
 	"golang.org/x/sync/errgroup"

 	"github.com/ollama/ollama/gpu"
@@ -38,7 +38,7 @@ func Init() error {
 	}

 	var variants []string
-	for v := range availableServers() {
+	for v := range getAvailableServers() {
 		variants = append(variants, v)
 	}
 	slog.Info(fmt.Sprintf("Dynamic LLM libraries %v", variants))
@@ -50,7 +50,7 @@ func Init() error {
 // binary names may contain an optional variant separated by '_'
 // For example, "ollama_rocm_v6" and "ollama_rocm_v5" or "ollama_cpu" and "ollama_cpu_avx2"
 // Any library without a variant is the lowest common denominator
-func availableServers() map[string]string {
+func getAvailableServers() map[string]string {
 	payloadsDir, err := gpu.PayloadsDir()
 	if err != nil {
 		slog.Error("payload lookup error", "error", err)
@@ -58,7 +58,7 @@ func availableServers() map[string]string {
 	}

 	// glob payloadsDir for files that start with ollama_
-	pattern := filepath.Join(payloadsDir, "*")
+	pattern := filepath.Join(payloadsDir, "*", "ollama_*")

 	files, err := filepath.Glob(pattern)
 	if err != nil {
@@ -69,7 +69,7 @@ func availableServers() map[string]string {
 	servers := make(map[string]string)
 	for _, file := range files {
 		slog.Debug("availableServers : found", "file", file)
-		servers[filepath.Base(file)] = file
+		servers[filepath.Base(filepath.Dir(file))] = filepath.Dir(file)
 	}

 	return servers
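A note on the hunk above: with the new glob the payloads tree is expected to hold one subdirectory per variant, each containing an ollama_* binary, and the returned map is keyed by that directory name rather than by the file name. A minimal standalone sketch of that mapping; the paths below are hypothetical, not taken from this diff:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Hypothetical matches for filepath.Join(payloadsDir, "*", "ollama_*").
	files := []string{
		"/tmp/ollama/payloads/cpu_avx2/ollama_llama_server",
		"/tmp/ollama/payloads/cuda_v11/ollama_llama_server",
	}

	servers := make(map[string]string)
	for _, file := range files {
		// Key by the variant directory name; the value is the directory itself.
		servers[filepath.Base(filepath.Dir(file))] = filepath.Dir(file)
	}

	fmt.Println(servers)
	// map[cpu_avx2:/tmp/ollama/payloads/cpu_avx2 cuda_v11:/tmp/ollama/payloads/cuda_v11]
}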
@@ -80,10 +80,10 @@ func availableServers() map[string]string {
 // TODO - switch to metadata based mapping
 func serversForGpu(info gpu.GpuInfo) []string {
 	// glob workDir for files that start with ollama_
-	availableServers := availableServers()
+	availableServers := getAvailableServers()
 	requested := info.Library
-	if info.Variant != "" {
-		requested += "_" + info.Variant
+	if info.Variant != gpu.CPUCapabilityNone {
+		requested += "_" + info.Variant.String()
 	}

 	servers := []string{}
@@ -115,16 +115,17 @@ func serversForGpu(info gpu.GpuInfo) []string {
 		servers = append(servers, alt...)
 	}

+	if !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") {
 	// Load up the best CPU variant if not primary requested
 	if info.Library != "cpu" {
-		variant := gpu.GetCPUVariant()
+		variant := gpu.GetCPUCapability()
 		// If no variant, then we fall back to default
 		// If we have a variant, try that if we find an exact match
 		// Attempting to run the wrong CPU instructions will panic the
 		// process
-		if variant != "" {
+		if variant != gpu.CPUCapabilityNone {
 			for cmp := range availableServers {
-				if cmp == "cpu_"+variant {
+				if cmp == "cpu_"+variant.String() {
 					servers = append(servers, cmp)
 					break
 				}
@@ -137,6 +138,7 @@ func serversForGpu(info gpu.GpuInfo) []string {
 		if len(servers) == 0 {
 			servers = []string{"cpu"}
 		}
+	}

 	return servers
 }
@@ -146,11 +148,11 @@ func serverForCpu() string {
 	if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
 		return "metal"
 	}
-	variant := gpu.GetCPUVariant()
-	availableServers := availableServers()
-	if variant != "" {
+	variant := gpu.GetCPUCapability()
+	availableServers := getAvailableServers()
+	if variant != gpu.CPUCapabilityNone {
 		for cmp := range availableServers {
-			if cmp == "cpu_"+variant {
+			if cmp == "cpu_"+variant.String() {
 				return cmp
 			}
 		}
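The comments in serversForGpu spell out the selection rule: prefer an exact cpu_<capability> match among the discovered payloads, otherwise fall back to the plain cpu build, since running a binary compiled for missing CPU instructions would crash the process. A simplified sketch of that rule outside the real gpu package; the map contents and the plain string capability (standing in for gpu.CPUCapability) are assumptions for illustration:

package main

import "fmt"

// pickCPUServer prefers an exact "cpu_<capability>" match among the discovered
// payload directories and otherwise falls back to the lowest-common-denominator
// "cpu" build. An empty capability plays the role of CPUCapabilityNone here.
func pickCPUServer(available map[string]string, capability string) string {
	if capability != "" {
		if _, ok := available["cpu_"+capability]; ok {
			return "cpu_" + capability
		}
	}
	return "cpu"
}

func main() {
	servers := map[string]string{
		"cpu":      "/payloads/cpu", // hypothetical paths
		"cpu_avx2": "/payloads/cpu_avx2",
		"cuda_v11": "/payloads/cuda_v11",
	}
	fmt.Println(pickCPUServer(servers, "avx2")) // cpu_avx2
	fmt.Println(pickCPUServer(servers, ""))     // cpu
}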
216 llm/server.go
@@ -37,8 +37,9 @@ type LlamaServer interface {
 	Tokenize(ctx context.Context, content string) ([]int, error)
 	Detokenize(ctx context.Context, tokens []int) (string, error)
 	Close() error
-	EstimatedVRAM() uint64
+	EstimatedVRAM() uint64 // Total VRAM across all GPUs
 	EstimatedTotal() uint64
+	EstimatedVRAMByGPU(gpuID string) uint64
 }

 // llmServer is an instance of the llama.cpp server
@@ -49,18 +50,22 @@ type llmServer struct {
 	status  *StatusWriter
 	options api.Options

-	// TODO - this should be broken down by GPU
-	estimatedVRAM  uint64 // Estimated usage of VRAM by the loaded model
-	estimatedTotal uint64 // Total size of model
+	estimate    MemoryEstimate
 	totalLayers uint64
-	gpuCount    int
+	// gpuCount    int
+	gpus         gpu.GpuInfoList // Recorded just before the model loaded, free space will be incorrect
 	loadDuration time.Duration   // Record how long it took the model to load
 	loadProgress float32

 	sem *semaphore.Weighted
 }

-func LoadModel(model string) (*GGML, error) {
+// LoadModel will load a model from disk. The model must be in the GGML format.
+//
+// It collects array values for arrays with a size less than or equal to
+// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If
+// the maxArraySize is negative, all arrays are collected.
+func LoadModel(model string, maxArraySize int) (*GGML, error) {
 	if _, err := os.Stat(model); err != nil {
 		return nil, err
 	}
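The new doc comment defines the maxArraySize contract: 0 means the default cap of 1024 collected array values, a negative value collects everything. A minimal caller sketch, assuming only the exported llm package API shown in this hunk; the model path is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/ollama/ollama/llm"
)

func main() {
	// 0 keeps the default cap of 1024 elements per array; pass -1 to collect all.
	ggml, err := llm.LoadModel("/models/example.gguf", 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("block count:", ggml.KV().BlockCount())
}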
@@ -71,60 +76,87 @@ func LoadModel(model string) (*GGML, error) {
 	}
 	defer f.Close()

-	ggml, _, err := DecodeGGML(f)
+	ggml, _, err := DecodeGGML(f, maxArraySize)
 	return ggml, err
 }

 // NewLlamaServer will run a server for the given GPUs
 // The gpu list must be a single family.
-func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
+func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
 	var err error
 	var cpuRunner string
-	var estimatedVRAM uint64
-	var estimatedTotal uint64
-	var systemMemory uint64
-	gpuCount := len(gpus)
-	if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {
-
-		// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner
-
-		cpuRunner = serverForCpu()
-		gpuCount = 0
-		_, _, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
-	} else {
-		if gpus[0].Library == "metal" {
-			memInfo, err := gpu.GetCPUMem()
-			if err != nil {
-				slog.Error("failed to lookup system memory", "error", err)
-			} else {
-				systemMemory = memInfo.TotalMemory
-				slog.Debug("system memory", "total", format.HumanBytes2(systemMemory))
-			}
-		}
-		var layers int
-		layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)
-
-		if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
+	var estimate MemoryEstimate
+	var systemTotalMemory uint64
+	var systemFreeMemory uint64
+	var systemSwapFreeMemory uint64
+
+	systemMemInfo, err := gpu.GetCPUMem()
+	if err != nil {
+		slog.Error("failed to lookup system memory", "error", err)
+	} else {
+		systemTotalMemory = systemMemInfo.TotalMemory
+		systemFreeMemory = systemMemInfo.FreeMemory
+		systemSwapFreeMemory = systemMemInfo.FreeSwap
+		slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
+	}
+
+	// If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
+	if opts.NumGPU == 0 {
+		gpus = gpu.GetCPUInfo()
+	}
+	if len(gpus) == 1 && gpus[0].Library == "cpu" {
+		cpuRunner = serverForCpu()
+		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
+	} else {
+		estimate = EstimateGPULayers(gpus, ggml, projectors, opts)
+
+		switch {
+		case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory:
 			// disable partial offloading when model is greater than total system memory as this
 			// can lead to locking up the system
 			opts.NumGPU = 0
-		} else if gpus[0].Library != "metal" && layers == 0 {
+		case gpus[0].Library != "metal" && estimate.Layers == 0:
 			// Don't bother loading into the GPU if no layers can fit
 			cpuRunner = serverForCpu()
-			gpuCount = 0
-		} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
-			opts.NumGPU = layers
+			gpus = gpu.GetCPUInfo()
+		case opts.NumGPU < 0 && estimate.Layers > 0 && gpus[0].Library != "cpu":
+			opts.NumGPU = estimate.Layers
 		}
 	}

+	// On linux, over-allocating CPU memory will almost always result in an error
+	if runtime.GOOS == "linux" {
+		systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
+		available := min(systemTotalMemory, systemFreeMemory+systemSwapFreeMemory)
+		if systemMemoryRequired > available {
+			slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", available, "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
+			return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
+		}
+	}
+
+	estimate.log()
+
 	// Loop through potential servers
-	finalErr := fmt.Errorf("no suitable llama servers found")
+	finalErr := errors.New("no suitable llama servers found")

 	if len(adapters) > 1 {
 		return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
 	}

-	availableServers := availableServers()
+	availableServers := getAvailableServers()
+	if len(availableServers) == 0 {
+		if runtime.GOOS != "windows" {
+			slog.Warn("llama server binary disappeared, reinitializing payloads")
+			err = Init()
+			if err != nil {
+				slog.Warn("failed to reinitialize payloads", "error", err)
+				return nil, err
+			}
+			availableServers = getAvailableServers()
+		} else {
+			return nil, finalErr
+		}
+	}
 	var servers []string
 	if cpuRunner != "" {
 		servers = []string{cpuRunner}
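The new Linux guard above boils down to a single comparison: the portion of the model that stays in system RAM must fit within min(total RAM, free RAM + free swap). A standalone sketch of that check with hypothetical byte counts, not values from this diff:

package main

import "fmt"

// wouldOvercommit mirrors the guard: the part of the model not offloaded to
// VRAM has to fit in min(total RAM, free RAM + free swap).
func wouldOvercommit(totalSize, vramSize, sysTotal, sysFree, sysSwapFree uint64) bool {
	required := totalSize - vramSize
	available := min(sysTotal, sysFree+sysSwapFree)
	return required > available
}

func main() {
	const gib = uint64(1) << 30
	fmt.Println(wouldOvercommit(8*gib, 6*gib, 16*gib, 1*gib, 0)) // true: 2 GiB needed, only 1 GiB available
	fmt.Println(wouldOvercommit(8*gib, 6*gib, 16*gib, 4*gib, 0)) // false: fits in free RAM
}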
@@ -189,47 +221,52 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		params = append(params, "--memory-f32")
 	}

-	if opts.UseMLock {
-		params = append(params, "--mlock")
+	flashAttnEnabled := envconfig.FlashAttention
+
+	for _, g := range gpus {
+		// only cuda (compute capability 7+) and metal support flash attention
+		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
+			flashAttnEnabled = false
+		}
+
+		// mmap has issues with partial offloading on metal
+		if g.Library == "metal" &&
+			uint64(opts.NumGPU) > 0 &&
+			uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
+			opts.UseMMap = new(bool)
+			*opts.UseMMap = false
+		}
 	}

-	if !opts.UseMMap {
+	if flashAttnEnabled {
+		params = append(params, "--flash-attn")
+	}
+
+	// Windows CUDA should not use mmap for best performance
+	// Linux with a model larger than free space, mmap leads to thrashing
+	// For CPU loads we want the memory to be allocated, not FS cache
+	if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
+		(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
+		(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
+		(opts.UseMMap != nil && !*opts.UseMMap) {
 		params = append(params, "--no-mmap")
 	}

+	if opts.UseMLock {
+		params = append(params, "--mlock")
+	}
+
 	if opts.UseNUMA {
 		params = append(params, "--numa")
 	}

-	flashAttnEnabled := envconfig.FlashAttention
-
-	// partial offloading does not support flash attention
-	if uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
-		flashAttnEnabled = false
-	}
-
-	// only cuda (compute capability 7+) and metal support flash attention
-	for _, g := range gpus {
-		if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
-			flashAttnEnabled = false
-		}
-	}
-	if flashAttnEnabled {
-		params = append(params, "--flash-attn")
-	}
-
-	numParallel := envconfig.NumParallel
-
-	// TODO (jmorganca): multimodal models don't support parallel yet
-	// see https://github.com/ollama/ollama/issues/4165
-	if len(projectors) > 0 {
-		numParallel = 1
-		slog.Warn("multimodal models don't support parallel requests yet")
-	}
-
 	params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

-	for i := 0; i < len(servers); i++ {
+	if estimate.TensorSplit != "" {
+		params = append(params, "--tensor-split", estimate.TensorSplit)
+	}
+
+	for i := range len(servers) {
 		dir := availableServers[servers[i]]
 		if dir == "" {
 			// Shouldn't happen
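The reordered mmap handling in the hunk above treats opts.UseMMap as a tri-state: nil means "no user preference", and only then does the loader pick a default (off for Windows CUDA, for CPU-only loads, or on Linux when the model exceeds free RAM). A standalone sketch of that predicate, using illustrative parameters rather than the real api.Options type:

package main

import "fmt"

// disableMMap returns true when the runner should be started with --no-mmap.
// A nil useMMap means the user expressed no preference.
func disableMMap(goos, library string, freeMem, modelSize uint64, useMMap *bool) bool {
	return (goos == "windows" && library == "cuda" && useMMap == nil) ||
		(goos == "linux" && freeMem < modelSize && useMMap == nil) ||
		(library == "cpu" && useMMap == nil) ||
		(useMMap != nil && !*useMMap)
}

func main() {
	off := false
	fmt.Println(disableMMap("windows", "cuda", 0, 0, nil))       // true: default off for Windows CUDA
	fmt.Println(disableMMap("linux", "cuda", 8<<30, 4<<30, nil)) // false: model fits in free RAM
	fmt.Println(disableMMap("darwin", "metal", 0, 0, &off))      // true: user explicitly disabled mmap
}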
@@ -239,8 +276,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		}

 		if strings.HasPrefix(servers[i], "cpu") {
-			// TODO if we tried a gpu runner first, and it failed, record the error and bubble that back up
-			gpuCount = 0
+			gpus = gpu.GetCPUInfo()
 		}

 		// Find an availableServers port, retry on each iteration in case the failure was a port conflict race
@@ -262,8 +298,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		if runtime.GOOS == "windows" {
 			pathEnv = "PATH"
 		}
-		// prepend the server directory to LD_LIBRARY_PATH/PATH
-		libraryPaths := []string{dir}
+		// prepend the server directory to LD_LIBRARY_PATH/PATH and the parent dir for common dependencies
+		libraryPaths := []string{dir, filepath.Dir(dir)}

 		if libraryPath, ok := os.LookupEnv(pathEnv); ok {
 			// Append our runner directory to the path
@@ -281,7 +317,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr

 		server := filepath.Join(dir, "ollama_llama_server")
 		if runtime.GOOS == "windows" {
-			server = server + ".exe"
+			server += ".exe"
 		}

 		// Detect tmp cleaners wiping out the file
@@ -300,11 +336,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 			cmd:     exec.Command(server, finalParams...),
 			status:  NewStatusWriter(os.Stderr),
 			options: opts,
-			estimatedVRAM:  estimatedVRAM,
-			estimatedTotal: estimatedTotal,
+			estimate:    estimate,
 			sem:         semaphore.NewWeighted(int64(numParallel)),
 			totalLayers: ggml.KV().BlockCount() + 1,
-			gpuCount:    gpuCount,
+			gpus:        gpus,
 			done:        make(chan error, 1),
 		}

@@ -312,7 +347,11 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 		s.cmd.Stdout = os.Stdout
 		s.cmd.Stderr = s.status

-		visibleDevicesEnv, visibleDevicesEnvVal := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
+		envWorkarounds := [][2]string{}
+		for _, gpu := range gpus {
+			envWorkarounds = append(envWorkarounds, gpu.EnvWorkarounds...)
+		}
+		visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
 		pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

 		// Update or add the path and visible devices variable with our adjusted version
@@ -326,6 +365,12 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
 			} else if devicesNeeded && strings.EqualFold(cmp[0], visibleDevicesEnv) {
 				s.cmd.Env[i] = visibleDevicesEnv + "=" + visibleDevicesEnvVal
 				devicesNeeded = false
+			} else if len(envWorkarounds) != 0 {
+				for _, kv := range envWorkarounds {
+					if strings.EqualFold(cmp[0], kv[0]) {
+						s.cmd.Env[i] = kv[0] + "=" + kv[1]
+					}
+				}
 			}
 		}
 		if pathNeeded {
@@ -387,7 +432,7 @@ func projectorMemoryRequirements(filename string) uint64 {
 	}
 	defer file.Close()

-	ggml, _, err := DecodeGGML(file)
+	ggml, _, err := DecodeGGML(file, 0)
 	if err != nil {
 		return 0
 	}
@@ -456,7 +501,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
 	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		if errors.Is(err, context.DeadlineExceeded) {
-			return ServerStatusNotResponding, fmt.Errorf("server not responding")
+			return ServerStatusNotResponding, errors.New("server not responding")
 		}
 		return ServerStatusError, fmt.Errorf("health resp: %w", err)
 	}
@@ -537,6 +582,9 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
 			if s.status != nil && s.status.LastErrMsg != "" {
 				msg = s.status.LastErrMsg
 			}
+			if strings.Contains(msg, "unknown model") {
+				return fmt.Errorf("this model is not supported by your version of Ollama. You may need to upgrade")
+			}
 			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
 		default:
 		}
@@ -603,7 +651,7 @@ array ::=

 string ::=
   "\"" (
-    [^"\\] |
+    [^"\\\x7F\x00-\x1F] |
     "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
   )* "\"" ws

@@ -639,7 +687,7 @@ type CompletionRequest struct {
 	Prompt  string
 	Format  string
 	Images  []ImageData
-	Options api.Options
+	Options *api.Options
 }

 type CompletionResponse struct {
@@ -659,10 +707,9 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 	}
 	defer s.sem.Release(1)

-	// only allow maximum 10 "context shifts" to avoid infinite generation
+	// put an upper limit on num_predict to avoid the model running on forever
 	if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx {
 		req.Options.NumPredict = 10 * s.options.NumCtx
-		slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict)
 	}

 	request := map[string]any{
@@ -1001,11 +1048,20 @@ func (s *llmServer) Close() error {
 }

 func (s *llmServer) EstimatedVRAM() uint64 {
-	return s.estimatedVRAM
+	return s.estimate.VRAMSize
 }

 func (s *llmServer) EstimatedTotal() uint64 {
-	return s.estimatedTotal
+	return s.estimate.TotalSize
+}
+
+func (s *llmServer) EstimatedVRAMByGPU(gpuID string) uint64 {
+	for i, gpu := range s.gpus {
+		if gpu.ID == gpuID {
+			return s.estimate.GPUSizes[i]
+		}
+	}
+	return 0
 }

 func parseDurationMs(ms float64) time.Duration {
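EstimatedVRAMByGPU gives callers a per-GPU view of the memory estimate recorded at load time. A short sketch of how a scheduler might use it, assuming only the exported LlamaServer interface from this file; the package name and inputs are hypothetical:

package scheduler

import "github.com/ollama/ollama/llm"

// vramInUse sums what the currently loaded runners are expected to occupy on a
// single GPU, so the caller can subtract it from that GPU's reported free memory.
func vramInUse(runners []llm.LlamaServer, gpuID string) uint64 {
	var total uint64
	for _, r := range runners {
		total += r.EstimatedVRAMByGPU(gpuID)
	}
	return total
}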
@@ -25,6 +25,7 @@ var errorPrefixes = []string{
 	"CUDA error",
 	"cudaMalloc failed",
 	"\"ERR\"",
+	"error loading model",
 }

 func (w *StatusWriter) Write(b []byte) (int, error) {
Some files were not shown because too many files have changed in this diff.