Compare commits: ...mattw/howt
1 commit: 4522109b11 (whitespace changes hidden)
@@ -2,7 +2,7 @@
ollama
app
dist
llm/llama.cpp
scripts
llm/llama.cpp/ggml
llm/llama.cpp/gguf
.env
.cache
test_data
.github/workflows/test.yaml (vendored): 162 changes
@@ -1,162 +0,0 @@
name: test

on:
  pull_request:

jobs:
  generate:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        arch: [amd64, arm64]
        exclude:
          - os: ubuntu-latest
            arch: arm64
          - os: windows-latest
            arch: arm64
    runs-on: ${{ matrix.os }}
    env:
      GOARCH: ${{ matrix.arch }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
          cache: true
      - run: go get ./...
      - run: go generate -x ./...
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.os }}-${{ matrix.arch }}-libraries
          path: llm/llama.cpp/build/**/lib/*
  generate-cuda:
    strategy:
      matrix:
        cuda-version:
          - '11.8.0'
    runs-on: linux
    container: nvidia/cuda:${{ matrix.cuda-version }}-devel-ubuntu20.04
    steps:
      - run: |
          apt-get update && apt-get install -y git build-essential curl
          curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
            | tar -zx -C /usr --strip-components 1
        env:
          DEBIAN_FRONTEND: noninteractive
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version: '1.21'
          cache: true
      - run: go get ./...
      - run: |
          git config --global --add safe.directory /__w/ollama/ollama
          go generate -x ./...
        env:
          OLLAMA_SKIP_CPU_GENERATE: '1'
      - uses: actions/upload-artifact@v4
        with:
          name: cuda-${{ matrix.cuda-version }}-libraries
          path: llm/llama.cpp/build/**/lib/*
  generate-rocm:
    strategy:
      matrix:
        rocm-version:
          - '5.7.1'
          - '6.0'
    runs-on: linux
    container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }}
    steps:
      - run: |
          apt-get update && apt-get install -y git build-essential curl rocm-libs
          curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
            | tar -zx -C /usr --strip-components 1
        env:
          DEBIAN_FRONTEND: noninteractive
      - uses: actions/checkout@v4
      - uses: actions/setup-go@v4
        with:
          go-version: '1.21'
          cache: true
      - run: go get ./...
      - run: |
          git config --global --add safe.directory /__w/ollama/ollama
          go generate -x ./...
        env:
          OLLAMA_SKIP_CPU_GENERATE: '1'
      - uses: actions/upload-artifact@v4
        with:
          name: rocm-${{ matrix.rocm-version }}-libraries
          path: llm/llama.cpp/build/**/lib/*
  lint:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        arch: [amd64, arm64]
        exclude:
          - os: ubuntu-latest
            arch: arm64
          - os: windows-latest
            arch: arm64
          - os: macos-latest
            arch: amd64
    runs-on: ${{ matrix.os }}
    env:
      GOARCH: ${{ matrix.arch }}
      CGO_ENABLED: "1"
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
          cache: false
      - run: |
          mkdir -p llm/llama.cpp/build/linux/${{ matrix.arch }}/stub/lib/
          touch llm/llama.cpp/build/linux/${{ matrix.arch }}/stub/lib/stub.so
        if: ${{ startsWith(matrix.os, 'ubuntu-') }}
      - run: |
          mkdir -p llm/llama.cpp/build/darwin/${{ matrix.arch }}/stub/lib/
          touch llm/llama.cpp/build/darwin/${{ matrix.arch }}/stub/lib/stub.dylib
          touch llm/llama.cpp/ggml-metal.metal
        if: ${{ startsWith(matrix.os, 'macos-') }}
      - run: |
          mkdir -p llm/llama.cpp/build/windows/${{ matrix.arch }}/stub/lib/
          touch llm/llama.cpp/build/windows/${{ matrix.arch }}/stub/lib/stub.dll
        if: ${{ startsWith(matrix.os, 'windows-') }}
      - uses: golangci/golangci-lint-action@v3
  test:
    needs: generate
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        arch: [amd64]
        exclude:
          - os: ubuntu-latest
            arch: arm64
          - os: windows-latest
            arch: arm64
    runs-on: ${{ matrix.os }}
    env:
      GOARCH: ${{ matrix.arch }}
      CGO_ENABLED: "1"
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: recursive
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21'
          cache: true
      - run: go get
      - uses: actions/download-artifact@v4
        with:
          name: ${{ matrix.os }}-${{ matrix.arch }}-libraries
          path: llm/llama.cpp/build
      - run: go build
      - run: go test -v ./...
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.os }}-binaries
          path: ollama
.gitignore (vendored): 5 changes
@@ -6,8 +6,3 @@
dist
ollama
ggml-metal.metal
.cache
*.exe
.idea
test_data
*.crt
.gitmodules (vendored): 14 changes
@@ -1,4 +1,10 @@
[submodule "llama.cpp"]
    path = llm/llama.cpp
    url = https://github.com/ggerganov/llama.cpp.git
    shallow = true
[submodule "llm/llama.cpp/ggml"]
    path = llm/llama.cpp/ggml
    url = https://github.com/ggerganov/llama.cpp.git
    ignore = dirty
    shallow = true
[submodule "llm/llama.cpp/gguf"]
    path = llm/llama.cpp/gguf
    url = https://github.com/ggerganov/llama.cpp.git
    ignore = dirty
    shallow = true
@@ -1,27 +0,0 @@
run:
  timeout: 5m
linters:
  enable:
    - asasalint
    - bidichk
    - bodyclose
    - containedctx
    - contextcheck
    - exportloopref
    - gocheckcompilerdirectives
    # FIXME: for some reason this errors on windows
    # - gofmt
    # - goimports
    - misspell
    - nilerr
    - unused
linters-settings:
  errcheck:
    # exclude the following functions since we don't generally
    # need to be concerned with the returned errors
    exclude-functions:
      - encoding/binary.Read
      - (*os.File).Seek
      - (*bufio.Writer).WriteString
      - (*github.com/spf13/pflag.FlagSet).Set
      - (*github.com/jmorganca/ollama/llm.readSeekOffset).Seek
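For context on the errcheck exclusions in the linter configuration above, here is a minimal Go sketch (not from the repository) of the kind of call they cover; the function and file names are illustrative only:

package example

import (
	"bufio"
	"io"
	"os"
)

// writeHeader ignores the error results of WriteString and Seek. The errcheck
// exclusions above keep golangci-lint from flagging exactly these calls.
func writeHeader(f *os.File, w *bufio.Writer) {
	w.WriteString("# generated\n") // (*bufio.Writer).WriteString, error dropped
	f.Seek(0, io.SeekStart)        // (*os.File).Seek, error dropped
}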
Dockerfile: 140 changes
@@ -1,137 +1,23 @@
|
||||
ARG GOLANG_VERSION=1.21.3
|
||||
ARG CMAKE_VERSION=3.22.1
|
||||
ARG CUDA_VERSION=11.3.1
|
||||
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
|
||||
|
||||
# Copy the minimal context we need to run the generate scripts
|
||||
FROM scratch AS llm-code
|
||||
COPY .git .git
|
||||
COPY .gitmodules .gitmodules
|
||||
COPY llm llm
|
||||
ARG TARGETARCH
|
||||
ARG GOFLAGS="'-ldflags=-w -s'"
|
||||
|
||||
FROM --platform=linux/amd64 nvidia/cuda:$CUDA_VERSION-devel-centos7 AS cuda-build-amd64
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./scripts/rh_linux_deps.sh /
|
||||
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
|
||||
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||
COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
|
||||
ARG CGO_CFLAGS
|
||||
RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
|
||||
|
||||
FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION-devel-rockylinux8 AS cuda-build-arm64
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./scripts/rh_linux_deps.sh /
|
||||
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
|
||||
ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
|
||||
COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
|
||||
ARG CGO_CFLAGS
|
||||
RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
|
||||
|
||||
FROM --platform=linux/amd64 rocm/dev-centos-7:5.7.1-complete AS rocm-5-build-amd64
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./scripts/rh_linux_deps.sh /
|
||||
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
|
||||
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||
ENV LIBRARY_PATH /opt/amdgpu/lib64
|
||||
COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
|
||||
ARG CGO_CFLAGS
|
||||
ARG AMDGPU_TARGETS
|
||||
RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
|
||||
|
||||
FROM --platform=linux/amd64 rocm/dev-centos-7:6.0-complete AS rocm-6-build-amd64
|
||||
ARG CMAKE_VERSION
|
||||
COPY ./scripts/rh_linux_deps.sh /
|
||||
RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
|
||||
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||
ENV LIBRARY_PATH /opt/amdgpu/lib64
|
||||
COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
|
||||
ARG CGO_CFLAGS
|
||||
ARG AMDGPU_TARGETS
|
||||
RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
|
||||
|
||||
FROM --platform=linux/amd64 centos:7 AS cpu-builder-amd64
|
||||
ARG CMAKE_VERSION
|
||||
ARG GOLANG_VERSION
|
||||
COPY ./scripts/rh_linux_deps.sh /
|
||||
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
|
||||
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||
COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
|
||||
ARG OLLAMA_CUSTOM_CPU_DEFS
|
||||
ARG CGO_CFLAGS
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
|
||||
|
||||
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu-build-amd64
|
||||
RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
|
||||
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx-build-amd64
|
||||
RUN OLLAMA_CPU_TARGET="cpu_avx" sh gen_linux.sh
|
||||
FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx2-build-amd64
|
||||
RUN OLLAMA_CPU_TARGET="cpu_avx2" sh gen_linux.sh
|
||||
|
||||
FROM --platform=linux/arm64 centos:7 AS cpu-build-arm64
|
||||
ARG CMAKE_VERSION
|
||||
ARG GOLANG_VERSION
|
||||
COPY ./scripts/rh_linux_deps.sh /
|
||||
RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
|
||||
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
|
||||
COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
|
||||
# Note, we only build the "base" CPU variant on arm since avx/avx2 are x86 features
|
||||
ARG OLLAMA_CUSTOM_CPU_DEFS
|
||||
ARG CGO_CFLAGS
|
||||
RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
|
||||
|
||||
# Intermediate stage used for ./scripts/build_linux.sh
|
||||
FROM --platform=linux/amd64 cpu-build-amd64 AS build-amd64
|
||||
ENV CGO_ENABLED 1
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama
|
||||
RUN apt-get update && apt-get install -y git build-essential cmake
|
||||
ADD https://dl.google.com/go/go1.21.1.linux-$TARGETARCH.tar.gz /tmp/go1.21.1.tar.gz
|
||||
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.1.tar.gz
|
||||
|
||||
COPY . .
|
||||
COPY --from=cpu_avx-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
|
||||
COPY --from=cpu_avx2-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
|
||||
COPY --from=cuda-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
|
||||
COPY --from=rocm-5-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
|
||||
COPY --from=rocm-6-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
|
||||
ARG GOFLAGS
|
||||
ARG CGO_CFLAGS
|
||||
RUN go build .
|
||||
ENV GOARCH=$TARGETARCH
|
||||
ENV GOFLAGS=$GOFLAGS
|
||||
RUN /usr/local/go/bin/go generate ./... \
|
||||
&& /usr/local/go/bin/go build .
|
||||
|
||||
# Intermediate stage used for ./scripts/build_linux.sh
|
||||
FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
|
||||
ENV CGO_ENABLED 1
|
||||
ARG GOLANG_VERSION
|
||||
WORKDIR /go/src/github.com/jmorganca/ollama
|
||||
COPY . .
|
||||
COPY --from=cuda-build-arm64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
|
||||
ARG GOFLAGS
|
||||
ARG CGO_CFLAGS
|
||||
RUN go build .
|
||||
|
||||
# Runtime stages
|
||||
FROM --platform=linux/amd64 ubuntu:22.04 as runtime-amd64
|
||||
FROM ubuntu:22.04
|
||||
RUN apt-get update && apt-get install -y ca-certificates
|
||||
COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
|
||||
FROM --platform=linux/arm64 ubuntu:22.04 as runtime-arm64
|
||||
RUN apt-get update && apt-get install -y ca-certificates
|
||||
COPY --from=build-arm64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
|
||||
|
||||
# Radeon images are much larger so we keep it distinct from the CPU/CUDA image
|
||||
FROM --platform=linux/amd64 rocm/dev-centos-7:5.7.1-complete as runtime-rocm
|
||||
RUN update-pciids
|
||||
COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
|
||||
COPY --from=0 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
|
||||
EXPOSE 11434
|
||||
ENV OLLAMA_HOST 0.0.0.0
|
||||
|
||||
ENTRYPOINT ["/bin/ollama"]
|
||||
CMD ["serve"]
|
||||
|
||||
FROM runtime-$TARGETARCH
|
||||
EXPOSE 11434
|
||||
ENV OLLAMA_HOST 0.0.0.0
|
||||
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
|
||||
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
|
||||
|
||||
ENTRYPOINT ["/bin/ollama"]
|
||||
CMD ["serve"]
|
||||
|
Dockerfile.build (new file): 32 changes
@@ -0,0 +1,32 @@

# centos7 amd64 dependencies
FROM --platform=linux/amd64 nvidia/cuda:11.8.0-devel-centos7 AS base-amd64
RUN yum install -y https://repo.ius.io/ius-release-el7.rpm centos-release-scl && \
    yum update -y && \
    yum install -y devtoolset-10-gcc devtoolset-10-gcc-c++ git236 wget
RUN wget "https://github.com/Kitware/CMake/releases/download/v3.27.6/cmake-3.27.6-linux-x86_64.sh" -O cmake-installer.sh && chmod +x cmake-installer.sh && ./cmake-installer.sh --skip-license --prefix=/usr/local
ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH

# centos8 arm64 dependencies
FROM --platform=linux/arm64 nvidia/cuda:11.4.3-devel-centos8 AS base-arm64
RUN sed -i -e 's/mirrorlist/#mirrorlist/g' -e 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
RUN yum install -y git cmake

FROM base-${TARGETARCH}
ARG TARGETARCH
ARG GOFLAGS="'-ldflags -w -s'"

# install go
ADD https://dl.google.com/go/go1.21.1.linux-$TARGETARCH.tar.gz /tmp/go1.21.1.tar.gz
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.1.tar.gz

# build the final binary
WORKDIR /go/src/github.com/jmorganca/ollama
COPY . .

ENV GOOS=linux
ENV GOARCH=$TARGETARCH
ENV GOFLAGS=$GOFLAGS

RUN /usr/local/go/bin/go generate ./... && \
    /usr/local/go/bin/go build .
README.md: 188 changes
@@ -1,5 +1,8 @@
|
||||
<div align="center">
|
||||
<img alt="ollama" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" height="200px" srcset="https://github.com/jmorganca/ollama/assets/3325447/56ea1849-1284-4645-8970-956de6e51c3c">
|
||||
<img alt="logo" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
|
||||
</picture>
|
||||
</div>
|
||||
|
||||
# Ollama
|
||||
@@ -10,32 +13,23 @@ Get up and running with large language models locally.
|
||||
|
||||
### macOS
|
||||
|
||||
[Download](https://ollama.com/download/Ollama-darwin.zip)
|
||||
[Download](https://ollama.ai/download/Ollama-darwin.zip)
|
||||
|
||||
### Windows preview
|
||||
|
||||
[Download](https://ollama.com/download/OllamaSetup.exe)
|
||||
|
||||
### Linux
|
||||
### Linux & WSL2
|
||||
|
||||
```
|
||||
curl -fsSL https://ollama.com/install.sh | sh
|
||||
curl https://ollama.ai/install.sh | sh
|
||||
```
|
||||
|
||||
[Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)
|
||||
|
||||
### Docker
|
||||
### Windows
|
||||
|
||||
The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `ollama/ollama` is available on Docker Hub.
|
||||
|
||||
### Libraries
|
||||
|
||||
- [ollama-python](https://github.com/ollama/ollama-python)
|
||||
- [ollama-js](https://github.com/ollama/ollama-js)
|
||||
coming soon
|
||||
|
||||
## Quickstart
|
||||
|
||||
To run and chat with [Llama 2](https://ollama.com/library/llama2):
|
||||
To run and chat with [Llama 2](https://ollama.ai/library/llama2):
|
||||
|
||||
```
|
||||
ollama run llama2
|
||||
@@ -43,35 +37,30 @@ ollama run llama2
|
||||
|
||||
## Model library
|
||||
|
||||
Ollama supports a list of models available on [ollama.com/library](https://ollama.com/library 'ollama model library')
|
||||
Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library 'ollama model library')
|
||||
|
||||
Here are some example models that can be downloaded:
|
||||
Here are some example open-source models that can be downloaded:
|
||||
|
||||
| Model | Parameters | Size | Download |
|
||||
| ------------------ | ---------- | ----- | ------------------------------ |
|
||||
| Llama 2 | 7B | 3.8GB | `ollama run llama2` |
|
||||
| Mistral | 7B | 4.1GB | `ollama run mistral` |
|
||||
| Dolphin Phi | 2.7B | 1.6GB | `ollama run dolphin-phi` |
|
||||
| Phi-2 | 2.7B | 1.7GB | `ollama run phi` |
|
||||
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
|
||||
| Starling | 7B | 4.1GB | `ollama run starling-lm` |
|
||||
| Llama 2 | 7B | 3.8GB | `ollama run llama2` |
|
||||
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
|
||||
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
|
||||
| Llama 2 13B | 13B | 7.3GB | `ollama run llama2:13b` |
|
||||
| Llama 2 70B | 70B | 39GB | `ollama run llama2:70b` |
|
||||
| Orca Mini | 3B | 1.9GB | `ollama run orca-mini` |
|
||||
| Vicuna | 7B | 3.8GB | `ollama run vicuna` |
|
||||
| LLaVA | 7B | 4.5GB | `ollama run llava` |
|
||||
|
||||
> Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
|
||||
> Note: You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.
|
||||
|
||||
## Customize a model
|
||||
## Customize your own model
|
||||
|
||||
### Import from GGUF
|
||||
### Import from GGUF or GGML
|
||||
|
||||
Ollama supports importing GGUF models in the Modelfile:
|
||||
Ollama supports importing GGUF and GGML file formats in the Modelfile. This means if you have a model that is not in the Ollama library, you can create it, iterate on it, and upload it to the Ollama library to share with others when you are ready.
|
||||
|
||||
1. Create a file named `Modelfile`, with a `FROM` instruction with the local filepath to the model you want to import.
|
||||
1. Create a file named Modelfile, and add a `FROM` instruction with the local filepath to the model you want to import.
|
||||
|
||||
```
|
||||
FROM ./vicuna-33b.Q4_0.gguf
|
||||
@@ -80,22 +69,18 @@ Ollama supports importing GGUF models in the Modelfile:
|
||||
2. Create the model in Ollama
|
||||
|
||||
```
|
||||
ollama create example -f Modelfile
|
||||
ollama create name -f path_to_modelfile
|
||||
```
|
||||
|
||||
3. Run the model
|
||||
|
||||
```
|
||||
ollama run example
|
||||
ollama run name
|
||||
```
|
||||
|
||||
### Import from PyTorch or Safetensors
|
||||
|
||||
See the [guide](docs/import.md) on importing models for more information.
|
||||
|
||||
### Customize a prompt
|
||||
|
||||
Models from the Ollama library can be customized with a prompt. For example, to customize the `llama2` model:
|
||||
Models from the Ollama library can be customized with a prompt. The example
|
||||
|
||||
```
|
||||
ollama pull llama2
|
||||
@@ -109,7 +94,7 @@ FROM llama2
|
||||
# set the temperature to 1 [higher is more creative, lower is more coherent]
|
||||
PARAMETER temperature 1
|
||||
|
||||
# set the system message
|
||||
# set the system prompt
|
||||
SYSTEM """
|
||||
You are Mario from Super Mario Bros. Answer as Mario, the assistant, only.
|
||||
"""
|
||||
@@ -132,10 +117,6 @@ For more examples, see the [examples](examples) directory. For more information
|
||||
|
||||
`ollama create` is used to create a model from a Modelfile.
|
||||
|
||||
```
|
||||
ollama create mymodel -f ./Modelfile
|
||||
```
|
||||
|
||||
### Pull a model
|
||||
|
||||
```
|
||||
@@ -167,17 +148,10 @@ For multiline input, you can wrap text with `"""`:
|
||||
I'm a basic program that prints the famous "Hello, world!" message to the console.
|
||||
```
|
||||
|
||||
### Multimodal models
|
||||
|
||||
```
|
||||
>>> What's in this image? /Users/jmorgan/Desktop/smile.png
|
||||
The image features a yellow smiley face, which is likely the central focus of the picture.
|
||||
```
|
||||
|
||||
### Pass in prompt as arguments
|
||||
|
||||
```
|
||||
$ ollama run llama2 "Summarize this file: $(cat README.md)"
|
||||
$ ollama run llama2 "summarize this file:" "$(cat README.md)"
|
||||
Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications.
|
||||
```
|
||||
|
||||
@@ -196,25 +170,17 @@ ollama list
|
||||
Install `cmake` and `go`:
|
||||
|
||||
```
|
||||
brew install cmake go
|
||||
brew install cmake
|
||||
brew install go
|
||||
```
|
||||
|
||||
Then generate dependencies:
|
||||
Then generate dependencies and build:
|
||||
|
||||
```
|
||||
go generate ./...
|
||||
```
|
||||
|
||||
Then build the binary:
|
||||
|
||||
```
|
||||
go build .
|
||||
```
|
||||
|
||||
More detailed instructions can be found in the [developer guide](https://github.com/jmorganca/ollama/blob/main/docs/development.md)
|
||||
|
||||
### Running local builds
|
||||
|
||||
Next, start the server:
|
||||
|
||||
```
|
||||
@@ -229,114 +195,30 @@ Finally, in a separate shell, run a model:
|
||||
|
||||
## REST API
|
||||
|
||||
Ollama has a REST API for running and managing models.
|
||||
> See the [API documentation](docs/api.md) for all endpoints.
|
||||
|
||||
### Generate a response
|
||||
Ollama has an API for running and managing models. For example to generate text from a model:
|
||||
|
||||
```
|
||||
curl http://localhost:11434/api/generate -d '{
|
||||
curl -X POST http://localhost:11434/api/generate -d '{
|
||||
"model": "llama2",
|
||||
"prompt":"Why is the sky blue?"
|
||||
}'
|
||||
```
|
||||
|
||||
### Chat with a model
|
||||
|
||||
```
|
||||
curl http://localhost:11434/api/chat -d '{
|
||||
"model": "mistral",
|
||||
"messages": [
|
||||
{ "role": "user", "content": "why is the sky blue?" }
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
See the [API documentation](./docs/api.md) for all endpoints.
|
||||
|
||||
## Community Integrations
|
||||
|
||||
### Web & Desktop
|
||||
|
||||
- [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt)
|
||||
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
|
||||
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
|
||||
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
|
||||
- [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
|
||||
- [Open WebUI](https://github.com/open-webui/open-webui)
|
||||
- [Ollamac](https://github.com/kevinhermawan/Ollamac)
|
||||
- [big-AGI](https://github.com/enricoros/big-agi/blob/main/docs/config-ollama.md)
|
||||
- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
|
||||
- [Amica](https://github.com/semperai/amica)
|
||||
- [chatd](https://github.com/BruceMacD/chatd)
|
||||
- [Ollama-SwiftUI](https://github.com/kghandour/Ollama-SwiftUI)
|
||||
- [MindMac](https://mindmac.app)
|
||||
- [NextJS Web Interface for Ollama](https://github.com/jakobhoeg/nextjs-ollama-llm-ui)
|
||||
|
||||
### Terminal
|
||||
|
||||
- [oterm](https://github.com/ggozad/oterm)
|
||||
- [Ellama Emacs client](https://github.com/s-kostyaev/ellama)
|
||||
- [Emacs client](https://github.com/zweifisch/ollama)
|
||||
- [gen.nvim](https://github.com/David-Kunz/gen.nvim)
|
||||
- [ollama.nvim](https://github.com/nomnivore/ollama.nvim)
|
||||
- [ollama-chat.nvim](https://github.com/gerazov/ollama-chat.nvim)
|
||||
- [ogpt.nvim](https://github.com/huynle/ogpt.nvim)
|
||||
- [gptel Emacs client](https://github.com/karthink/gptel)
|
||||
- [Oatmeal](https://github.com/dustinblackman/oatmeal)
|
||||
- [cmdh](https://github.com/pgibler/cmdh)
|
||||
- [tenere](https://github.com/pythops/tenere)
|
||||
- [llm-ollama](https://github.com/taketwo/llm-ollama) for [Datasette's LLM CLI](https://llm.datasette.io/en/stable/).
|
||||
- [ShellOracle](https://github.com/djcopley/ShellOracle)
|
||||
|
||||
### Database
|
||||
|
||||
- [MindsDB](https://github.com/mindsdb/mindsdb/blob/staging/mindsdb/integrations/handlers/ollama_handler/README.md)
|
||||
|
||||
### Package managers
|
||||
|
||||
- [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
|
||||
- [Helm Chart](https://artifacthub.io/packages/helm/ollama-helm/ollama)
|
||||
|
||||
### Libraries
|
||||
|
||||
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
|
||||
- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
|
||||
- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
|
||||
- [LangChain4j](https://github.com/langchain4j/langchain4j/tree/main/langchain4j-ollama)
|
||||
- [LiteLLM](https://github.com/BerriAI/litellm)
|
||||
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
|
||||
- [Ollama for Ruby](https://github.com/gbaptista/ollama-ai)
|
||||
- [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
|
||||
- [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
|
||||
- [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
|
||||
- [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)
|
||||
- [Ollama for Dart](https://github.com/breitburg/dart-ollama)
|
||||
- [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel)
|
||||
- [LangChainDart](https://github.com/davidmigloz/langchain_dart)
|
||||
- [Semantic Kernel - Python](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai/ollama)
|
||||
- [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md)
|
||||
- [Ollama for R - rollama](https://github.com/JBGruber/rollama)
|
||||
- [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex)
|
||||
|
||||
### Mobile
|
||||
|
||||
- [Enchanted](https://github.com/AugustDev/enchanted)
|
||||
- [Maid](https://github.com/Mobile-Artificial-Intelligence/maid)
|
||||
|
||||
### Extensions & Plugins
|
||||
|
||||
- [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama)
|
||||
- [Discollama](https://github.com/mxyng/discollama) (Discord bot inside the Ollama discord channel)
|
||||
- [Continue](https://github.com/continuedev/continue)
|
||||
- [Obsidian Ollama plugin](https://github.com/hinterdupfinger/obsidian-ollama)
|
||||
- [Logseq Ollama plugin](https://github.com/omagdy7/ollama-logseq)
|
||||
- [Dagger Chatbot](https://github.com/samalba/dagger-chatbot)
|
||||
- [LiteLLM](https://github.com/BerriAI/litellm)
|
||||
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot)
|
||||
- [Ollama Telegram Bot](https://github.com/ruecat/ollama-telegram)
|
||||
- [Hass Ollama Conversation](https://github.com/ej52/hass-ollama-conversation)
|
||||
- [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
|
||||
- [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
|
||||
- [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
|
||||
- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama)
|
||||
- [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama)
|
||||
- [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)
|
||||
- [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
|
||||
- [HTML UI](https://github.com/rtcfirefly/ollama-ui)
|
||||
- [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
|
||||
- [Dumbar](https://github.com/JerrySievert/Dumbar)
|
||||
- [Emacs client](https://github.com/zweifisch/ollama)
|
||||
|
@@ -5,7 +5,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@@ -15,10 +14,13 @@ import (
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/jmorganca/ollama/format"
|
||||
"github.com/jmorganca/ollama/version"
|
||||
)
|
||||
|
||||
const DefaultHost = "127.0.0.1:11434"
|
||||
|
||||
var envHost = os.Getenv("OLLAMA_HOST")
|
||||
|
||||
type Client struct {
|
||||
base *url.URL
|
||||
http http.Client
|
||||
@@ -41,28 +43,16 @@ func checkError(resp *http.Response, body []byte) error {
|
||||
}
|
||||
|
||||
func ClientFromEnvironment() (*Client, error) {
|
||||
defaultPort := "11434"
|
||||
|
||||
scheme, hostport, ok := strings.Cut(os.Getenv("OLLAMA_HOST"), "://")
|
||||
switch {
|
||||
case !ok:
|
||||
if !ok {
|
||||
scheme, hostport = "http", os.Getenv("OLLAMA_HOST")
|
||||
case scheme == "http":
|
||||
defaultPort = "80"
|
||||
case scheme == "https":
|
||||
defaultPort = "443"
|
||||
}
|
||||
|
||||
// trim trailing slashes
|
||||
hostport = strings.TrimRight(hostport, "/")
|
||||
|
||||
host, port, err := net.SplitHostPort(hostport)
|
||||
if err != nil {
|
||||
host, port = "127.0.0.1", defaultPort
|
||||
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
|
||||
host, port = "127.0.0.1", "11434"
|
||||
if ip := net.ParseIP(strings.Trim(os.Getenv("OLLAMA_HOST"), "[]")); ip != nil {
|
||||
host = ip.String()
|
||||
} else if hostport != "" {
|
||||
host = hostport
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,7 +63,7 @@ func ClientFromEnvironment() (*Client, error) {
|
||||
},
|
||||
}
|
||||
|
||||
mockRequest, err := http.NewRequest(http.MethodHead, client.base.String(), nil)
|
||||
mockRequest, err := http.NewRequest("HEAD", client.base.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
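The host-parsing logic in the two ClientFromEnvironment hunks above differs mainly in how the default port is chosen. Below is a minimal, self-contained Go sketch of the scheme-aware variant; the helper name is hypothetical and this is not the repository's exact function, but it matches the expectations listed in the deleted api/client_test.go later in this compare:

package example

import (
	"net"
	"net/url"
	"strings"
)

// resolveHost sketches the scheme-aware parsing shown above: OLLAMA_HOST may
// carry an optional scheme, host, and port, and the default port depends on
// the scheme (80 for http, 443 for https, 11434 otherwise).
func resolveHost(value string) *url.URL {
	defaultPort := "11434"

	scheme, hostport, ok := strings.Cut(value, "://")
	switch {
	case !ok:
		scheme, hostport = "http", value
	case scheme == "http":
		defaultPort = "80"
	case scheme == "https":
		defaultPort = "443"
	}

	// trim trailing slashes
	hostport = strings.TrimRight(hostport, "/")

	host, port, err := net.SplitHostPort(hostport)
	if err != nil {
		host, port = "127.0.0.1", defaultPort
		if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
			host = ip.String()
		} else if hostport != "" {
			host = hostport
		}
	}

	return &url.URL{Scheme: scheme, Host: net.JoinHostPort(host, port)}
}

With this handling, http://example.com resolves to port 80 and https://example.com to 443, while a bare host or IP falls back to 11434.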
@@ -96,19 +86,11 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
|
||||
var reqBody io.Reader
|
||||
var data []byte
|
||||
var err error
|
||||
|
||||
switch reqData := reqData.(type) {
|
||||
case io.Reader:
|
||||
// reqData is already an io.Reader
|
||||
reqBody = reqData
|
||||
case nil:
|
||||
// noop
|
||||
default:
|
||||
if reqData != nil {
|
||||
data, err = json.Marshal(reqData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reqBody = bytes.NewReader(data)
|
||||
}
|
||||
|
||||
@@ -145,7 +127,7 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
|
||||
return nil
|
||||
}
|
||||
|
||||
const maxBufferSize = 512 * format.KiloByte
|
||||
const maxBufferSize = 512 * 1000 // 512KB
|
||||
|
||||
func (c *Client) stream(ctx context.Context, method, path string, data any, fn func([]byte) error) error {
|
||||
var buf *bytes.Buffer
|
||||
@@ -221,19 +203,6 @@ func (c *Client) Generate(ctx context.Context, req *GenerateRequest, fn Generate
|
||||
})
|
||||
}
|
||||
|
||||
type ChatResponseFunc func(ChatResponse) error
|
||||
|
||||
func (c *Client) Chat(ctx context.Context, req *ChatRequest, fn ChatResponseFunc) error {
|
||||
return c.stream(ctx, http.MethodPost, "/api/chat", req, func(bts []byte) error {
|
||||
var resp ChatResponse
|
||||
if err := json.Unmarshal(bts, &resp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fn(resp)
|
||||
})
|
||||
}
|
||||
|
||||
type PullProgressFunc func(ProgressResponse) error
|
||||
|
||||
func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc) error {
|
||||
@@ -309,37 +278,3 @@ func (c *Client) Heartbeat(ctx context.Context) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (c *Client) Embeddings(ctx context.Context, req *EmbeddingRequest) (*EmbeddingResponse, error) {
|
||||
var resp EmbeddingResponse
|
||||
if err := c.do(ctx, http.MethodPost, "/api/embeddings", req, &resp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
|
||||
if err := c.do(ctx, http.MethodHead, fmt.Sprintf("/api/blobs/%s", digest), nil, nil); err != nil {
|
||||
var statusError StatusError
|
||||
if !errors.As(err, &statusError) || statusError.StatusCode != http.StatusNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) Version(ctx context.Context) (string, error) {
|
||||
var version struct {
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
if err := c.do(ctx, http.MethodGet, "/api/version", nil, &version); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return version.Version, nil
|
||||
}
|
||||
|
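Taken together, the client changes above are easiest to see from the caller's side. A usage sketch, assuming the api package as shown in this diff (ClientFromEnvironment, ChatRequest, Message, ChatResponse); the model name is only an example:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// Pick up OLLAMA_HOST from the environment, falling back to the default.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	req := &api.ChatRequest{
		Model: "llama2",
		Messages: []api.Message{
			{Role: "user", Content: "Why is the sky blue?"},
		},
	}

	// The callback runs once per streamed response chunk.
	err = client.Chat(context.Background(), req, func(resp api.ChatResponse) error {
		fmt.Print(resp.Message.Content) // print each streamed fragment as it arrives
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}

The per-chunk callback mirrors the line-by-line streaming handling in the Python client added below.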
api/client.py (new file): 225 changes
@@ -0,0 +1,225 @@
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
|
||||
BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
|
||||
|
||||
# Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses.
|
||||
# The final response object will include statistics and additional data from the request. Use the callback function to override
|
||||
# the default handler.
|
||||
def generate(model_name, prompt, system=None, template=None, context=None, options=None, callback=None):
|
||||
try:
|
||||
url = f"{BASE_URL}/api/generate"
|
||||
payload = {
|
||||
"model": model_name,
|
||||
"prompt": prompt,
|
||||
"system": system,
|
||||
"template": template,
|
||||
"context": context,
|
||||
"options": options
|
||||
}
|
||||
|
||||
# Remove keys with None values
|
||||
payload = {k: v for k, v in payload.items() if v is not None}
|
||||
|
||||
with requests.post(url, json=payload, stream=True) as response:
|
||||
response.raise_for_status()
|
||||
|
||||
# Creating a variable to hold the context history of the final chunk
|
||||
final_context = None
|
||||
|
||||
# Variable to hold concatenated response strings if no callback is provided
|
||||
full_response = ""
|
||||
|
||||
# Iterating over the response line by line and displaying the details
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
# Parsing each line (JSON chunk) and extracting the details
|
||||
chunk = json.loads(line)
|
||||
|
||||
# If a callback function is provided, call it with the chunk
|
||||
if callback:
|
||||
callback(chunk)
|
||||
else:
|
||||
# If this is not the last chunk, add the "response" field value to full_response and print it
|
||||
if not chunk.get("done"):
|
||||
response_piece = chunk.get("response", "")
|
||||
full_response += response_piece
|
||||
print(response_piece, end="", flush=True)
|
||||
|
||||
# Check if it's the last chunk (done is true)
|
||||
if chunk.get("done"):
|
||||
final_context = chunk.get("context")
|
||||
|
||||
# Return the full response and the final context
|
||||
return full_response, final_context
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return None, None
|
||||
|
||||
# Create a model from a Modelfile. Use the callback function to override the default handler.
|
||||
def create(model_name, model_path, callback=None):
|
||||
try:
|
||||
url = f"{BASE_URL}/api/create"
|
||||
payload = {"name": model_name, "path": model_path}
|
||||
|
||||
# Making a POST request with the stream parameter set to True to handle streaming responses
|
||||
with requests.post(url, json=payload, stream=True) as response:
|
||||
response.raise_for_status()
|
||||
|
||||
# Iterating over the response line by line and displaying the status
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
# Parsing each line (JSON chunk) and extracting the status
|
||||
chunk = json.loads(line)
|
||||
|
||||
if callback:
|
||||
callback(chunk)
|
||||
else:
|
||||
print(f"Status: {chunk.get('status')}")
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
|
||||
# Pull a model from a the model registry. Cancelled pulls are resumed from where they left off, and multiple
|
||||
# calls to will share the same download progress. Use the callback function to override the default handler.
|
||||
def pull(model_name, insecure=False, callback=None):
|
||||
try:
|
||||
url = f"{BASE_URL}/api/pull"
|
||||
payload = {
|
||||
"name": model_name,
|
||||
"insecure": insecure
|
||||
}
|
||||
|
||||
# Making a POST request with the stream parameter set to True to handle streaming responses
|
||||
with requests.post(url, json=payload, stream=True) as response:
|
||||
response.raise_for_status()
|
||||
|
||||
# Iterating over the response line by line and displaying the details
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
# Parsing each line (JSON chunk) and extracting the details
|
||||
chunk = json.loads(line)
|
||||
|
||||
# If a callback function is provided, call it with the chunk
|
||||
if callback:
|
||||
callback(chunk)
|
||||
else:
|
||||
# Print the status message directly to the console
|
||||
print(chunk.get('status', ''), end='', flush=True)
|
||||
|
||||
# If there's layer data, you might also want to print that (adjust as necessary)
|
||||
if 'digest' in chunk:
|
||||
print(f" - Digest: {chunk['digest']}", end='', flush=True)
|
||||
print(f" - Total: {chunk['total']}", end='', flush=True)
|
||||
print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
|
||||
else:
|
||||
print()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
|
||||
# Push a model to the model registry. Use the callback function to override the default handler.
|
||||
def push(model_name, insecure=False, callback=None):
|
||||
try:
|
||||
url = f"{BASE_URL}/api/push"
|
||||
payload = {
|
||||
"name": model_name,
|
||||
"insecure": insecure
|
||||
}
|
||||
|
||||
# Making a POST request with the stream parameter set to True to handle streaming responses
|
||||
with requests.post(url, json=payload, stream=True) as response:
|
||||
response.raise_for_status()
|
||||
|
||||
# Iterating over the response line by line and displaying the details
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
# Parsing each line (JSON chunk) and extracting the details
|
||||
chunk = json.loads(line)
|
||||
|
||||
# If a callback function is provided, call it with the chunk
|
||||
if callback:
|
||||
callback(chunk)
|
||||
else:
|
||||
# Print the status message directly to the console
|
||||
print(chunk.get('status', ''), end='', flush=True)
|
||||
|
||||
# If there's layer data, you might also want to print that (adjust as necessary)
|
||||
if 'digest' in chunk:
|
||||
print(f" - Digest: {chunk['digest']}", end='', flush=True)
|
||||
print(f" - Total: {chunk['total']}", end='', flush=True)
|
||||
print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
|
||||
else:
|
||||
print()
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
|
||||
# List models that are available locally.
|
||||
def list():
|
||||
try:
|
||||
response = requests.get(f"{BASE_URL}/api/tags")
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
models = data.get('models', [])
|
||||
return models
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return None
|
||||
|
||||
# Copy a model. Creates a model with another name from an existing model.
|
||||
def copy(source, destination):
|
||||
try:
|
||||
# Create the JSON payload
|
||||
payload = {
|
||||
"source": source,
|
||||
"destination": destination
|
||||
}
|
||||
|
||||
response = requests.post(f"{BASE_URL}/api/copy", json=payload)
|
||||
response.raise_for_status()
|
||||
|
||||
# If the request was successful, return a message indicating that the copy was successful
|
||||
return "Copy successful"
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return None
|
||||
|
||||
# Delete a model and its data.
|
||||
def delete(model_name):
|
||||
try:
|
||||
url = f"{BASE_URL}/api/delete"
|
||||
payload = {"name": model_name}
|
||||
response = requests.delete(url, json=payload)
|
||||
response.raise_for_status()
|
||||
return "Delete successful"
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return None
|
||||
|
||||
# Show info about a model.
|
||||
def show(model_name):
|
||||
try:
|
||||
url = f"{BASE_URL}/api/show"
|
||||
payload = {"name": model_name}
|
||||
response = requests.post(url, json=payload)
|
||||
response.raise_for_status()
|
||||
|
||||
# Parse the JSON response and return it
|
||||
data = response.json()
|
||||
return data
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return None
|
||||
|
||||
def heartbeat():
|
||||
try:
|
||||
url = f"{BASE_URL}/"
|
||||
response = requests.head(url)
|
||||
response.raise_for_status()
|
||||
return "Ollama is running"
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return "Ollama is not running"
|
||||
|
||||
|
@@ -1,43 +0,0 @@
package api

import "testing"

func TestClientFromEnvironment(t *testing.T) {
	type testCase struct {
		value  string
		expect string
		err    error
	}

	testCases := map[string]*testCase{
		"empty":                      {value: "", expect: "http://127.0.0.1:11434"},
		"only address":               {value: "1.2.3.4", expect: "http://1.2.3.4:11434"},
		"only port":                  {value: ":1234", expect: "http://:1234"},
		"address and port":           {value: "1.2.3.4:1234", expect: "http://1.2.3.4:1234"},
		"scheme http and address":    {value: "http://1.2.3.4", expect: "http://1.2.3.4:80"},
		"scheme https and address":   {value: "https://1.2.3.4", expect: "https://1.2.3.4:443"},
		"scheme, address, and port":  {value: "https://1.2.3.4:1234", expect: "https://1.2.3.4:1234"},
		"hostname":                   {value: "example.com", expect: "http://example.com:11434"},
		"hostname and port":          {value: "example.com:1234", expect: "http://example.com:1234"},
		"scheme http and hostname":   {value: "http://example.com", expect: "http://example.com:80"},
		"scheme https and hostname":  {value: "https://example.com", expect: "https://example.com:443"},
		"scheme, hostname, and port": {value: "https://example.com:1234", expect: "https://example.com:1234"},
		"trailing slash":             {value: "example.com/", expect: "http://example.com:11434"},
		"trailing slash port":        {value: "example.com:1234/", expect: "http://example.com:1234"},
	}

	for k, v := range testCases {
		t.Run(k, func(t *testing.T) {
			t.Setenv("OLLAMA_HOST", v.value)

			client, err := ClientFromEnvironment()
			if err != v.err {
				t.Fatalf("expected %s, got %s", v.err, err)
			}

			if client.base.String() != v.expect {
				t.Fatalf("expected %s, got %s", v.expect, client.base.String())
			}
		})
	}
}
api/types.go: 455 changes
@@ -3,10 +3,10 @@ package api
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@@ -31,50 +31,101 @@ func (e StatusError) Error() string {
|
||||
}
|
||||
}
|
||||
|
||||
type ImageData []byte
|
||||
|
||||
type GenerateRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
System string `json:"system"`
|
||||
Template string `json:"template"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
Raw bool `json:"raw,omitempty"`
|
||||
Format string `json:"format"`
|
||||
KeepAlive *Duration `json:"keep_alive,omitempty"`
|
||||
Images []ImageData `json:"images,omitempty"`
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
System string `json:"system"`
|
||||
Template string `json:"template"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
|
||||
Options map[string]interface{} `json:"options"`
|
||||
}
|
||||
|
||||
type ChatRequest struct {
|
||||
Model string `json:"model"`
|
||||
Messages []Message `json:"messages"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
Format string `json:"format"`
|
||||
KeepAlive *Duration `json:"keep_alive,omitempty"`
|
||||
type EmbeddingRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
|
||||
Options map[string]interface{} `json:"options"`
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
Role string `json:"role"` // one of ["system", "user", "assistant"]
|
||||
Content string `json:"content"`
|
||||
Images []ImageData `json:"images,omitempty"`
|
||||
type EmbeddingResponse struct {
|
||||
Embedding []float64 `json:"embedding"`
|
||||
}
|
||||
|
||||
type ChatResponse struct {
|
||||
type CreateRequest struct {
|
||||
Name string `json:"name"`
|
||||
Path string `json:"path"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
type DeleteRequest struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowRequest struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowResponse struct {
|
||||
License string `json:"license,omitempty"`
|
||||
Modelfile string `json:"modelfile,omitempty"`
|
||||
Parameters string `json:"parameters,omitempty"`
|
||||
Template string `json:"template,omitempty"`
|
||||
System string `json:"system,omitempty"`
|
||||
}
|
||||
|
||||
type CopyRequest struct {
|
||||
Source string `json:"source"`
|
||||
Destination string `json:"destination"`
|
||||
}
|
||||
|
||||
type PullRequest struct {
|
||||
Name string `json:"name"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
type ProgressResponse struct {
|
||||
Status string `json:"status"`
|
||||
Digest string `json:"digest,omitempty"`
|
||||
Total int64 `json:"total,omitempty"`
|
||||
Completed int64 `json:"completed,omitempty"`
|
||||
}
|
||||
|
||||
type PushRequest struct {
|
||||
Name string `json:"name"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
}
|
||||
|
||||
type ListResponse struct {
|
||||
Models []ModelResponse `json:"models"`
|
||||
}
|
||||
|
||||
type ModelResponse struct {
|
||||
Name string `json:"name"`
|
||||
ModifiedAt time.Time `json:"modified_at"`
|
||||
Size int64 `json:"size"`
|
||||
Digest string `json:"digest"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
type GenerateResponse struct {
|
||||
Model string `json:"model"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Message Message `json:"message"`
|
||||
Response string `json:"response"`
|
||||
|
||||
Done bool `json:"done"`
|
||||
Done bool `json:"done"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
|
||||
Metrics
|
||||
}
|
||||
|
||||
type Metrics struct {
|
||||
TotalDuration time.Duration `json:"total_duration,omitempty"`
|
||||
LoadDuration time.Duration `json:"load_duration,omitempty"`
|
||||
PromptEvalCount int `json:"prompt_eval_count,omitempty"`
|
||||
@@ -83,13 +134,58 @@ type Metrics struct {
|
||||
EvalDuration time.Duration `json:"eval_duration,omitempty"`
|
||||
}
|
||||
|
||||
// Options specfied in GenerateRequest, if you add a new option here add it to the API docs also
|
||||
type Options struct {
|
||||
Runner
|
||||
func (r *GenerateResponse) Summary() {
|
||||
if r.TotalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "total duration: %v\n", r.TotalDuration)
|
||||
}
|
||||
|
||||
// Predict options used at runtime
|
||||
NumKeep int `json:"num_keep,omitempty"`
|
||||
Seed int `json:"seed,omitempty"`
|
||||
if r.LoadDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "load duration: %v\n", r.LoadDuration)
|
||||
}
|
||||
|
||||
if r.PromptEvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
|
||||
}
|
||||
|
||||
if r.PromptEvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", r.PromptEvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(r.PromptEvalCount)/r.PromptEvalDuration.Seconds())
|
||||
}
|
||||
|
||||
if r.EvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", r.EvalCount)
|
||||
}
|
||||
|
||||
if r.EvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval duration: %s\n", r.EvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(r.EvalCount)/r.EvalDuration.Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
Seed int `json:"seed,omitempty"`
|
||||
|
||||
// Backend options
|
||||
UseNUMA bool `json:"numa,omitempty"`
|
||||
|
||||
// Model options
|
||||
NumCtx int `json:"num_ctx,omitempty"`
|
||||
NumKeep int `json:"num_keep,omitempty"`
|
||||
NumBatch int `json:"num_batch,omitempty"`
|
||||
NumGQA int `json:"num_gqa,omitempty"`
|
||||
NumGPU int `json:"num_gpu,omitempty"`
|
||||
MainGPU int `json:"main_gpu,omitempty"`
|
||||
LowVRAM bool `json:"low_vram,omitempty"`
|
||||
F16KV bool `json:"f16_kv,omitempty"`
|
||||
LogitsAll bool `json:"logits_all,omitempty"`
|
||||
VocabOnly bool `json:"vocab_only,omitempty"`
|
||||
UseMMap bool `json:"use_mmap,omitempty"`
|
||||
UseMLock bool `json:"use_mlock,omitempty"`
|
||||
EmbeddingOnly bool `json:"embedding_only,omitempty"`
|
||||
RopeFrequencyBase float32 `json:"rope_frequency_base,omitempty"`
|
||||
RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
|
||||
|
||||
// Predict options
|
||||
NumPredict int `json:"num_predict,omitempty"`
|
||||
TopK int `json:"top_k,omitempty"`
|
||||
TopP float32 `json:"top_p,omitempty"`
|
||||
@@ -105,175 +201,8 @@ type Options struct {
|
||||
MirostatEta float32 `json:"mirostat_eta,omitempty"`
|
||||
PenalizeNewline bool `json:"penalize_newline,omitempty"`
|
||||
Stop []string `json:"stop,omitempty"`
|
||||
}
|
||||
|
||||
// Runner options which must be set when the model is loaded into memory
|
||||
type Runner struct {
|
||||
UseNUMA bool `json:"numa,omitempty"`
|
||||
NumCtx int `json:"num_ctx,omitempty"`
|
||||
NumBatch int `json:"num_batch,omitempty"`
|
||||
NumGQA int `json:"num_gqa,omitempty"`
|
||||
NumGPU int `json:"num_gpu,omitempty"`
|
||||
MainGPU int `json:"main_gpu,omitempty"`
|
||||
LowVRAM bool `json:"low_vram,omitempty"`
|
||||
F16KV bool `json:"f16_kv,omitempty"`
|
||||
LogitsAll bool `json:"logits_all,omitempty"`
|
||||
VocabOnly bool `json:"vocab_only,omitempty"`
|
||||
UseMMap bool `json:"use_mmap,omitempty"`
|
||||
UseMLock bool `json:"use_mlock,omitempty"`
|
||||
EmbeddingOnly bool `json:"embedding_only,omitempty"`
|
||||
RopeFrequencyBase float32 `json:"rope_frequency_base,omitempty"`
|
||||
RopeFrequencyScale float32 `json:"rope_frequency_scale,omitempty"`
|
||||
NumThread int `json:"num_thread,omitempty"`
|
||||
}
|
||||
|
||||
type EmbeddingRequest struct {
|
||||
Model string `json:"model"`
|
||||
Prompt string `json:"prompt"`
|
||||
KeepAlive *Duration `json:"keep_alive,omitempty"`
|
||||
|
||||
Options map[string]interface{} `json:"options"`
|
||||
}
|
||||
|
||||
type EmbeddingResponse struct {
|
||||
Embedding []float64 `json:"embedding"`
|
||||
}
|
||||
|
||||
type CreateRequest struct {
|
||||
Model string `json:"model"`
|
||||
Path string `json:"path"`
|
||||
Modelfile string `json:"modelfile"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
|
||||
// Name is deprecated, see Model
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type DeleteRequest struct {
|
||||
Model string `json:"model"`
|
||||
|
||||
// Name is deprecated, see Model
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowRequest struct {
|
||||
Model string `json:"model"`
|
||||
System string `json:"system"`
|
||||
Template string `json:"template"`
|
||||
|
||||
Options map[string]interface{} `json:"options"`
|
||||
|
||||
// Name is deprecated, see Model
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ShowResponse struct {
|
||||
License string `json:"license,omitempty"`
|
||||
Modelfile string `json:"modelfile,omitempty"`
|
||||
Parameters string `json:"parameters,omitempty"`
|
||||
Template string `json:"template,omitempty"`
|
||||
System string `json:"system,omitempty"`
|
||||
Details ModelDetails `json:"details,omitempty"`
|
||||
Messages []Message `json:"messages,omitempty"`
|
||||
}
|
||||
|
||||
type CopyRequest struct {
|
||||
Source string `json:"source"`
|
||||
Destination string `json:"destination"`
|
||||
}
|
||||
|
||||
type PullRequest struct {
|
||||
Model string `json:"model"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
|
||||
// Name is deprecated, see Model
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ProgressResponse struct {
|
||||
Status string `json:"status"`
|
||||
Digest string `json:"digest,omitempty"`
|
||||
Total int64 `json:"total,omitempty"`
|
||||
Completed int64 `json:"completed,omitempty"`
|
||||
}
|
||||
|
||||
type PushRequest struct {
|
||||
Model string `json:"model"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Stream *bool `json:"stream,omitempty"`
|
||||
|
||||
// Name is deprecated, see Model
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ListResponse struct {
|
||||
Models []ModelResponse `json:"models"`
|
||||
}
|
||||
|
||||
type ModelResponse struct {
|
||||
Name string `json:"name"`
|
||||
Model string `json:"model"`
|
||||
ModifiedAt time.Time `json:"modified_at"`
|
||||
Size int64 `json:"size"`
|
||||
Digest string `json:"digest"`
|
||||
Details ModelDetails `json:"details,omitempty"`
|
||||
}
|
||||
|
||||
type TokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
type GenerateResponse struct {
|
||||
Model string `json:"model"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
Response string `json:"response"`
|
||||
|
||||
Done bool `json:"done"`
|
||||
Context []int `json:"context,omitempty"`
|
||||
|
||||
Metrics
|
||||
}
|
||||
|
||||
type ModelDetails struct {
|
||||
ParentModel string `json:"parent_model"`
|
||||
Format string `json:"format"`
|
||||
Family string `json:"family"`
|
||||
Families []string `json:"families"`
|
||||
ParameterSize string `json:"parameter_size"`
|
||||
QuantizationLevel string `json:"quantization_level"`
|
||||
}
|
||||
|
||||
func (m *Metrics) Summary() {
|
||||
if m.TotalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "total duration: %v\n", m.TotalDuration)
|
||||
}
|
||||
|
||||
if m.LoadDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "load duration: %v\n", m.LoadDuration)
|
||||
}
|
||||
|
||||
if m.PromptEvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", m.PromptEvalCount)
|
||||
}
|
||||
|
||||
if m.PromptEvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", m.PromptEvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(m.PromptEvalCount)/m.PromptEvalDuration.Seconds())
|
||||
}
|
||||
|
||||
if m.EvalCount > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", m.EvalCount)
|
||||
}
|
||||
|
||||
if m.EvalDuration > 0 {
|
||||
fmt.Fprintf(os.Stderr, "eval duration: %s\n", m.EvalDuration)
|
||||
fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(m.EvalCount)/m.EvalDuration.Seconds())
|
||||
}
|
||||
NumThread int `json:"num_thread,omitempty"`
|
||||
}
|
||||
|
||||
var ErrInvalidOpts = fmt.Errorf("invalid options")
|
||||
@@ -309,39 +238,44 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
|
||||
// when JSON unmarshals numbers, it uses float64, not int
|
||||
field.SetInt(int64(t))
|
||||
default:
|
||||
return fmt.Errorf("option %q must be of type integer", key)
|
||||
log.Printf("could not convert model parameter %v of type %T to int, skipped", key, val)
|
||||
}
|
||||
case reflect.Bool:
|
||||
val, ok := val.(bool)
|
||||
if !ok {
|
||||
return fmt.Errorf("option %q must be of type boolean", key)
|
||||
log.Printf("could not convert model parameter %v of type %T to bool, skipped", key, val)
|
||||
continue
|
||||
}
|
||||
field.SetBool(val)
|
||||
case reflect.Float32:
|
||||
// JSON unmarshals to float64
|
||||
val, ok := val.(float64)
|
||||
if !ok {
|
||||
return fmt.Errorf("option %q must be of type float32", key)
|
||||
log.Printf("could not convert model parameter %v of type %T to float32, skipped", key, val)
|
||||
continue
|
||||
}
|
||||
field.SetFloat(val)
|
||||
case reflect.String:
|
||||
val, ok := val.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("option %q must be of type string", key)
|
||||
log.Printf("could not convert model parameter %v of type %T to string, skipped", key, val)
|
||||
continue
|
||||
}
|
||||
field.SetString(val)
|
||||
case reflect.Slice:
|
||||
// JSON unmarshals to []interface{}, not []string
|
||||
val, ok := val.([]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("option %q must be of type array", key)
|
||||
log.Printf("could not convert model parameter %v of type %T to slice, skipped", key, val)
|
||||
continue
|
||||
}
|
||||
// convert []interface{} to []string
|
||||
slice := make([]string, len(val))
|
||||
for i, item := range val {
|
||||
str, ok := item.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("option %q must be of an array of strings", key)
|
||||
log.Printf("could not convert model parameter %v of type %T to slice of strings, skipped", key, item)
|
||||
continue
|
||||
}
|
||||
slice[i] = str
|
||||
}
|
||||
@@ -365,7 +299,7 @@ func DefaultOptions() Options {
|
||||
return Options{
|
||||
// options set on request to runner
|
||||
NumPredict: -1,
|
||||
NumKeep: 0,
|
||||
NumKeep: -1,
|
||||
Temperature: 0.8,
|
||||
TopK: 40,
|
||||
TopP: 0.9,
|
||||
@@ -381,22 +315,20 @@ func DefaultOptions() Options {
|
||||
PenalizeNewline: true,
|
||||
Seed: -1,
|
||||
|
||||
Runner: Runner{
|
||||
// options set when the model is loaded
|
||||
NumCtx: 2048,
|
||||
RopeFrequencyBase: 10000.0,
|
||||
RopeFrequencyScale: 1.0,
|
||||
NumBatch: 512,
|
||||
NumGPU: -1, // -1 here indicates that NumGPU should be set dynamically
|
||||
NumGQA: 1,
|
||||
NumThread: 0, // let the runtime decide
|
||||
LowVRAM: false,
|
||||
F16KV: true,
|
||||
UseMLock: false,
|
||||
UseMMap: true,
|
||||
UseNUMA: false,
|
||||
EmbeddingOnly: true,
|
||||
},
|
||||
// options set when the model is loaded
|
||||
NumCtx: 2048,
|
||||
RopeFrequencyBase: 10000.0,
|
||||
RopeFrequencyScale: 1.0,
|
||||
NumBatch: 512,
|
||||
NumGPU: -1, // -1 here indicates that NumGPU should be set dynamically
|
||||
NumGQA: 1,
|
||||
NumThread: 0, // let the runtime decide
|
||||
LowVRAM: false,
|
||||
F16KV: true,
|
||||
UseMLock: false,
|
||||
UseMMap: true,
|
||||
UseNUMA: false,
|
||||
EmbeddingOnly: true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -415,79 +347,16 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) {
|
||||
switch t := v.(type) {
|
||||
case float64:
|
||||
if t < 0 {
|
||||
d.Duration = time.Duration(math.MaxInt64)
|
||||
} else {
|
||||
d.Duration = time.Duration(t * float64(time.Second))
|
||||
t = math.MaxFloat64
|
||||
}
|
||||
|
||||
d.Duration = time.Duration(t)
|
||||
case string:
|
||||
d.Duration, err = time.ParseDuration(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.Duration < 0 {
|
||||
d.Duration = time.Duration(math.MaxInt64)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FormatParams converts specified parameter options to their correct types
|
||||
func FormatParams(params map[string][]string) (map[string]interface{}, error) {
|
||||
opts := Options{}
|
||||
valueOpts := reflect.ValueOf(&opts).Elem() // names of the fields in the options struct
|
||||
typeOpts := reflect.TypeOf(opts) // types of the fields in the options struct
|
||||
|
||||
// build map of json struct tags to their types
|
||||
jsonOpts := make(map[string]reflect.StructField)
|
||||
for _, field := range reflect.VisibleFields(typeOpts) {
|
||||
jsonTag := strings.Split(field.Tag.Get("json"), ",")[0]
|
||||
if jsonTag != "" {
|
||||
jsonOpts[jsonTag] = field
|
||||
}
|
||||
}
|
||||
|
||||
out := make(map[string]interface{})
|
||||
// iterate params and set values based on json struct tags
|
||||
for key, vals := range params {
|
||||
if opt, ok := jsonOpts[key]; !ok {
|
||||
return nil, fmt.Errorf("unknown parameter '%s'", key)
|
||||
} else {
|
||||
field := valueOpts.FieldByName(opt.Name)
|
||||
if field.IsValid() && field.CanSet() {
|
||||
switch field.Kind() {
|
||||
case reflect.Float32:
|
||||
floatVal, err := strconv.ParseFloat(vals[0], 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid float value %s", vals)
|
||||
}
|
||||
|
||||
out[key] = float32(floatVal)
|
||||
case reflect.Int:
|
||||
intVal, err := strconv.ParseInt(vals[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid int value %s", vals)
|
||||
}
|
||||
|
||||
out[key] = intVal
|
||||
case reflect.Bool:
|
||||
boolVal, err := strconv.ParseBool(vals[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid bool value %s", vals)
|
||||
}
|
||||
|
||||
out[key] = boolVal
|
||||
case reflect.String:
|
||||
out[key] = vals[0]
|
||||
case reflect.Slice:
|
||||
// TODO: only string slices are supported right now
|
||||
out[key] = vals
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
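For context (not part of the diff), here is a minimal sketch of how FormatParams could be called to turn string-valued parameters into typed option values. The keys must match JSON tags on the Options struct; num_thread appears in the diff above, while temperature is assumed here purely for illustration:

```
package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// Keys correspond to JSON tags on api.Options; values arrive as strings
	// and are converted to their typed forms by FormatParams.
	params := map[string][]string{
		"temperature": {"0.7"}, // assumed tag, shown for illustration
		"num_thread":  {"8"},
	}

	opts, err := api.FormatParams(params)
	if err != nil {
		log.Fatal(err)
	}

	// temperature becomes a float32, num_thread becomes an int64.
	fmt.Printf("%#v\n", opts)
}
```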
93
app/.gitignore
vendored
@@ -1 +1,92 @@
|
||||
ollama.syso
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
lerna-debug.log*
|
||||
|
||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
||||
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
|
||||
|
||||
# Runtime data
|
||||
pids
|
||||
*.pid
|
||||
*.seed
|
||||
*.pid.lock
|
||||
.DS_Store
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
lib-cov
|
||||
|
||||
# Coverage directory used by tools like istanbul
|
||||
coverage
|
||||
*.lcov
|
||||
|
||||
# nyc test coverage
|
||||
.nyc_output
|
||||
|
||||
# node-waf configuration
|
||||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
build/Release
|
||||
|
||||
# Dependency directories
|
||||
node_modules/
|
||||
jspm_packages/
|
||||
|
||||
# TypeScript v1 declaration files
|
||||
typings/
|
||||
|
||||
# TypeScript cache
|
||||
*.tsbuildinfo
|
||||
|
||||
# Optional npm cache directory
|
||||
.npm
|
||||
|
||||
# Optional eslint cache
|
||||
.eslintcache
|
||||
|
||||
# Optional REPL history
|
||||
.node_repl_history
|
||||
|
||||
# Output of 'npm pack'
|
||||
*.tgz
|
||||
|
||||
# Yarn Integrity file
|
||||
.yarn-integrity
|
||||
|
||||
# dotenv environment variables file
|
||||
.env
|
||||
.env.test
|
||||
|
||||
# parcel-bundler cache (https://parceljs.org/)
|
||||
.cache
|
||||
|
||||
# next.js build output
|
||||
.next
|
||||
|
||||
# nuxt.js build output
|
||||
.nuxt
|
||||
|
||||
# vuepress build output
|
||||
.vuepress/dist
|
||||
|
||||
# Serverless directories
|
||||
.serverless/
|
||||
|
||||
# FuseBox cache
|
||||
.fusebox/
|
||||
|
||||
# DynamoDB Local files
|
||||
.dynamodb/
|
||||
|
||||
# Webpack
|
||||
.webpack/
|
||||
|
||||
# Vite
|
||||
.vite/
|
||||
|
||||
# Electron-Forge
|
||||
out/
|
||||
|
@@ -1,22 +1,21 @@
# Ollama App
# Desktop

## Linux
This app builds upon Ollama to provide a desktop experience for running models.

TODO
## Developing

## MacOS

TODO

## Windows

If you want to build the installer, you'll need to install
- https://jrsoftware.org/isinfo.php

In the top directory of this repo, run the following powershell script
to build the ollama CLI, ollama app, and ollama installer.
First, build the `ollama` binary:

```
powershell -ExecutionPolicy Bypass -File .\scripts\build_windows.ps1
cd ..
go build .
```

Then run the desktop app with `npm start`:

```
cd app
npm install
npm start
```
@@ -1,17 +0,0 @@
|
||||
package assets
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
//go:embed *.ico
|
||||
var icons embed.FS
|
||||
|
||||
func ListIcons() ([]string, error) {
|
||||
return fs.Glob(icons, "*")
|
||||
}
|
||||
|
||||
func GetIcon(filename string) ([]byte, error) {
|
||||
return icons.ReadFile(filename)
|
||||
}
|
@@ -47,6 +47,16 @@ const config: ForgeConfig = {
|
||||
},
|
||||
rebuildConfig: {},
|
||||
makers: [new MakerSquirrel({}), new MakerZIP({}, ['darwin'])],
|
||||
publishers: [
|
||||
new PublisherGithub({
|
||||
repository: {
|
||||
name: 'ollama',
|
||||
owner: 'jmorganca',
|
||||
},
|
||||
draft: false,
|
||||
prerelease: true,
|
||||
}),
|
||||
],
|
||||
hooks: {
|
||||
readPackageJson: async (_, packageJson) => {
|
||||
return { ...packageJson, version: process.env.VERSION || packageJson.version }
|
@@ -1,9 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package lifecycle
|
||||
|
||||
import "fmt"
|
||||
|
||||
func GetStarted() error {
|
||||
return fmt.Errorf("GetStarted not implemented")
|
||||
}
|
@@ -1,44 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func GetStarted() error {
|
||||
const CREATE_NEW_CONSOLE = 0x00000010
|
||||
var err error
|
||||
bannerScript := filepath.Join(AppDir, "ollama_welcome.ps1")
|
||||
args := []string{
|
||||
// TODO once we're signed, the execution policy bypass should be removed
|
||||
"powershell", "-noexit", "-ExecutionPolicy", "Bypass", "-nologo", "-file", bannerScript,
|
||||
}
|
||||
args[0], err = exec.LookPath(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Make sure the script actually exists
|
||||
_, err = os.Stat(bannerScript)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting started banner script error %s", err)
|
||||
}
|
||||
|
||||
slog.Info(fmt.Sprintf("opening getting started terminal with %v", args))
|
||||
attrs := &os.ProcAttr{
|
||||
Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
|
||||
Sys: &syscall.SysProcAttr{CreationFlags: CREATE_NEW_CONSOLE, HideWindow: false},
|
||||
}
|
||||
proc, err := os.StartProcess(args[0], args, attrs)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to start getting started shell %w", err)
|
||||
}
|
||||
|
||||
slog.Debug(fmt.Sprintf("getting started terminal PID: %d", proc.Pid))
|
||||
return proc.Release()
|
||||
}
|
@@ -1,92 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/jmorganca/ollama/app/store"
|
||||
"github.com/jmorganca/ollama/app/tray"
|
||||
)
|
||||
|
||||
func Run() {
|
||||
InitLogging()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
var done chan int
|
||||
|
||||
t, err := tray.NewTray()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to start: %s", err)
|
||||
}
|
||||
callbacks := t.GetCallbacks()
|
||||
|
||||
signals := make(chan os.Signal, 1)
|
||||
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
go func() {
|
||||
slog.Debug("starting callback loop")
|
||||
for {
|
||||
select {
|
||||
case <-callbacks.Quit:
|
||||
slog.Debug("quit called")
|
||||
t.Quit()
|
||||
case <-signals:
|
||||
slog.Debug("shutting down due to signal")
|
||||
t.Quit()
|
||||
case <-callbacks.Update:
|
||||
err := DoUpgrade(cancel, done)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("upgrade attempt failed: %s", err))
|
||||
}
|
||||
case <-callbacks.ShowLogs:
|
||||
ShowLogs()
|
||||
case <-callbacks.DoFirstUse:
|
||||
err := GetStarted()
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("Failed to launch getting started shell: %s", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Are we first use?
|
||||
if !store.GetFirstTimeRun() {
|
||||
slog.Debug("First time run")
|
||||
err = t.DisplayFirstUseNotification()
|
||||
if err != nil {
|
||||
slog.Debug(fmt.Sprintf("XXX failed to display first use notification %v", err))
|
||||
}
|
||||
store.SetFirstTimeRun(true)
|
||||
} else {
|
||||
slog.Debug("Not first time, skipping first run notification")
|
||||
}
|
||||
|
||||
if IsServerRunning(ctx) {
|
||||
slog.Info("Detected another instance of ollama running, exiting")
|
||||
os.Exit(1)
|
||||
} else {
|
||||
done, err = SpawnServer(ctx, CLIName)
|
||||
if err != nil {
|
||||
// TODO - should we retry in a backoff loop?
|
||||
// TODO - should we pop up a warning and maybe add a menu item to view application logs?
|
||||
slog.Error(fmt.Sprintf("Failed to spawn ollama server %s", err))
|
||||
done = make(chan int, 1)
|
||||
done <- 1
|
||||
}
|
||||
}
|
||||
|
||||
StartBackgroundUpdaterChecker(ctx, t.UpdateAvailable)
|
||||
|
||||
t.Run()
|
||||
cancel()
|
||||
slog.Info("Waiting for ollama server to shutdown...")
|
||||
if done != nil {
|
||||
<-done
|
||||
}
|
||||
slog.Info("Ollama app exiting")
|
||||
}
|
@@ -1,46 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func InitLogging() {
|
||||
level := slog.LevelInfo
|
||||
|
||||
if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
|
||||
level = slog.LevelDebug
|
||||
}
|
||||
|
||||
var logFile *os.File
|
||||
var err error
|
||||
// Detect if we're a GUI app on windows, and if not, send logs to console
|
||||
if os.Stderr.Fd() != 0 {
|
||||
// Console app detected
|
||||
logFile = os.Stderr
|
||||
// TODO - write one-line to the app.log file saying we're running in console mode to help avoid confusion
|
||||
} else {
|
||||
logFile, err = os.OpenFile(AppLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to create server log %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
handler := slog.NewTextHandler(logFile, &slog.HandlerOptions{
|
||||
Level: level,
|
||||
AddSource: true,
|
||||
ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr {
|
||||
if attr.Key == slog.SourceKey {
|
||||
source := attr.Value.Any().(*slog.Source)
|
||||
source.File = filepath.Base(source.File)
|
||||
}
|
||||
return attr
|
||||
},
|
||||
})
|
||||
|
||||
slog.SetDefault(slog.New(handler))
|
||||
|
||||
slog.Info("ollama app started")
|
||||
}
|
@@ -1,9 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package lifecycle
|
||||
|
||||
import "log/slog"
|
||||
|
||||
func ShowLogs() {
|
||||
slog.Warn("ShowLogs not yet implemented")
|
||||
}
|
@@ -1,19 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func ShowLogs() {
|
||||
cmd_path := "c:\\Windows\\system32\\cmd.exe"
|
||||
slog.Debug(fmt.Sprintf("viewing logs with start %s", AppDataDir))
|
||||
cmd := exec.Command(cmd_path, "/c", "start", AppDataDir)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: false, CreationFlags: 0x08000000}
|
||||
err := cmd.Start()
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("Failed to open log dir: %s", err))
|
||||
}
|
||||
}
|
@@ -1,79 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
AppName = "ollama app"
|
||||
CLIName = "ollama"
|
||||
AppDir = "/opt/Ollama"
|
||||
AppDataDir = "/opt/Ollama"
|
||||
// TODO - should there be a distinct log dir?
|
||||
UpdateStageDir = "/tmp"
|
||||
AppLogFile = "/tmp/ollama_app.log"
|
||||
ServerLogFile = "/tmp/ollama.log"
|
||||
UpgradeLogFile = "/tmp/ollama_update.log"
|
||||
Installer = "OllamaSetup.exe"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if runtime.GOOS == "windows" {
|
||||
AppName += ".exe"
|
||||
CLIName += ".exe"
|
||||
// Logs, configs, downloads go to LOCALAPPDATA
|
||||
localAppData := os.Getenv("LOCALAPPDATA")
|
||||
AppDataDir = filepath.Join(localAppData, "Ollama")
|
||||
UpdateStageDir = filepath.Join(AppDataDir, "updates")
|
||||
AppLogFile = filepath.Join(AppDataDir, "app.log")
|
||||
ServerLogFile = filepath.Join(AppDataDir, "server.log")
|
||||
UpgradeLogFile = filepath.Join(AppDataDir, "upgrade.log")
|
||||
|
||||
// Executables are stored in APPDATA
|
||||
AppDir = filepath.Join(localAppData, "Programs", "Ollama")
|
||||
|
||||
// Make sure we have PATH set correctly for any spawned children
|
||||
paths := strings.Split(os.Getenv("PATH"), ";")
|
||||
// Start with whatever we find in the PATH/LD_LIBRARY_PATH
|
||||
found := false
|
||||
for _, path := range paths {
|
||||
d, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(AppDir, d) {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
paths = append(paths, AppDir)
|
||||
|
||||
pathVal := strings.Join(paths, ";")
|
||||
slog.Debug("setting PATH=" + pathVal)
|
||||
err := os.Setenv("PATH", pathVal)
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to update PATH: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure our logging dir exists
|
||||
_, err := os.Stat(AppDataDir)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.MkdirAll(AppDataDir, 0o755); err != nil {
|
||||
slog.Error(fmt.Sprintf("create ollama dir %s: %v", AppDataDir, err))
|
||||
}
|
||||
}
|
||||
|
||||
} else if runtime.GOOS == "darwin" {
|
||||
// TODO
|
||||
AppName += ".app"
|
||||
// } else if runtime.GOOS == "linux" {
|
||||
// TODO
|
||||
}
|
||||
}
|
@@ -1,139 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
)
|
||||
|
||||
func getCLIFullPath(command string) string {
|
||||
cmdPath := ""
|
||||
appExe, err := os.Executable()
|
||||
if err == nil {
|
||||
cmdPath = filepath.Join(filepath.Dir(appExe), command)
|
||||
_, err := os.Stat(cmdPath)
|
||||
if err == nil {
|
||||
return cmdPath
|
||||
}
|
||||
}
|
||||
cmdPath, err = exec.LookPath(command)
|
||||
if err == nil {
|
||||
_, err := os.Stat(cmdPath)
|
||||
if err == nil {
|
||||
return cmdPath
|
||||
}
|
||||
}
|
||||
pwd, err := os.Getwd()
|
||||
if err == nil {
|
||||
cmdPath = filepath.Join(pwd, command)
|
||||
_, err = os.Stat(cmdPath)
|
||||
if err == nil {
|
||||
return cmdPath
|
||||
}
|
||||
}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func SpawnServer(ctx context.Context, command string) (chan int, error) {
|
||||
done := make(chan int)
|
||||
|
||||
logDir := filepath.Dir(ServerLogFile)
|
||||
_, err := os.Stat(logDir)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.MkdirAll(logDir, 0o755); err != nil {
|
||||
return done, fmt.Errorf("create ollama server log dir %s: %v", logDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
cmd := getCmd(ctx, getCLIFullPath(command))
|
||||
// send stdout and stderr to a file
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return done, fmt.Errorf("failed to spawn server stdout pipe %s", err)
|
||||
}
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return done, fmt.Errorf("failed to spawn server stderr pipe %s", err)
|
||||
}
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
return done, fmt.Errorf("failed to spawn server stdin pipe %s", err)
|
||||
}
|
||||
|
||||
// TODO - rotation
|
||||
logFile, err := os.OpenFile(ServerLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return done, fmt.Errorf("failed to create server log %w", err)
|
||||
}
|
||||
go func() {
|
||||
defer logFile.Close()
|
||||
io.Copy(logFile, stdout) //nolint:errcheck
|
||||
}()
|
||||
go func() {
|
||||
defer logFile.Close()
|
||||
io.Copy(logFile, stderr) //nolint:errcheck
|
||||
}()
|
||||
|
||||
// run the command and wait for it to finish
|
||||
if err := cmd.Start(); err != nil {
|
||||
return done, fmt.Errorf("failed to start server %w", err)
|
||||
}
|
||||
if cmd.Process != nil {
|
||||
slog.Info(fmt.Sprintf("started ollama server with pid %d", cmd.Process.Pid))
|
||||
}
|
||||
slog.Info(fmt.Sprintf("ollama server logs %s", ServerLogFile))
|
||||
|
||||
go func() {
|
||||
// Keep the server running unless we're shutting down the app
|
||||
crashCount := 0
|
||||
for {
|
||||
cmd.Wait() //nolint:errcheck
|
||||
stdin.Close()
|
||||
var code int
|
||||
if cmd.ProcessState != nil {
|
||||
code = cmd.ProcessState.ExitCode()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
slog.Debug(fmt.Sprintf("server shutdown with exit code %d", code))
|
||||
done <- code
|
||||
return
|
||||
default:
|
||||
crashCount++
|
||||
slog.Warn(fmt.Sprintf("server crash %d - exit code %d - respawning", crashCount, code))
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
if err := cmd.Start(); err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to restart server %s", err))
|
||||
// Keep trying, but back off if we keep failing
|
||||
time.Sleep(time.Duration(crashCount) * time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return done, nil
|
||||
}
|
||||
|
||||
func IsServerRunning(ctx context.Context) bool {
|
||||
client, err := api.ClientFromEnvironment()
|
||||
if err != nil {
|
||||
slog.Info("unable to connect to server")
|
||||
return false
|
||||
}
|
||||
err = client.Heartbeat(ctx)
|
||||
if err != nil {
|
||||
slog.Debug(fmt.Sprintf("heartbeat from server: %s", err))
|
||||
slog.Info("unable to connect to server")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
@@ -1,12 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func getCmd(ctx context.Context, cmd string) *exec.Cmd {
|
||||
return exec.CommandContext(ctx, cmd, "serve")
|
||||
}
|
@@ -1,13 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func getCmd(ctx context.Context, exePath string) *exec.Cmd {
|
||||
cmd := exec.CommandContext(ctx, exePath, "serve")
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true, CreationFlags: 0x08000000}
|
||||
return cmd
|
||||
}
|
@@ -1,238 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jmorganca/ollama/auth"
|
||||
"github.com/jmorganca/ollama/version"
|
||||
)
|
||||
|
||||
var (
|
||||
UpdateCheckURLBase = "https://ollama.com/api/update"
|
||||
UpdateDownloaded = false
|
||||
UpdateCheckInterval = 60 * 60 * time.Second
|
||||
)
|
||||
|
||||
// TODO - maybe move up to the API package?
|
||||
type UpdateResponse struct {
|
||||
UpdateURL string `json:"url"`
|
||||
UpdateVersion string `json:"version"`
|
||||
}
|
||||
|
||||
func getClient(req *http.Request) http.Client {
|
||||
proxyURL, err := http.ProxyFromEnvironment(req)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("failed to handle proxy: %s", err))
|
||||
return http.Client{}
|
||||
}
|
||||
|
||||
return http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyURL(proxyURL),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
|
||||
var updateResp UpdateResponse
|
||||
|
||||
requestURL, err := url.Parse(UpdateCheckURLBase)
|
||||
if err != nil {
|
||||
return false, updateResp
|
||||
}
|
||||
|
||||
query := requestURL.Query()
|
||||
query.Add("os", runtime.GOOS)
|
||||
query.Add("arch", runtime.GOARCH)
|
||||
query.Add("version", version.Version)
|
||||
query.Add("ts", fmt.Sprintf("%d", time.Now().Unix()))
|
||||
|
||||
nonce, err := auth.NewNonce(rand.Reader, 16)
|
||||
if err != nil {
|
||||
return false, updateResp
|
||||
}
|
||||
|
||||
query.Add("nonce", nonce)
|
||||
requestURL.RawQuery = query.Encode()
|
||||
|
||||
data := []byte(fmt.Sprintf("%s,%s", http.MethodGet, requestURL.RequestURI()))
|
||||
signature, err := auth.Sign(ctx, data)
|
||||
if err != nil {
|
||||
return false, updateResp
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestURL.String(), nil)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("failed to check for update: %s", err))
|
||||
return false, updateResp
|
||||
}
|
||||
req.Header.Set("Authorization", signature)
|
||||
req.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
|
||||
client := getClient(req)
|
||||
|
||||
slog.Debug("checking for available update", "requestURL", requestURL)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("failed to check for update: %s", err))
|
||||
return false, updateResp
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == 204 {
|
||||
slog.Debug("check update response 204 (current version is up to date)")
|
||||
return false, updateResp
|
||||
}
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("failed to read body response: %s", err))
|
||||
}
|
||||
err = json.Unmarshal(body, &updateResp)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("malformed response checking for update: %s", err))
|
||||
return false, updateResp
|
||||
}
|
||||
// Extract the version string from the URL in the github release artifact path
|
||||
updateResp.UpdateVersion = path.Base(path.Dir(updateResp.UpdateURL))
|
||||
|
||||
slog.Info("New update available at " + updateResp.UpdateURL)
|
||||
return true, updateResp
|
||||
}
|
||||
|
||||
func DownloadNewRelease(ctx context.Context, updateResp UpdateResponse) error {
|
||||
// Do a head first to check etag info
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodHead, updateResp.UpdateURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client := getClient(req)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking update: %w", err)
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("unexpected status attempting to download update %d", resp.StatusCode)
|
||||
}
|
||||
resp.Body.Close()
|
||||
etag := strings.Trim(resp.Header.Get("etag"), "\"")
|
||||
if etag == "" {
|
||||
slog.Debug("no etag detected, falling back to filename based dedup")
|
||||
etag = "_"
|
||||
}
|
||||
filename := Installer
|
||||
_, params, err := mime.ParseMediaType(resp.Header.Get("content-disposition"))
|
||||
if err == nil {
|
||||
filename = params["filename"]
|
||||
}
|
||||
|
||||
stageFilename := filepath.Join(UpdateStageDir, etag, filename)
|
||||
|
||||
// Check to see if we already have it downloaded
|
||||
_, err = os.Stat(stageFilename)
|
||||
if err == nil {
|
||||
slog.Info("update already downloaded")
|
||||
return nil
|
||||
}
|
||||
|
||||
cleanupOldDownloads()
|
||||
|
||||
req.Method = http.MethodGet
|
||||
resp, err = client.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking update: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
etag = strings.Trim(resp.Header.Get("etag"), "\"")
|
||||
if etag == "" {
|
||||
slog.Debug("no etag detected, falling back to filename based dedup") // TODO probably can get rid of this redundant log
|
||||
etag = "_"
|
||||
}
|
||||
|
||||
stageFilename = filepath.Join(UpdateStageDir, etag, filename)
|
||||
|
||||
_, err = os.Stat(filepath.Dir(stageFilename))
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.MkdirAll(filepath.Dir(stageFilename), 0o755); err != nil {
|
||||
return fmt.Errorf("create ollama dir %s: %v", filepath.Dir(stageFilename), err)
|
||||
}
|
||||
}
|
||||
|
||||
payload, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read body response: %w", err)
|
||||
}
|
||||
fp, err := os.OpenFile(stageFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("write payload %s: %w", stageFilename, err)
|
||||
}
|
||||
defer fp.Close()
|
||||
if n, err := fp.Write(payload); err != nil || n != len(payload) {
|
||||
return fmt.Errorf("write payload %s: %d vs %d -- %w", stageFilename, n, len(payload), err)
|
||||
}
|
||||
slog.Info("new update downloaded " + stageFilename)
|
||||
|
||||
UpdateDownloaded = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanupOldDownloads() {
|
||||
files, err := os.ReadDir(UpdateStageDir)
|
||||
if err != nil && errors.Is(err, os.ErrNotExist) {
|
||||
// Expected behavior on first run
|
||||
return
|
||||
} else if err != nil {
|
||||
slog.Warn(fmt.Sprintf("failed to list stage dir: %s", err))
|
||||
return
|
||||
}
|
||||
for _, file := range files {
|
||||
fullname := filepath.Join(UpdateStageDir, file.Name())
|
||||
slog.Debug("cleaning up old download: " + fullname)
|
||||
err = os.RemoveAll(fullname)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("failed to cleanup stale update download %s", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func StartBackgroundUpdaterChecker(ctx context.Context, cb func(string) error) {
|
||||
go func() {
|
||||
// Don't blast an update message immediately after startup
|
||||
// time.Sleep(30 * time.Second)
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
for {
|
||||
available, resp := IsNewReleaseAvailable(ctx)
|
||||
if available {
|
||||
err := DownloadNewRelease(ctx, resp)
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to download new release: %s", err))
|
||||
}
|
||||
err = cb(resp.UpdateVersion)
|
||||
if err != nil {
|
||||
slog.Warn(fmt.Sprintf("failed to register update available with tray: %s", err))
|
||||
}
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
slog.Debug("stopping background update checker")
|
||||
return
|
||||
default:
|
||||
time.Sleep(UpdateCheckInterval)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
@@ -1,12 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func DoUpgrade(cancel context.CancelFunc, done chan int) error {
|
||||
return fmt.Errorf("DoUpgrade not yet implemented")
|
||||
}
|
@@ -1,80 +0,0 @@
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func DoUpgrade(cancel context.CancelFunc, done chan int) error {
|
||||
files, err := filepath.Glob(filepath.Join(UpdateStageDir, "*", "*.exe")) // TODO generalize for multiplatform
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to lookup downloads: %s", err)
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return fmt.Errorf("no update downloads found")
|
||||
} else if len(files) > 1 {
|
||||
// Shouldn't happen
|
||||
slog.Warn(fmt.Sprintf("multiple downloads found, using first one %v", files))
|
||||
}
|
||||
installerExe := files[0]
|
||||
|
||||
slog.Info("starting upgrade with " + installerExe)
|
||||
slog.Info("upgrade log file " + UpgradeLogFile)
|
||||
|
||||
// When running in debug mode, we'll be "verbose" and let the installer pop up and prompt
|
||||
installArgs := []string{
|
||||
"/CLOSEAPPLICATIONS", // Quit the tray app if it's still running
|
||||
"/LOG=" + filepath.Base(UpgradeLogFile), // Only relative seems reliable, so set pwd
|
||||
"/FORCECLOSEAPPLICATIONS", // Force close the tray app - might be needed
|
||||
}
|
||||
// When we're not in debug mode, make the upgrade as quiet as possible (no GUI, no prompts)
|
||||
// TODO - temporarily disable since we're pinning in debug mode for the preview
|
||||
// if debug := os.Getenv("OLLAMA_DEBUG"); debug == "" {
|
||||
installArgs = append(installArgs,
|
||||
"/SP", // Skip the "This will install... Do you wish to continue" prompt
|
||||
"/SUPPRESSMSGBOXES",
|
||||
"/SILENT",
|
||||
"/VERYSILENT",
|
||||
)
|
||||
// }
|
||||
|
||||
// Safeguard in case we have requests in flight that need to drain...
|
||||
slog.Info("Waiting for server to shutdown")
|
||||
cancel()
|
||||
if done != nil {
|
||||
<-done
|
||||
} else {
|
||||
// Shouldn't happen
|
||||
slog.Warn("done chan was nil, not actually waiting")
|
||||
}
|
||||
|
||||
slog.Debug(fmt.Sprintf("starting installer: %s %v", installerExe, installArgs))
|
||||
os.Chdir(filepath.Dir(UpgradeLogFile)) //nolint:errcheck
|
||||
cmd := exec.Command(installerExe, installArgs...)
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return fmt.Errorf("unable to start ollama app %w", err)
|
||||
}
|
||||
|
||||
if cmd.Process != nil {
|
||||
err = cmd.Process.Release()
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to release server process: %s", err))
|
||||
}
|
||||
} else {
|
||||
// TODO - some details about why it didn't start, or is this a pedantic error case?
|
||||
return fmt.Errorf("installer process did not start")
|
||||
}
|
||||
|
||||
// TODO should we linger for a moment and check to make sure it's actually running by checking the pid?
|
||||
|
||||
slog.Info("Installer started in background, exiting")
|
||||
|
||||
os.Exit(0)
|
||||
// Not reached
|
||||
return nil
|
||||
}
|
12
app/main.go
@@ -1,12 +0,0 @@
|
||||
package main
|
||||
|
||||
// Compile with the following to get rid of the cmd pop up on windows
|
||||
// go build -ldflags="-H windowsgui" .
|
||||
|
||||
import (
|
||||
"github.com/jmorganca/ollama/app/lifecycle"
|
||||
)
|
||||
|
||||
func main() {
|
||||
lifecycle.Run()
|
||||
}
|
153
app/ollama.iss
@@ -1,153 +0,0 @@
|
||||
; Inno Setup Installer for Ollama
|
||||
;
|
||||
; To build the installer use the build script invoked from the top of the source tree
|
||||
;
|
||||
; powershell -ExecutionPolicy Bypass -File .\scripts\build_windows.ps
|
||||
|
||||
|
||||
#define MyAppName "Ollama"
|
||||
#if GetEnv("PKG_VERSION") != ""
|
||||
#define MyAppVersion GetEnv("PKG_VERSION")
|
||||
#else
|
||||
#define MyAppVersion "0.0.0"
|
||||
#endif
|
||||
#define MyAppPublisher "Ollama"
|
||||
#define MyAppURL "https://ollama.com/"
|
||||
#define MyAppExeName "ollama app.exe"
|
||||
#define MyIcon ".\assets\app.ico"
|
||||
|
||||
[Setup]
|
||||
; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications.
|
||||
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
|
||||
AppId={{44E83376-CE68-45EB-8FC1-393500EB558C}
|
||||
AppName={#MyAppName}
|
||||
AppVersion={#MyAppVersion}
|
||||
VersionInfoVersion={#MyAppVersion}
|
||||
;AppVerName={#MyAppName} {#MyAppVersion}
|
||||
AppPublisher={#MyAppPublisher}
|
||||
AppPublisherURL={#MyAppURL}
|
||||
AppSupportURL={#MyAppURL}
|
||||
AppUpdatesURL={#MyAppURL}
|
||||
ArchitecturesAllowed=x64
|
||||
ArchitecturesInstallIn64BitMode=x64
|
||||
DefaultDirName={localappdata}\Programs\{#MyAppName}
|
||||
DefaultGroupName={#MyAppName}
|
||||
DisableProgramGroupPage=yes
|
||||
PrivilegesRequired=lowest
|
||||
OutputBaseFilename="OllamaSetup"
|
||||
SetupIconFile={#MyIcon}
|
||||
UninstallDisplayIcon={uninstallexe}
|
||||
Compression=lzma2
|
||||
SolidCompression=no
|
||||
WizardStyle=modern
|
||||
ChangesEnvironment=yes
|
||||
OutputDir=..\dist\
|
||||
|
||||
; Disable logging once everything's battle tested
|
||||
; Filename will be %TEMP%\Setup Log*.txt
|
||||
SetupLogging=yes
|
||||
CloseApplications=yes
|
||||
RestartApplications=no
|
||||
|
||||
; Make sure they can at least download llama2 as a minimum
|
||||
ExtraDiskSpaceRequired=3826806784
|
||||
|
||||
; https://jrsoftware.org/ishelp/index.php?topic=setup_wizardimagefile
|
||||
WizardSmallImageFile=.\assets\setup.bmp
|
||||
|
||||
; TODO verify actual min windows version...
|
||||
; OG Win 10
|
||||
MinVersion=10.0.10240
|
||||
|
||||
; First release that supports WinRT UI Composition for win32 apps
|
||||
; MinVersion=10.0.17134
|
||||
; First release with XAML Islands - possible UI path forward
|
||||
; MinVersion=10.0.18362
|
||||
|
||||
; quiet...
|
||||
DisableDirPage=yes
|
||||
DisableFinishedPage=yes
|
||||
DisableReadyMemo=yes
|
||||
DisableReadyPage=yes
|
||||
DisableStartupPrompt=yes
|
||||
DisableWelcomePage=yes
|
||||
|
||||
; TODO - percentage can't be set less than 100, so how to make it shorter?
|
||||
; WizardSizePercent=100,80
|
||||
|
||||
#if GetEnv("KEY_CONTAINER")
|
||||
SignTool=MySignTool
|
||||
SignedUninstaller=yes
|
||||
#endif
|
||||
|
||||
SetupMutex=OllamaSetupMutex
|
||||
|
||||
[Languages]
|
||||
Name: "english"; MessagesFile: "compiler:Default.isl"
|
||||
|
||||
[LangOptions]
|
||||
DialogFontSize=12
|
||||
|
||||
[Files]
|
||||
Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
|
||||
Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
|
||||
Source: "..\dist\windeps\*.dll"; DestDir: "{app}"; Flags: ignoreversion 64bit
|
||||
Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
|
||||
Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion
|
||||
|
||||
[Icons]
|
||||
Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
|
||||
Name: "{userstartup}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
|
||||
Name: "{userprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
|
||||
|
||||
[Run]
|
||||
Filename: "{cmd}"; Parameters: "/C set PATH={app};%PATH% & ""{app}\{#MyAppExeName}"""; Flags: postinstall nowait runhidden
|
||||
|
||||
[UninstallRun]
|
||||
; Filename: "{cmd}"; Parameters: "/C ""taskkill /im ''{#MyAppExeName}'' /f /t"; Flags: runhidden
|
||||
; Filename: "{cmd}"; Parameters: "/C ""taskkill /im ollama.exe /f /t"; Flags: runhidden
|
||||
Filename: "taskkill"; Parameters: "/im ""{#MyAppExeName}"" /f /t"; Flags: runhidden
|
||||
Filename: "taskkill"; Parameters: "/im ""ollama.exe"" /f /t"; Flags: runhidden
|
||||
; HACK! need to give the server and app enough time to exit
|
||||
; TODO - convert this to a Pascal code script so it waits until they're no longer running, then completes
|
||||
Filename: "{cmd}"; Parameters: "/c timeout 5"; Flags: runhidden
|
||||
|
||||
[UninstallDelete]
|
||||
Type: filesandordirs; Name: "{%TEMP}\ollama*"
|
||||
Type: filesandordirs; Name: "{%LOCALAPPDATA}\Ollama"
|
||||
Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama"
|
||||
Type: filesandordirs; Name: "{%USERPROFILE}\.ollama"
|
||||
; NOTE: if the user has a custom OLLAMA_MODELS it will be preserved
|
||||
|
||||
[Messages]
|
||||
WizardReady=Ollama Windows Preview
|
||||
ReadyLabel1=%nLet's get you up and running with your own large language models.
|
||||
SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or finish the other installer, then click OK to continue with this install, or Cancel to exit.
|
||||
|
||||
|
||||
;FinishedHeadingLabel=Run your first model
|
||||
;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama2
|
||||
;ClickFinish=%n
|
||||
|
||||
[Registry]
|
||||
Root: HKCU; Subkey: "Environment"; \
|
||||
ValueType: expandsz; ValueName: "Path"; ValueData: "{olddata};{app}"; \
|
||||
Check: NeedsAddPath('{app}')
|
||||
|
||||
[Code]
|
||||
|
||||
function NeedsAddPath(Param: string): boolean;
|
||||
var
|
||||
OrigPath: string;
|
||||
begin
|
||||
if not RegQueryStringValue(HKEY_CURRENT_USER,
|
||||
'Environment',
|
||||
'Path', OrigPath)
|
||||
then begin
|
||||
Result := True;
|
||||
exit;
|
||||
end;
|
||||
{ look for the path with leading and trailing semicolon }
|
||||
{ Pos() returns 0 if not found }
|
||||
Result := Pos(';' + ExpandConstant(Param) + ';', ';' + OrigPath + ';') = 0;
|
||||
end;
|
@@ -1,29 +0,0 @@
|
||||
#include <winver.h>
|
||||
|
||||
VS_VERSION_INFO VERSIONINFO
|
||||
FILEFLAGSMASK 0x3fL
|
||||
#ifdef _DEBUG
|
||||
FILEFLAGS 0x1L
|
||||
#else
|
||||
FILEFLAGS 0x0L
|
||||
#endif
|
||||
FILEOS 0x40004L
|
||||
FILETYPE 0x1L
|
||||
FILESUBTYPE 0x0L
|
||||
BEGIN
|
||||
BLOCK "StringFileInfo"
|
||||
BEGIN
|
||||
BLOCK "040904b0"
|
||||
BEGIN
|
||||
VALUE "FileDescription", "Ollama"
|
||||
VALUE "InternalName", "Ollama"
|
||||
VALUE "OriginalFilename", "ollama app.exe"
|
||||
VALUE "ProductName", "Ollama"
|
||||
END
|
||||
END
|
||||
|
||||
BLOCK "VarFileInfo"
|
||||
BEGIN
|
||||
VALUE "Translation", 0x409, 1200
|
||||
END
|
||||
END
|
@@ -1,8 +0,0 @@
|
||||
# TODO - consider ANSI colors and maybe ASCII art...
|
||||
write-host ""
|
||||
write-host "Welcome to Ollama!"
|
||||
write-host ""
|
||||
write-host "Run your first model:"
|
||||
write-host ""
|
||||
write-host "`tollama run llama2"
|
||||
write-host ""
|
990
macapp/package-lock.json → app/package-lock.json
generated
@@ -46,7 +46,7 @@
|
||||
"chmodr": "^1.2.0",
|
||||
"copy-webpack-plugin": "^11.0.0",
|
||||
"css-loader": "^6.8.1",
|
||||
"electron": "25.9.2",
|
||||
"electron": "25.2.0",
|
||||
"eslint": "^8.43.0",
|
||||
"eslint-plugin-import": "^2.27.5",
|
||||
"fork-ts-checker-webpack-plugin": "^7.3.0",
|
@@ -162,56 +162,13 @@ app.on('before-quit', () => {
|
||||
}
|
||||
})
|
||||
|
||||
const updateURL = `https://ollama.ai/api/update?os=${process.platform}&arch=${
|
||||
process.arch
|
||||
}&version=${app.getVersion()}&id=${id()}`
|
||||
|
||||
let latest = ''
|
||||
async function isNewReleaseAvailable() {
|
||||
try {
|
||||
const response = await fetch(updateURL)
|
||||
|
||||
if (!response.ok) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (response.status === 204) {
|
||||
return false
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
|
||||
const url = data?.url
|
||||
if (!url) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (latest === url) {
|
||||
return false
|
||||
}
|
||||
|
||||
latest = url
|
||||
|
||||
return true
|
||||
} catch (error) {
|
||||
logger.error(`update check failed - ${error}`)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
async function checkUpdate() {
|
||||
const available = await isNewReleaseAvailable()
|
||||
if (available) {
|
||||
logger.info('checking for update')
|
||||
autoUpdater.checkForUpdates()
|
||||
}
|
||||
}
|
||||
|
||||
function init() {
|
||||
if (app.isPackaged) {
|
||||
checkUpdate()
|
||||
autoUpdater.checkForUpdates()
|
||||
setInterval(() => {
|
||||
checkUpdate()
|
||||
if (!updateAvailable) {
|
||||
autoUpdater.checkForUpdates()
|
||||
}
|
||||
}, 60 * 60 * 1000)
|
||||
}
|
||||
|
||||
@@ -289,7 +246,11 @@ function id(): string {
|
||||
return uuid
|
||||
}
|
||||
|
||||
autoUpdater.setFeedURL({ url: updateURL })
|
||||
autoUpdater.setFeedURL({
|
||||
url: `https://ollama.ai/api/update?os=${process.platform}&arch=${
|
||||
process.arch
|
||||
}&version=${app.getVersion()}&id=${id()}`,
|
||||
})
|
||||
|
||||
autoUpdater.on('error', e => {
|
||||
logger.error(`update check failed - ${e.message}`)
|
@@ -1,98 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type Store struct {
|
||||
ID string `json:"id"`
|
||||
FirstTimeRun bool `json:"first-time-run"`
|
||||
}
|
||||
|
||||
var (
|
||||
lock sync.Mutex
|
||||
store Store
|
||||
)
|
||||
|
||||
func GetID() string {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if store.ID == "" {
|
||||
initStore()
|
||||
}
|
||||
return store.ID
|
||||
|
||||
}
|
||||
|
||||
func GetFirstTimeRun() bool {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if store.ID == "" {
|
||||
initStore()
|
||||
}
|
||||
return store.FirstTimeRun
|
||||
}
|
||||
|
||||
func SetFirstTimeRun(val bool) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
if store.FirstTimeRun == val {
|
||||
return
|
||||
}
|
||||
store.FirstTimeRun = val
|
||||
writeStore(getStorePath())
|
||||
}
|
||||
|
||||
// lock must be held
|
||||
func initStore() {
|
||||
storeFile, err := os.Open(getStorePath())
|
||||
if err == nil {
|
||||
defer storeFile.Close()
|
||||
err = json.NewDecoder(storeFile).Decode(&store)
|
||||
if err == nil {
|
||||
slog.Debug(fmt.Sprintf("loaded existing store %s - ID: %s", getStorePath(), store.ID))
|
||||
return
|
||||
}
|
||||
} else if !errors.Is(err, os.ErrNotExist) {
|
||||
slog.Debug(fmt.Sprintf("unexpected error searching for store: %s", err))
|
||||
}
|
||||
slog.Debug("initializing new store")
|
||||
store.ID = uuid.New().String()
|
||||
writeStore(getStorePath())
|
||||
}
|
||||
|
||||
func writeStore(storeFilename string) {
|
||||
ollamaDir := filepath.Dir(storeFilename)
|
||||
_, err := os.Stat(ollamaDir)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.MkdirAll(ollamaDir, 0o755); err != nil {
|
||||
slog.Error(fmt.Sprintf("create ollama dir %s: %v", ollamaDir, err))
|
||||
return
|
||||
}
|
||||
}
|
||||
payload, err := json.Marshal(store)
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to marshal store: %s", err))
|
||||
return
|
||||
}
|
||||
fp, err := os.OpenFile(storeFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("write store payload %s: %v", storeFilename, err))
|
||||
return
|
||||
}
|
||||
defer fp.Close()
|
||||
if n, err := fp.Write(payload); err != nil || n != len(payload) {
|
||||
slog.Error(fmt.Sprintf("write store payload %s: %d vs %d -- %v", storeFilename, n, len(payload), err))
|
||||
return
|
||||
}
|
||||
slog.Debug("Store contents: " + string(payload))
|
||||
slog.Info(fmt.Sprintf("wrote store: %s", storeFilename))
|
||||
}
|
@@ -1,13 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func getStorePath() string {
|
||||
// TODO - system wide location?
|
||||
|
||||
home := os.Getenv("HOME")
|
||||
return filepath.Join(home, "Library", "Application Support", "Ollama", "config.json")
|
||||
}
|
@@ -1,16 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func getStorePath() string {
|
||||
if os.Geteuid() == 0 {
|
||||
// TODO where should we store this on linux for system-wide operation?
|
||||
return "/etc/ollama/config.json"
|
||||
}
|
||||
|
||||
home := os.Getenv("HOME")
|
||||
return filepath.Join(home, ".ollama", "config.json")
|
||||
}
|
@@ -1,11 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func getStorePath() string {
|
||||
localAppData := os.Getenv("LOCALAPPDATA")
|
||||
return filepath.Join(localAppData, "Ollama", "config.json")
|
||||
}
|
@@ -1,24 +0,0 @@
|
||||
package commontray
|
||||
|
||||
var (
|
||||
Title = "Ollama"
|
||||
ToolTip = "Ollama"
|
||||
|
||||
UpdateIconName = "tray_upgrade"
|
||||
IconName = "tray"
|
||||
)
|
||||
|
||||
type Callbacks struct {
|
||||
Quit chan struct{}
|
||||
Update chan struct{}
|
||||
DoFirstUse chan struct{}
|
||||
ShowLogs chan struct{}
|
||||
}
|
||||
|
||||
type OllamaTray interface {
|
||||
GetCallbacks() Callbacks
|
||||
Run()
|
||||
UpdateAvailable(ver string) error
|
||||
DisplayFirstUseNotification() error
|
||||
Quit()
|
||||
}
|
@@ -1,33 +0,0 @@
|
||||
package tray
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/jmorganca/ollama/app/assets"
|
||||
"github.com/jmorganca/ollama/app/tray/commontray"
|
||||
)
|
||||
|
||||
func NewTray() (commontray.OllamaTray, error) {
|
||||
extension := ".png"
|
||||
if runtime.GOOS == "windows" {
|
||||
extension = ".ico"
|
||||
}
|
||||
iconName := commontray.UpdateIconName + extension
|
||||
updateIcon, err := assets.GetIcon(iconName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load icon %s: %w", iconName, err)
|
||||
}
|
||||
iconName = commontray.IconName + extension
|
||||
icon, err := assets.GetIcon(iconName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load icon %s: %w", iconName, err)
|
||||
}
|
||||
|
||||
tray, err := InitPlatformTray(icon, updateIcon)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tray, nil
|
||||
}
|
@@ -1,13 +0,0 @@
|
||||
//go:build !windows
|
||||
|
||||
package tray
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/jmorganca/ollama/app/tray/commontray"
|
||||
)
|
||||
|
||||
func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {
|
||||
return nil, fmt.Errorf("NOT IMPLEMENTED YET")
|
||||
}
|
@@ -1,10 +0,0 @@
|
||||
package tray
|
||||
|
||||
import (
|
||||
"github.com/jmorganca/ollama/app/tray/commontray"
|
||||
"github.com/jmorganca/ollama/app/tray/wintray"
|
||||
)
|
||||
|
||||
func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {
|
||||
return wintray.InitTray(icon, updateIcon)
|
||||
}
|
@@ -1,184 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package wintray
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var (
|
||||
quitOnce sync.Once
|
||||
)
|
||||
|
||||
func (t *winTray) Run() {
|
||||
nativeLoop()
|
||||
}
|
||||
|
||||
func nativeLoop() {
|
||||
// Main message pump.
|
||||
slog.Debug("starting event handling loop")
|
||||
m := &struct {
|
||||
WindowHandle windows.Handle
|
||||
Message uint32
|
||||
Wparam uintptr
|
||||
Lparam uintptr
|
||||
Time uint32
|
||||
Pt point
|
||||
LPrivate uint32
|
||||
}{}
|
||||
for {
|
||||
ret, _, err := pGetMessage.Call(uintptr(unsafe.Pointer(m)), 0, 0, 0)
|
||||
|
||||
// If the function retrieves a message other than WM_QUIT, the return value is nonzero.
|
||||
// If the function retrieves the WM_QUIT message, the return value is zero.
|
||||
// If there is an error, the return value is -1
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644936(v=vs.85).aspx
|
||||
switch int32(ret) {
|
||||
case -1:
|
||||
slog.Error(fmt.Sprintf("get message failure: %v", err))
|
||||
return
|
||||
case 0:
|
||||
return
|
||||
default:
|
||||
pTranslateMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
|
||||
pDispatchMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WindowProc callback function that processes messages sent to a window.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms633573(v=vs.85).aspx
|
||||
func (t *winTray) wndProc(hWnd windows.Handle, message uint32, wParam, lParam uintptr) (lResult uintptr) {
|
||||
const (
|
||||
WM_RBUTTONUP = 0x0205
|
||||
WM_LBUTTONUP = 0x0202
|
||||
WM_COMMAND = 0x0111
|
||||
WM_ENDSESSION = 0x0016
|
||||
WM_CLOSE = 0x0010
|
||||
WM_DESTROY = 0x0002
|
||||
WM_MOUSEMOVE = 0x0200
|
||||
WM_LBUTTONDOWN = 0x0201
|
||||
)
|
||||
switch message {
|
||||
case WM_COMMAND:
|
||||
menuItemId := int32(wParam)
|
||||
// https://docs.microsoft.com/en-us/windows/win32/menurc/wm-command#menus
|
||||
switch menuItemId {
|
||||
case quitMenuID:
|
||||
select {
|
||||
case t.callbacks.Quit <- struct{}{}:
|
||||
// should not happen but in case not listening
|
||||
default:
|
||||
slog.Error("no listener on Quit")
|
||||
}
|
||||
case updateMenuID:
|
||||
select {
|
||||
case t.callbacks.Update <- struct{}{}:
|
||||
// should not happen but in case not listening
|
||||
default:
|
||||
slog.Error("no listener on Update")
|
||||
}
|
||||
case diagLogsMenuID:
|
||||
select {
|
||||
case t.callbacks.ShowLogs <- struct{}{}:
|
||||
// should not happen but in case not listening
|
||||
default:
|
||||
slog.Error("no listener on ShowLogs")
|
||||
}
|
||||
default:
|
||||
slog.Debug(fmt.Sprintf("Unexpected menu item id: %d", menuItemId))
|
||||
}
|
||||
case WM_CLOSE:
|
||||
boolRet, _, err := pDestroyWindow.Call(uintptr(t.window))
|
||||
if boolRet == 0 {
|
||||
slog.Error(fmt.Sprintf("failed to destroy window: %s", err))
|
||||
}
|
||||
err = t.wcex.unregister()
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to unregister window %s", err))
|
||||
}
|
||||
case WM_DESTROY:
|
||||
// same as WM_ENDSESSION, but throws 0 exit code after all
|
||||
defer pPostQuitMessage.Call(uintptr(int32(0))) //nolint:errcheck
|
||||
fallthrough
|
||||
case WM_ENDSESSION:
|
||||
t.muNID.Lock()
|
||||
if t.nid != nil {
|
||||
err := t.nid.delete()
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to delete nid: %s", err))
|
||||
}
|
||||
}
|
||||
t.muNID.Unlock()
|
||||
case t.wmSystrayMessage:
|
||||
switch lParam {
|
||||
case WM_MOUSEMOVE, WM_LBUTTONDOWN:
|
||||
// Ignore these...
|
||||
case WM_RBUTTONUP, WM_LBUTTONUP:
|
||||
err := t.showMenu()
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to show menu: %s", err))
|
||||
}
|
||||
case 0x405: // TODO - how is this magic value derived for the notification left click
|
||||
if t.pendingUpdate {
|
||||
select {
|
||||
case t.callbacks.Update <- struct{}{}:
|
||||
// should not happen but in case not listening
|
||||
default:
|
||||
slog.Error("no listener on Update")
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case t.callbacks.DoFirstUse <- struct{}{}:
|
||||
// should not happen but in case not listening
|
||||
default:
|
||||
slog.Error("no listener on DoFirstUse")
|
||||
}
|
||||
}
|
||||
case 0x404: // Middle click or close notification
|
||||
// slog.Debug("doing nothing on close of first time notification")
|
||||
default:
|
||||
// 0x402 also seems common - what is it?
|
||||
slog.Debug(fmt.Sprintf("unmanaged app message, lParm: 0x%x", lParam))
|
||||
}
|
||||
case t.wmTaskbarCreated: // on explorer.exe restarts
|
||||
t.muNID.Lock()
|
||||
err := t.nid.add()
|
||||
if err != nil {
|
||||
slog.Error(fmt.Sprintf("failed to refresh the taskbar on explorer restart: %s", err))
|
||||
}
|
||||
t.muNID.Unlock()
|
||||
default:
|
||||
// Calls the default window procedure to provide default processing for any window messages that an application does not process.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms633572(v=vs.85).aspx
|
||||
lResult, _, _ = pDefWindowProc.Call(
|
||||
uintptr(hWnd),
|
||||
uintptr(message),
|
||||
uintptr(wParam),
|
||||
uintptr(lParam),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *winTray) Quit() {
|
||||
quitOnce.Do(quit)
|
||||
}
|
||||
|
||||
func quit() {
|
||||
boolRet, _, err := pPostMessage.Call(
|
||||
uintptr(wt.window),
|
||||
WM_CLOSE,
|
||||
0,
|
||||
0,
|
||||
)
|
||||
if boolRet == 0 {
|
||||
slog.Error(fmt.Sprintf("failed to post close message on shutdown %s", err))
|
||||
}
|
||||
}
|
@@ -1,71 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package wintray
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
const (
|
||||
updatAvailableMenuID = 1
|
||||
updateMenuID = updatAvailableMenuID + 1
|
||||
separatorMenuID = updateMenuID + 1
|
||||
diagLogsMenuID = separatorMenuID + 1
|
||||
diagSeparatorMenuID = diagLogsMenuID + 1
|
||||
quitMenuID = diagSeparatorMenuID + 1
|
||||
)
|
||||
|
||||
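// initMenus populates the default tray menu: a "view logs" entry, a separator, and a "quit" entry.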
func (t *winTray) initMenus() error {
|
||||
if err := t.addOrUpdateMenuItem(diagLogsMenuID, 0, diagLogsMenuTitle, false); err != nil {
|
||||
return fmt.Errorf("unable to create menu entries %w\n", err)
|
||||
}
|
||||
if err := t.addSeparatorMenuItem(diagSeparatorMenuID, 0); err != nil {
|
||||
return fmt.Errorf("unable to create menu entries %w", err)
|
||||
}
|
||||
if err := t.addOrUpdateMenuItem(quitMenuID, 0, quitMenuTitle, false); err != nil {
|
||||
return fmt.Errorf("unable to create menu entries %w\n", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
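// UpdateAvailable adds the update-related menu entries, switches to the update icon,
// and shows a one-time notification that version ver is ready to install.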
func (t *winTray) UpdateAvailable(ver string) error {
|
||||
if !t.updateNotified {
|
||||
slog.Debug("updating menu and sending notification for new update")
|
||||
if err := t.addOrUpdateMenuItem(updatAvailableMenuID, 0, updateAvailableMenuTitle, true); err != nil {
|
||||
return fmt.Errorf("unable to create menu entries %w", err)
|
||||
}
|
||||
if err := t.addOrUpdateMenuItem(updateMenuID, 0, updateMenutTitle, false); err != nil {
|
||||
return fmt.Errorf("unable to create menu entries %w", err)
|
||||
}
|
||||
if err := t.addSeparatorMenuItem(separatorMenuID, 0); err != nil {
|
||||
return fmt.Errorf("unable to create menu entries %w", err)
|
||||
}
|
||||
iconFilePath, err := iconBytesToFilePath(wt.updateIcon)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write icon data to temp file: %w", err)
|
||||
}
|
||||
if err := wt.setIcon(iconFilePath); err != nil {
|
||||
return fmt.Errorf("unable to set icon: %w", err)
|
||||
}
|
||||
t.updateNotified = true
|
||||
|
||||
t.pendingUpdate = true
|
||||
// Now pop up the notification
|
||||
t.muNID.Lock()
|
||||
defer t.muNID.Unlock()
|
||||
copy(t.nid.InfoTitle[:], windows.StringToUTF16(updateTitle))
|
||||
copy(t.nid.Info[:], windows.StringToUTF16(fmt.Sprintf(updateMessage, ver)))
|
||||
t.nid.Flags |= NIF_INFO
|
||||
t.nid.Timeout = 10
|
||||
t.nid.Size = uint32(unsafe.Sizeof(*wt.nid))
|
||||
err = t.nid.modify()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -1,15 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package wintray
|
||||
|
||||
const (
|
||||
firstTimeTitle = "Ollama is running"
|
||||
firstTimeMessage = "Click here to get started"
|
||||
updateTitle = "Update available"
|
||||
updateMessage = "Ollama version %s is ready to install"
|
||||
|
||||
quitMenuTitle = "Quit Ollama"
|
||||
updateAvailableMenuTitle = "An update is available"
|
||||
updateMenutTitle = "Restart to update"
|
||||
diagLogsMenuTitle = "View logs"
|
||||
)
|
@@ -1,66 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package wintray
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Contains information that the system needs to display notifications in the notification area.
|
||||
// Used by Shell_NotifyIcon.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/bb773352(v=vs.85).aspx
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159
|
||||
type notifyIconData struct {
|
||||
Size uint32
|
||||
Wnd windows.Handle
|
||||
ID, Flags, CallbackMessage uint32
|
||||
Icon windows.Handle
|
||||
Tip [128]uint16
|
||||
State, StateMask uint32
|
||||
Info [256]uint16
|
||||
// Timeout, Version uint32
|
||||
Timeout uint32
|
||||
|
||||
InfoTitle [64]uint16
|
||||
InfoFlags uint32
|
||||
GuidItem windows.GUID
|
||||
BalloonIcon windows.Handle
|
||||
}
|
||||
|
||||
func (nid *notifyIconData) add() error {
|
||||
const NIM_ADD = 0x00000000
|
||||
res, _, err := pShellNotifyIcon.Call(
|
||||
uintptr(NIM_ADD),
|
||||
uintptr(unsafe.Pointer(nid)),
|
||||
)
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nid *notifyIconData) modify() error {
|
||||
const NIM_MODIFY = 0x00000001
|
||||
res, _, err := pShellNotifyIcon.Call(
|
||||
uintptr(NIM_MODIFY),
|
||||
uintptr(unsafe.Pointer(nid)),
|
||||
)
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nid *notifyIconData) delete() error {
|
||||
const NIM_DELETE = 0x00000002
|
||||
res, _, err := pShellNotifyIcon.Call(
|
||||
uintptr(NIM_DELETE),
|
||||
uintptr(unsafe.Pointer(nid)),
|
||||
)
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -1,485 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package wintray
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/jmorganca/ollama/app/tray/commontray"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Helpful sources: https://github.com/golang/exp/blob/master/shiny/driver/internal/win32
|
||||
|
||||
// Contains information about loaded resources
|
||||
type winTray struct {
|
||||
instance,
|
||||
icon,
|
||||
cursor,
|
||||
window windows.Handle
|
||||
|
||||
loadedImages map[string]windows.Handle
|
||||
muLoadedImages sync.RWMutex
|
||||
|
||||
// menus keeps track of the submenus keyed by the menu item ID, plus 0
|
||||
// which corresponds to the main popup menu.
|
||||
menus map[uint32]windows.Handle
|
||||
muMenus sync.RWMutex
|
||||
menuOf map[uint32]windows.Handle
|
||||
muMenuOf sync.RWMutex
|
||||
// menuItemIcons maintains the bitmap of each menu item (if applies). It's
|
||||
// needed to show the icon correctly when showing a previously hidden menu
|
||||
// item again.
|
||||
// menuItemIcons map[uint32]windows.Handle
|
||||
// muMenuItemIcons sync.RWMutex
|
||||
visibleItems map[uint32][]uint32
|
||||
muVisibleItems sync.RWMutex
|
||||
|
||||
nid *notifyIconData
|
||||
muNID sync.RWMutex
|
||||
wcex *wndClassEx
|
||||
|
||||
wmSystrayMessage,
|
||||
wmTaskbarCreated uint32
|
||||
|
||||
pendingUpdate bool
|
||||
updateNotified bool // Only pop up the notification once - TODO consider daily nag?
|
||||
// Callbacks
|
||||
callbacks commontray.Callbacks
|
||||
normalIcon []byte
|
||||
updateIcon []byte
|
||||
}
|
||||
|
||||
var wt winTray
|
||||
|
||||
func (t *winTray) GetCallbacks() commontray.Callbacks {
|
||||
return t.callbacks
|
||||
}
|
||||
|
||||
func InitTray(icon, updateIcon []byte) (*winTray, error) {
|
||||
wt.callbacks.Quit = make(chan struct{})
|
||||
wt.callbacks.Update = make(chan struct{})
|
||||
wt.callbacks.ShowLogs = make(chan struct{})
|
||||
wt.callbacks.DoFirstUse = make(chan struct{})
|
||||
wt.normalIcon = icon
|
||||
wt.updateIcon = updateIcon
|
||||
if err := wt.initInstance(); err != nil {
|
||||
return nil, fmt.Errorf("Unable to init instance: %w\n", err)
|
||||
}
|
||||
|
||||
if err := wt.createMenu(); err != nil {
|
||||
return nil, fmt.Errorf("Unable to create menu: %w\n", err)
|
||||
}
|
||||
|
||||
iconFilePath, err := iconBytesToFilePath(wt.normalIcon)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to write icon data to temp file: %w", err)
|
||||
}
|
||||
if err := wt.setIcon(iconFilePath); err != nil {
|
||||
return nil, fmt.Errorf("Unable to set icon: %w", err)
|
||||
}
|
||||
|
||||
return &wt, wt.initMenus()
|
||||
}
|
||||
|
||||
func (t *winTray) initInstance() error {
|
||||
const (
|
||||
className = "OllamaClass"
|
||||
windowName = ""
|
||||
)
|
||||
|
||||
t.wmSystrayMessage = WM_USER + 1
|
||||
t.visibleItems = make(map[uint32][]uint32)
|
||||
t.menus = make(map[uint32]windows.Handle)
|
||||
t.menuOf = make(map[uint32]windows.Handle)
|
||||
|
||||
t.loadedImages = make(map[string]windows.Handle)
|
||||
|
||||
taskbarEventNamePtr, _ := windows.UTF16PtrFromString("TaskbarCreated")
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644947
|
||||
res, _, err := pRegisterWindowMessage.Call(
|
||||
uintptr(unsafe.Pointer(taskbarEventNamePtr)),
|
||||
)
|
||||
if res == 0 { // RegisterWindowMessage returns a message ID in 0xC000-0xFFFF on success, 0 on failure
|
||||
return fmt.Errorf("failed to register window: %w", err)
|
||||
}
|
||||
t.wmTaskbarCreated = uint32(res)
|
||||
|
||||
instanceHandle, _, err := pGetModuleHandle.Call(0)
|
||||
if instanceHandle == 0 {
|
||||
return err
|
||||
}
|
||||
t.instance = windows.Handle(instanceHandle)
|
||||
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms648072(v=vs.85).aspx
|
||||
iconHandle, _, err := pLoadIcon.Call(0, uintptr(IDI_APPLICATION))
|
||||
if iconHandle == 0 {
|
||||
return err
|
||||
}
|
||||
t.icon = windows.Handle(iconHandle)
|
||||
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms648391(v=vs.85).aspx
|
||||
cursorHandle, _, err := pLoadCursor.Call(0, uintptr(IDC_ARROW))
|
||||
if cursorHandle == 0 {
|
||||
return err
|
||||
}
|
||||
t.cursor = windows.Handle(cursorHandle)
|
||||
|
||||
classNamePtr, err := windows.UTF16PtrFromString(className)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
windowNamePtr, err := windows.UTF16PtrFromString(windowName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.wcex = &wndClassEx{
|
||||
Style: CS_HREDRAW | CS_VREDRAW,
|
||||
WndProc: windows.NewCallback(t.wndProc),
|
||||
Instance: t.instance,
|
||||
Icon: t.icon,
|
||||
Cursor: t.cursor,
|
||||
Background: windows.Handle(6), // (COLOR_WINDOW + 1)
|
||||
ClassName: classNamePtr,
|
||||
IconSm: t.icon,
|
||||
}
|
||||
if err := t.wcex.register(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
windowHandle, _, err := pCreateWindowEx.Call(
|
||||
uintptr(0),
|
||||
uintptr(unsafe.Pointer(classNamePtr)),
|
||||
uintptr(unsafe.Pointer(windowNamePtr)),
|
||||
uintptr(WS_OVERLAPPEDWINDOW),
|
||||
uintptr(CW_USEDEFAULT),
|
||||
uintptr(CW_USEDEFAULT),
|
||||
uintptr(CW_USEDEFAULT),
|
||||
uintptr(CW_USEDEFAULT),
|
||||
uintptr(0),
|
||||
uintptr(0),
|
||||
uintptr(t.instance),
|
||||
uintptr(0),
|
||||
)
|
||||
if windowHandle == 0 {
|
||||
return err
|
||||
}
|
||||
t.window = windows.Handle(windowHandle)
|
||||
|
||||
pShowWindow.Call(uintptr(t.window), uintptr(SW_HIDE)) //nolint:errcheck
|
||||
|
||||
boolRet, _, err := pUpdateWindow.Call(uintptr(t.window))
|
||||
if boolRet == 0 {
|
||||
slog.Error(fmt.Sprintf("failed to update window: %s", err))
|
||||
}
|
||||
|
||||
t.muNID.Lock()
|
||||
defer t.muNID.Unlock()
|
||||
t.nid = ¬ifyIconData{
|
||||
Wnd: windows.Handle(t.window),
|
||||
ID: 100,
|
||||
Flags: NIF_MESSAGE,
|
||||
CallbackMessage: t.wmSystrayMessage,
|
||||
}
|
||||
t.nid.Size = uint32(unsafe.Sizeof(*t.nid))
|
||||
|
||||
return t.nid.add()
|
||||
}
|
||||
|
||||
func (t *winTray) createMenu() error {
|
||||
|
||||
menuHandle, _, err := pCreatePopupMenu.Call()
|
||||
if menuHandle == 0 {
|
||||
return err
|
||||
}
|
||||
t.menus[0] = windows.Handle(menuHandle)
|
||||
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647575(v=vs.85).aspx
|
||||
mi := struct {
|
||||
Size, Mask, Style, Max uint32
|
||||
Background windows.Handle
|
||||
ContextHelpID uint32
|
||||
MenuData uintptr
|
||||
}{
|
||||
Mask: MIM_APPLYTOSUBMENUS,
|
||||
}
|
||||
mi.Size = uint32(unsafe.Sizeof(mi))
|
||||
|
||||
res, _, err := pSetMenuInfo.Call(
|
||||
uintptr(t.menus[0]),
|
||||
uintptr(unsafe.Pointer(&mi)),
|
||||
)
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Contains information about a menu item.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647578(v=vs.85).aspx
|
||||
type menuItemInfo struct {
|
||||
Size, Mask, Type, State uint32
|
||||
ID uint32
|
||||
SubMenu, Checked, Unchecked windows.Handle
|
||||
ItemData uintptr
|
||||
TypeData *uint16
|
||||
Cch uint32
|
||||
BMPItem windows.Handle
|
||||
}
|
||||
|
||||
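// addOrUpdateMenuItem updates a menu item in place if it is already visible under parentId,
// otherwise inserts it at its sorted position; a submenu registered under the same ID is attached automatically.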
func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title string, disabled bool) error {
|
||||
titlePtr, err := windows.UTF16PtrFromString(title)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mi := menuItemInfo{
|
||||
Mask: MIIM_FTYPE | MIIM_STRING | MIIM_ID | MIIM_STATE,
|
||||
Type: MFT_STRING,
|
||||
ID: uint32(menuItemId),
|
||||
TypeData: titlePtr,
|
||||
Cch: uint32(len(title)),
|
||||
}
|
||||
mi.Size = uint32(unsafe.Sizeof(mi))
|
||||
if disabled {
|
||||
mi.State |= MFS_DISABLED
|
||||
}
|
||||
|
||||
var res uintptr
|
||||
t.muMenus.RLock()
|
||||
menu := t.menus[parentId]
|
||||
t.muMenus.RUnlock()
|
||||
if t.getVisibleItemIndex(parentId, menuItemId) != -1 {
|
||||
// We set the menu item info based on the menuID
|
||||
boolRet, _, err := pSetMenuItemInfo.Call(
|
||||
uintptr(menu),
|
||||
uintptr(menuItemId),
|
||||
0,
|
||||
uintptr(unsafe.Pointer(&mi)),
|
||||
)
|
||||
if boolRet == 0 {
|
||||
return fmt.Errorf("failed to set menu item: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if res == 0 {
|
||||
// Menu item does not already exist, create it
|
||||
t.muMenus.RLock()
|
||||
submenu, exists := t.menus[menuItemId]
|
||||
t.muMenus.RUnlock()
|
||||
if exists {
|
||||
mi.Mask |= MIIM_SUBMENU
|
||||
mi.SubMenu = submenu
|
||||
}
|
||||
t.addToVisibleItems(parentId, menuItemId)
|
||||
position := t.getVisibleItemIndex(parentId, menuItemId)
|
||||
res, _, err = pInsertMenuItem.Call(
|
||||
uintptr(menu),
|
||||
uintptr(position),
|
||||
1,
|
||||
uintptr(unsafe.Pointer(&mi)),
|
||||
)
|
||||
if res == 0 {
|
||||
t.delFromVisibleItems(parentId, menuItemId)
|
||||
return err
|
||||
}
|
||||
t.muMenuOf.Lock()
|
||||
t.menuOf[menuItemId] = menu
|
||||
t.muMenuOf.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *winTray) addSeparatorMenuItem(menuItemId, parentId uint32) error {
|
||||
|
||||
mi := menuItemInfo{
|
||||
Mask: MIIM_FTYPE | MIIM_ID | MIIM_STATE,
|
||||
Type: MFT_SEPARATOR,
|
||||
ID: uint32(menuItemId),
|
||||
}
|
||||
|
||||
mi.Size = uint32(unsafe.Sizeof(mi))
|
||||
|
||||
t.addToVisibleItems(parentId, menuItemId)
|
||||
position := t.getVisibleItemIndex(parentId, menuItemId)
|
||||
t.muMenus.RLock()
|
||||
menu := uintptr(t.menus[parentId])
|
||||
t.muMenus.RUnlock()
|
||||
res, _, err := pInsertMenuItem.Call(
|
||||
menu,
|
||||
uintptr(position),
|
||||
1,
|
||||
uintptr(unsafe.Pointer(&mi)),
|
||||
)
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// func (t *winTray) hideMenuItem(menuItemId, parentId uint32) error {
|
||||
// const ERROR_SUCCESS syscall.Errno = 0
|
||||
|
||||
// t.muMenus.RLock()
|
||||
// menu := uintptr(t.menus[parentId])
|
||||
// t.muMenus.RUnlock()
|
||||
// res, _, err := pRemoveMenu.Call(
|
||||
// menu,
|
||||
// uintptr(menuItemId),
|
||||
// MF_BYCOMMAND,
|
||||
// )
|
||||
// if res == 0 && err.(syscall.Errno) != ERROR_SUCCESS {
|
||||
// return err
|
||||
// }
|
||||
// t.delFromVisibleItems(parentId, menuItemId)
|
||||
|
||||
// return nil
|
||||
// }
|
||||
|
||||
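// showMenu pops up the tray context menu at the current cursor position.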
func (t *winTray) showMenu() error {
|
||||
p := point{}
|
||||
boolRet, _, err := pGetCursorPos.Call(uintptr(unsafe.Pointer(&p)))
|
||||
if boolRet == 0 {
|
||||
return err
|
||||
}
|
||||
boolRet, _, err = pSetForegroundWindow.Call(uintptr(t.window))
|
||||
if boolRet == 0 {
|
||||
slog.Warn(fmt.Sprintf("failed to bring menu to foreground: %s", err))
|
||||
}
|
||||
|
||||
boolRet, _, err = pTrackPopupMenu.Call(
|
||||
uintptr(t.menus[0]),
|
||||
TPM_BOTTOMALIGN|TPM_LEFTALIGN,
|
||||
uintptr(p.X),
|
||||
uintptr(p.Y),
|
||||
0,
|
||||
uintptr(t.window),
|
||||
0,
|
||||
)
|
||||
if boolRet == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *winTray) delFromVisibleItems(parent, val uint32) {
|
||||
t.muVisibleItems.Lock()
|
||||
defer t.muVisibleItems.Unlock()
|
||||
visibleItems := t.visibleItems[parent]
|
||||
for i, itemval := range visibleItems {
|
||||
if val == itemval {
|
||||
t.visibleItems[parent] = append(visibleItems[:i], visibleItems[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *winTray) addToVisibleItems(parent, val uint32) {
|
||||
t.muVisibleItems.Lock()
|
||||
defer t.muVisibleItems.Unlock()
|
||||
if visibleItems, exists := t.visibleItems[parent]; !exists {
|
||||
t.visibleItems[parent] = []uint32{val}
|
||||
} else {
|
||||
newvisible := append(visibleItems, val)
|
||||
sort.Slice(newvisible, func(i, j int) bool { return newvisible[i] < newvisible[j] })
|
||||
t.visibleItems[parent] = newvisible
|
||||
}
|
||||
}
|
||||
|
||||
func (t *winTray) getVisibleItemIndex(parent, val uint32) int {
|
||||
t.muVisibleItems.RLock()
|
||||
defer t.muVisibleItems.RUnlock()
|
||||
for i, itemval := range t.visibleItems[parent] {
|
||||
if val == itemval {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
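// iconBytesToFilePath writes the icon bytes to a temporary file named after their MD5 hash
// and returns the path, reusing the file if it already exists.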
func iconBytesToFilePath(iconBytes []byte) (string, error) {
|
||||
bh := md5.Sum(iconBytes)
|
||||
dataHash := hex.EncodeToString(bh[:])
|
||||
iconFilePath := filepath.Join(os.TempDir(), "ollama_temp_icon_"+dataHash)
|
||||
|
||||
if _, err := os.Stat(iconFilePath); os.IsNotExist(err) {
|
||||
if err := os.WriteFile(iconFilePath, iconBytes, 0644); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return iconFilePath, nil
|
||||
}
|
||||
|
||||
// Loads an image from file and shows it in tray.
|
||||
// Shell_NotifyIcon: https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159(v=vs.85).aspx
|
||||
func (t *winTray) setIcon(src string) error {
|
||||
|
||||
h, err := t.loadIconFrom(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.muNID.Lock()
|
||||
defer t.muNID.Unlock()
|
||||
t.nid.Icon = h
|
||||
t.nid.Flags |= NIF_ICON
|
||||
t.nid.Size = uint32(unsafe.Sizeof(*t.nid))
|
||||
|
||||
return t.nid.modify()
|
||||
}
|
||||
|
||||
// Loads an image from file to be shown in tray or menu item.
|
||||
// LoadImage: https://msdn.microsoft.com/en-us/library/windows/desktop/ms648045(v=vs.85).aspx
|
||||
func (t *winTray) loadIconFrom(src string) (windows.Handle, error) {
|
||||
|
||||
// Save and reuse handles of loaded images
|
||||
t.muLoadedImages.RLock()
|
||||
h, ok := t.loadedImages[src]
|
||||
t.muLoadedImages.RUnlock()
|
||||
if !ok {
|
||||
srcPtr, err := windows.UTF16PtrFromString(src)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
res, _, err := pLoadImage.Call(
|
||||
0,
|
||||
uintptr(unsafe.Pointer(srcPtr)),
|
||||
IMAGE_ICON,
|
||||
0,
|
||||
0,
|
||||
LR_LOADFROMFILE|LR_DEFAULTSIZE,
|
||||
)
|
||||
if res == 0 {
|
||||
return 0, err
|
||||
}
|
||||
h = windows.Handle(res)
|
||||
t.muLoadedImages.Lock()
|
||||
t.loadedImages[src] = h
|
||||
t.muLoadedImages.Unlock()
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (t *winTray) DisplayFirstUseNotification() error {
|
||||
t.muNID.Lock()
|
||||
defer t.muNID.Unlock()
|
||||
copy(t.nid.InfoTitle[:], windows.StringToUTF16(firstTimeTitle))
|
||||
copy(t.nid.Info[:], windows.StringToUTF16(firstTimeMessage))
|
||||
t.nid.Flags |= NIF_INFO
|
||||
t.nid.Size = uint32(unsafe.Sizeof(*wt.nid))
|
||||
|
||||
return t.nid.modify()
|
||||
}
|
@@ -1,89 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package wintray
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
var (
|
||||
k32 = windows.NewLazySystemDLL("Kernel32.dll")
|
||||
u32 = windows.NewLazySystemDLL("User32.dll")
|
||||
s32 = windows.NewLazySystemDLL("Shell32.dll")
|
||||
|
||||
pCreatePopupMenu = u32.NewProc("CreatePopupMenu")
|
||||
pCreateWindowEx = u32.NewProc("CreateWindowExW")
|
||||
pDefWindowProc = u32.NewProc("DefWindowProcW")
|
||||
pDestroyWindow = u32.NewProc("DestroyWindow")
|
||||
pDispatchMessage = u32.NewProc("DispatchMessageW")
|
||||
pGetCursorPos = u32.NewProc("GetCursorPos")
|
||||
pGetMessage = u32.NewProc("GetMessageW")
|
||||
pGetModuleHandle = k32.NewProc("GetModuleHandleW")
|
||||
pInsertMenuItem = u32.NewProc("InsertMenuItemW")
|
||||
pLoadCursor = u32.NewProc("LoadCursorW")
|
||||
pLoadIcon = u32.NewProc("LoadIconW")
|
||||
pLoadImage = u32.NewProc("LoadImageW")
|
||||
pPostMessage = u32.NewProc("PostMessageW")
|
||||
pPostQuitMessage = u32.NewProc("PostQuitMessage")
|
||||
pRegisterClass = u32.NewProc("RegisterClassExW")
|
||||
pRegisterWindowMessage = u32.NewProc("RegisterWindowMessageW")
|
||||
pSetForegroundWindow = u32.NewProc("SetForegroundWindow")
|
||||
pSetMenuInfo = u32.NewProc("SetMenuInfo")
|
||||
pSetMenuItemInfo = u32.NewProc("SetMenuItemInfoW")
|
||||
pShellNotifyIcon = s32.NewProc("Shell_NotifyIconW")
|
||||
pShowWindow = u32.NewProc("ShowWindow")
|
||||
pTrackPopupMenu = u32.NewProc("TrackPopupMenu")
|
||||
pTranslateMessage = u32.NewProc("TranslateMessage")
|
||||
pUnregisterClass = u32.NewProc("UnregisterClassW")
|
||||
pUpdateWindow = u32.NewProc("UpdateWindow")
|
||||
)
|
||||
|
||||
const (
|
||||
CS_HREDRAW = 0x0002
|
||||
CS_VREDRAW = 0x0001
|
||||
CW_USEDEFAULT = 0x80000000
|
||||
IDC_ARROW = 32512 // Standard arrow
|
||||
IDI_APPLICATION = 32512
|
||||
IMAGE_ICON = 1 // Loads an icon
|
||||
LR_DEFAULTSIZE = 0x00000040 // Loads default-size icon for windows(SM_CXICON x SM_CYICON) if cx, cy are set to zero
|
||||
LR_LOADFROMFILE = 0x00000010 // Loads the stand-alone image from the file
|
||||
MF_BYCOMMAND = 0x00000000
|
||||
MFS_DISABLED = 0x00000003
|
||||
MFT_SEPARATOR = 0x00000800
|
||||
MFT_STRING = 0x00000000
|
||||
MIIM_BITMAP = 0x00000080
|
||||
MIIM_FTYPE = 0x00000100
|
||||
MIIM_ID = 0x00000002
|
||||
MIIM_STATE = 0x00000001
|
||||
MIIM_STRING = 0x00000040
|
||||
MIIM_SUBMENU = 0x00000004
|
||||
MIM_APPLYTOSUBMENUS = 0x80000000
|
||||
NIF_ICON = 0x00000002
|
||||
NIF_INFO = 0x00000010
|
||||
NIF_MESSAGE = 0x00000001
|
||||
SW_HIDE = 0
|
||||
TPM_BOTTOMALIGN = 0x0020
|
||||
TPM_LEFTALIGN = 0x0000
|
||||
WM_CLOSE = 0x0010
|
||||
WM_USER = 0x0400
|
||||
WS_CAPTION = 0x00C00000
|
||||
WS_MAXIMIZEBOX = 0x00010000
|
||||
WS_MINIMIZEBOX = 0x00020000
|
||||
WS_OVERLAPPED = 0x00000000
|
||||
WS_OVERLAPPEDWINDOW = WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX
|
||||
WS_SYSMENU = 0x00080000
|
||||
WS_THICKFRAME = 0x00040000
|
||||
)
|
||||
|
||||
// Not sure if this is actually needed on windows
|
||||
func init() {
|
||||
runtime.LockOSThread()
|
||||
}
|
||||
|
||||
// The POINT structure defines the x- and y- coordinates of a point.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/dd162805(v=vs.85).aspx
|
||||
type point struct {
|
||||
X, Y int32
|
||||
}
|
@@ -1,45 +0,0 @@
|
||||
//go:build windows
|
||||
|
||||
package wintray
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// Contains window class information.
|
||||
// It is used with the RegisterClassEx and GetClassInfoEx functions.
|
||||
// https://msdn.microsoft.com/en-us/library/ms633577.aspx
|
||||
type wndClassEx struct {
|
||||
Size, Style uint32
|
||||
WndProc uintptr
|
||||
ClsExtra, WndExtra int32
|
||||
Instance, Icon, Cursor, Background windows.Handle
|
||||
MenuName, ClassName *uint16
|
||||
IconSm windows.Handle
|
||||
}
|
||||
|
||||
// Registers a window class for subsequent use in calls to the CreateWindow or CreateWindowEx function.
|
||||
// https://msdn.microsoft.com/en-us/library/ms633587.aspx
|
||||
func (w *wndClassEx) register() error {
|
||||
w.Size = uint32(unsafe.Sizeof(*w))
|
||||
res, _, err := pRegisterClass.Call(uintptr(unsafe.Pointer(w)))
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unregisters a window class, freeing the memory required for the class.
|
||||
// https://msdn.microsoft.com/en-us/library/ms644899.aspx
|
||||
func (w *wndClassEx) unregister() error {
|
||||
res, _, err := pUnregisterClass.Call(
|
||||
uintptr(unsafe.Pointer(w.ClassName)),
|
||||
uintptr(w.Instance),
|
||||
)
|
||||
if res == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
61
auth/auth.go
@@ -1,61 +0,0 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
const defaultPrivateKey = "id_ed25519"
|
||||
|
||||
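// NewNonce reads length random bytes from r and returns them base64 (raw URL) encoded.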
func NewNonce(r io.Reader, length int) (string, error) {
|
||||
nonce := make([]byte, length)
|
||||
if _, err := io.ReadFull(r, nonce); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return base64.RawURLEncoding.EncodeToString(nonce), nil
|
||||
}
|
||||
|
||||
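// Sign signs bts with the private key at ~/.ollama/id_ed25519 and returns the result
// formatted as "<public key>:<base64-encoded signature>".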
func Sign(ctx context.Context, bts []byte) (string, error) {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
keyPath := filepath.Join(home, ".ollama", defaultPrivateKey)
|
||||
|
||||
privateKeyFile, err := os.ReadFile(keyPath)
|
||||
if err != nil {
|
||||
slog.Info(fmt.Sprintf("Failed to load private key: %v", err))
|
||||
return "", err
|
||||
}
|
||||
|
||||
privateKey, err := ssh.ParsePrivateKey(privateKeyFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// get the pubkey, but remove the type
|
||||
publicKey := ssh.MarshalAuthorizedKey(privateKey.PublicKey())
|
||||
parts := bytes.Split(publicKey, []byte(" "))
|
||||
if len(parts) < 2 {
|
||||
return "", fmt.Errorf("malformed public key")
|
||||
}
|
||||
|
||||
signedData, err := privateKey.Sign(rand.Reader, bts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// signature is <pubkey>:<signature>
|
||||
return fmt.Sprintf("%s:%s", bytes.TrimSpace(parts[1]), base64.StdEncoding.EncodeToString(signedData.Blob)), nil
|
||||
}
|
936
cmd/cmd.go
@@ -1,663 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
"github.com/jmorganca/ollama/progress"
|
||||
"github.com/jmorganca/ollama/readline"
|
||||
)
|
||||
|
||||
type MultilineState int
|
||||
|
||||
const (
|
||||
MultilineNone MultilineState = iota
|
||||
MultilinePrompt
|
||||
MultilineSystem
|
||||
MultilineTemplate
|
||||
)
|
||||
|
||||
func loadModel(cmd *cobra.Command, opts *runOptions) error {
|
||||
client, err := api.ClientFromEnvironment()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p := progress.NewProgress(os.Stderr)
|
||||
defer p.StopAndClear()
|
||||
|
||||
spinner := progress.NewSpinner("")
|
||||
p.Add("", spinner)
|
||||
|
||||
showReq := api.ShowRequest{Name: opts.Model}
|
||||
showResp, err := client.Show(cmd.Context(), &showReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts.MultiModal = slices.Contains(showResp.Details.Families, "clip")
|
||||
opts.ParentModel = showResp.Details.ParentModel
|
||||
|
||||
if len(showResp.Messages) > 0 {
|
||||
opts.Messages = append(opts.Messages, showResp.Messages...)
|
||||
}
|
||||
|
||||
chatReq := &api.ChatRequest{
|
||||
Model: opts.Model,
|
||||
Messages: []api.Message{},
|
||||
}
|
||||
err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
|
||||
p.StopAndClear()
|
||||
if len(opts.Messages) > 0 {
|
||||
for _, msg := range opts.Messages {
|
||||
switch msg.Role {
|
||||
case "user":
|
||||
fmt.Printf(">>> %s\n", msg.Content)
|
||||
case "assistant":
|
||||
state := &displayResponseState{}
|
||||
displayResponse(msg.Content, opts.WordWrap, state)
|
||||
fmt.Println()
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateInteractive(cmd *cobra.Command, opts runOptions) error {
|
||||
opts.Messages = make([]api.Message, 0)
|
||||
|
||||
err := loadModel(cmd, &opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
usage := func() {
|
||||
fmt.Fprintln(os.Stderr, "Available Commands:")
|
||||
fmt.Fprintln(os.Stderr, " /set Set session variables")
|
||||
fmt.Fprintln(os.Stderr, " /show Show model information")
|
||||
fmt.Fprintln(os.Stderr, " /load <model> Load a session or model")
|
||||
fmt.Fprintln(os.Stderr, " /save <model> Save your current session")
|
||||
fmt.Fprintln(os.Stderr, " /bye Exit")
|
||||
fmt.Fprintln(os.Stderr, " /?, /help Help for a command")
|
||||
fmt.Fprintln(os.Stderr, " /? shortcuts Help for keyboard shortcuts")
|
||||
fmt.Fprintln(os.Stderr, "")
|
||||
fmt.Fprintln(os.Stderr, "Use \"\"\" to begin a multi-line message.")
|
||||
|
||||
if opts.MultiModal {
|
||||
fmt.Fprintf(os.Stderr, "Use %s to include .jpg or .png images.\n", filepath.FromSlash("/path/to/file"))
|
||||
}
|
||||
|
||||
fmt.Fprintln(os.Stderr, "")
|
||||
}
|
||||
|
||||
usageSet := func() {
|
||||
fmt.Fprintln(os.Stderr, "Available Commands:")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter ... Set a parameter")
|
||||
fmt.Fprintln(os.Stderr, " /set system <string> Set system message")
|
||||
fmt.Fprintln(os.Stderr, " /set template <string> Set prompt template")
|
||||
fmt.Fprintln(os.Stderr, " /set history Enable history")
|
||||
fmt.Fprintln(os.Stderr, " /set nohistory Disable history")
|
||||
fmt.Fprintln(os.Stderr, " /set wordwrap Enable wordwrap")
|
||||
fmt.Fprintln(os.Stderr, " /set nowordwrap Disable wordwrap")
|
||||
fmt.Fprintln(os.Stderr, " /set format json Enable JSON mode")
|
||||
fmt.Fprintln(os.Stderr, " /set noformat Disable formatting")
|
||||
fmt.Fprintln(os.Stderr, " /set verbose Show LLM stats")
|
||||
fmt.Fprintln(os.Stderr, " /set quiet Disable LLM stats")
|
||||
fmt.Fprintln(os.Stderr, "")
|
||||
}
|
||||
|
||||
usageShortcuts := func() {
|
||||
fmt.Fprintln(os.Stderr, "Available keyboard shortcuts:")
|
||||
fmt.Fprintln(os.Stderr, " Ctrl + a Move to the beginning of the line (Home)")
|
||||
fmt.Fprintln(os.Stderr, " Ctrl + e Move to the end of the line (End)")
|
||||
fmt.Fprintln(os.Stderr, " Alt + b Move back (left) one word")
|
||||
fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
|
||||
fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
|
||||
fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
|
||||
fmt.Fprintln(os.Stderr, "")
|
||||
fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
|
||||
fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")
|
||||
fmt.Fprintln(os.Stderr, " Ctrl + d Exit ollama (/bye)")
|
||||
fmt.Fprintln(os.Stderr, "")
|
||||
}
|
||||
|
||||
usageShow := func() {
|
||||
fmt.Fprintln(os.Stderr, "Available Commands:")
|
||||
fmt.Fprintln(os.Stderr, " /show info Show details for this model")
|
||||
fmt.Fprintln(os.Stderr, " /show license Show model license")
|
||||
fmt.Fprintln(os.Stderr, " /show modelfile Show Modelfile for this model")
|
||||
fmt.Fprintln(os.Stderr, " /show parameters Show parameters for this model")
|
||||
fmt.Fprintln(os.Stderr, " /show system Show system message")
|
||||
fmt.Fprintln(os.Stderr, " /show template Show prompt template")
|
||||
fmt.Fprintln(os.Stderr, "")
|
||||
}
|
||||
|
||||
// only list out the most common parameters
|
||||
usageParameters := func() {
|
||||
fmt.Fprintln(os.Stderr, "Available Parameters:")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter seed <int> Random number seed")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter num_predict <int> Max number of tokens to predict")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter top_k <int> Pick from top k num of tokens")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter top_p <float> Pick token based on sum of probabilities")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter num_ctx <int> Set the context size")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter temperature <float> Set creativity level")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter repeat_penalty <float> How strongly to penalize repetitions")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter repeat_last_n <int> Set how far back to look for repetitions")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter num_gpu <int> The number of layers to send to the GPU")
|
||||
fmt.Fprintln(os.Stderr, " /set parameter stop \"<string>\", ... Set the stop parameters")
|
||||
fmt.Fprintln(os.Stderr, "")
|
||||
}
|
||||
|
||||
scanner, err := readline.New(readline.Prompt{
|
||||
Prompt: ">>> ",
|
||||
AltPrompt: "... ",
|
||||
Placeholder: "Send a message (/? for help)",
|
||||
AltPlaceholder: `Use """ to end multi-line input`,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Print(readline.StartBracketedPaste)
|
||||
defer fmt.Printf(readline.EndBracketedPaste)
|
||||
|
||||
var sb strings.Builder
|
||||
var multiline MultilineState
|
||||
|
||||
for {
|
||||
line, err := scanner.Readline()
|
||||
switch {
|
||||
case errors.Is(err, io.EOF):
|
||||
fmt.Println()
|
||||
return nil
|
||||
case errors.Is(err, readline.ErrInterrupt):
|
||||
if line == "" {
|
||||
fmt.Println("\nUse Ctrl + d or /bye to exit.")
|
||||
}
|
||||
|
||||
scanner.Prompt.UseAlt = false
|
||||
sb.Reset()
|
||||
|
||||
continue
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case multiline != MultilineNone:
|
||||
// check if there's a multiline terminating string
|
||||
before, ok := strings.CutSuffix(line, `"""`)
|
||||
sb.WriteString(before)
|
||||
if !ok {
|
||||
fmt.Fprintln(&sb)
|
||||
continue
|
||||
}
|
||||
|
||||
switch multiline {
|
||||
case MultilineSystem:
|
||||
opts.System = sb.String()
|
||||
opts.Messages = append(opts.Messages, api.Message{Role: "system", Content: opts.System})
|
||||
fmt.Println("Set system message.")
|
||||
sb.Reset()
|
||||
case MultilineTemplate:
|
||||
opts.Template = sb.String()
|
||||
fmt.Println("Set prompt template.")
|
||||
sb.Reset()
|
||||
}
|
||||
|
||||
multiline = MultilineNone
|
||||
scanner.Prompt.UseAlt = false
|
||||
case strings.HasPrefix(line, `"""`):
|
||||
line := strings.TrimPrefix(line, `"""`)
|
||||
line, ok := strings.CutSuffix(line, `"""`)
|
||||
sb.WriteString(line)
|
||||
if !ok {
|
||||
// no multiline terminating string; need more input
|
||||
fmt.Fprintln(&sb)
|
||||
multiline = MultilinePrompt
|
||||
scanner.Prompt.UseAlt = true
|
||||
}
|
||||
case scanner.Pasting:
|
||||
fmt.Fprintln(&sb, line)
|
||||
continue
|
||||
case strings.HasPrefix(line, "/list"):
|
||||
args := strings.Fields(line)
|
||||
if err := ListHandler(cmd, args[1:]); err != nil {
|
||||
return err
|
||||
}
|
||||
case strings.HasPrefix(line, "/load"):
|
||||
args := strings.Fields(line)
|
||||
if len(args) != 2 {
|
||||
fmt.Println("Usage:\n /load <modelname>")
|
||||
continue
|
||||
}
|
||||
opts.Model = args[1]
|
||||
opts.Messages = []api.Message{}
|
||||
fmt.Printf("Loading model '%s'\n", opts.Model)
|
||||
if err := loadModel(cmd, &opts); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
case strings.HasPrefix(line, "/save"):
|
||||
args := strings.Fields(line)
|
||||
if len(args) != 2 {
|
||||
fmt.Println("Usage:\n /save <modelname>")
|
||||
continue
|
||||
}
|
||||
|
||||
client, err := api.ClientFromEnvironment()
|
||||
if err != nil {
|
||||
fmt.Println("error: couldn't connect to ollama server")
|
||||
return err
|
||||
}
|
||||
|
||||
req := &api.CreateRequest{
|
||||
Name: args[1],
|
||||
Modelfile: buildModelfile(opts),
|
||||
}
|
||||
fn := func(resp api.ProgressResponse) error { return nil }
|
||||
err = client.Create(cmd.Context(), req, fn)
|
||||
if err != nil {
|
||||
fmt.Println("error: couldn't save model")
|
||||
return err
|
||||
}
|
||||
fmt.Printf("Created new model '%s'\n", args[1])
|
||||
continue
|
||||
case strings.HasPrefix(line, "/set"):
|
||||
args := strings.Fields(line)
|
||||
if len(args) > 1 {
|
||||
switch args[1] {
|
||||
case "history":
|
||||
scanner.HistoryEnable()
|
||||
case "nohistory":
|
||||
scanner.HistoryDisable()
|
||||
case "wordwrap":
|
||||
opts.WordWrap = true
|
||||
fmt.Println("Set 'wordwrap' mode.")
|
||||
case "nowordwrap":
|
||||
opts.WordWrap = false
|
||||
fmt.Println("Set 'nowordwrap' mode.")
|
||||
case "verbose":
|
||||
cmd.Flags().Set("verbose", "true")
|
||||
fmt.Println("Set 'verbose' mode.")
|
||||
case "quiet":
|
||||
cmd.Flags().Set("verbose", "false")
|
||||
fmt.Println("Set 'quiet' mode.")
|
||||
case "format":
|
||||
if len(args) < 3 || args[2] != "json" {
|
||||
fmt.Println("Invalid or missing format. For 'json' mode use '/set format json'")
|
||||
} else {
|
||||
opts.Format = args[2]
|
||||
fmt.Printf("Set format to '%s' mode.\n", args[2])
|
||||
}
|
||||
case "noformat":
|
||||
opts.Format = ""
|
||||
fmt.Println("Disabled format.")
|
||||
case "parameter":
|
||||
if len(args) < 4 {
|
||||
usageParameters()
|
||||
continue
|
||||
}
|
||||
params := args[3:]
|
||||
fp, err := api.FormatParams(map[string][]string{args[2]: params})
|
||||
if err != nil {
|
||||
fmt.Printf("Couldn't set parameter: %q\n", err)
|
||||
continue
|
||||
}
|
||||
fmt.Printf("Set parameter '%s' to '%s'\n", args[2], strings.Join(params, ", "))
|
||||
opts.Options[args[2]] = fp[args[2]]
|
||||
case "system", "template":
|
||||
if len(args) < 3 {
|
||||
usageSet()
|
||||
continue
|
||||
}
|
||||
|
||||
if args[1] == "system" {
|
||||
multiline = MultilineSystem
|
||||
} else if args[1] == "template" {
|
||||
multiline = MultilineTemplate
|
||||
}
|
||||
|
||||
line := strings.Join(args[2:], " ")
|
||||
line, ok := strings.CutPrefix(line, `"""`)
|
||||
if !ok {
|
||||
multiline = MultilineNone
|
||||
} else {
|
||||
// only cut suffix if the line is multiline
|
||||
line, ok = strings.CutSuffix(line, `"""`)
|
||||
if ok {
|
||||
multiline = MultilineNone
|
||||
}
|
||||
}
|
||||
|
||||
sb.WriteString(line)
|
||||
if multiline != MultilineNone {
|
||||
scanner.Prompt.UseAlt = true
|
||||
continue
|
||||
}
|
||||
|
||||
if args[1] == "system" {
|
||||
opts.System = sb.String() // for display in modelfile
|
||||
newMessage := api.Message{Role: "system", Content: sb.String()}
|
||||
// Check if the slice is not empty and the last message is from 'system'
|
||||
if len(opts.Messages) > 0 && opts.Messages[len(opts.Messages)-1].Role == "system" {
|
||||
// Replace the last message
|
||||
opts.Messages[len(opts.Messages)-1] = newMessage
|
||||
} else {
|
||||
opts.Messages = append(opts.Messages, newMessage)
|
||||
}
|
||||
fmt.Println("Set system message.")
|
||||
sb.Reset()
|
||||
} else if args[1] == "template" {
|
||||
opts.Template = sb.String()
|
||||
fmt.Println("Set prompt template.")
|
||||
sb.Reset()
|
||||
}
|
||||
|
||||
sb.Reset()
|
||||
continue
|
||||
default:
|
||||
fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
|
||||
}
|
||||
} else {
|
||||
usageSet()
|
||||
}
|
||||
case strings.HasPrefix(line, "/show"):
|
||||
args := strings.Fields(line)
|
||||
if len(args) > 1 {
|
||||
client, err := api.ClientFromEnvironment()
|
||||
if err != nil {
|
||||
fmt.Println("error: couldn't connect to ollama server")
|
||||
return err
|
||||
}
|
||||
req := &api.ShowRequest{
|
||||
Name: opts.Model,
|
||||
System: opts.System,
|
||||
Template: opts.Template,
|
||||
Options: opts.Options,
|
||||
}
|
||||
resp, err := client.Show(cmd.Context(), req)
|
||||
if err != nil {
|
||||
fmt.Println("error: couldn't get model")
|
||||
return err
|
||||
}
|
||||
|
||||
switch args[1] {
|
||||
case "info":
|
||||
fmt.Println("Model details:")
|
||||
if len(resp.Details.Families) > 0 {
|
||||
fmt.Printf("Family %s\n", strings.Join(resp.Details.Families, ", "))
|
||||
} else if resp.Details.Family != "" {
|
||||
fmt.Printf("Family %s\n", resp.Details.Family)
|
||||
}
|
||||
fmt.Printf("Parameter Size %s\n", resp.Details.ParameterSize)
|
||||
fmt.Printf("Quantization Level %s\n", resp.Details.QuantizationLevel)
|
||||
fmt.Println("")
|
||||
case "license":
|
||||
if resp.License == "" {
|
||||
fmt.Println("No license was specified for this model.")
|
||||
} else {
|
||||
fmt.Println(resp.License)
|
||||
}
|
||||
case "modelfile":
|
||||
fmt.Println(resp.Modelfile)
|
||||
case "parameters":
|
||||
if resp.Parameters == "" {
|
||||
fmt.Println("No parameters were specified for this model.")
|
||||
} else {
|
||||
if len(opts.Options) > 0 {
|
||||
fmt.Println("User defined parameters:")
|
||||
for k, v := range opts.Options {
|
||||
fmt.Printf("%-*s %v\n", 30, k, v)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
fmt.Println("Model defined parameters:")
|
||||
fmt.Println(resp.Parameters)
|
||||
}
|
||||
case "system":
|
||||
switch {
|
||||
case opts.System != "":
|
||||
fmt.Println(opts.System + "\n")
|
||||
case resp.System != "":
|
||||
fmt.Println(resp.System + "\n")
|
||||
default:
|
||||
fmt.Println("No system message was specified for this model.")
|
||||
}
|
||||
case "template":
|
||||
switch {
|
||||
case opts.Template != "":
|
||||
fmt.Println(opts.Template + "\n")
|
||||
case resp.Template != "":
|
||||
fmt.Println(resp.Template)
|
||||
default:
|
||||
fmt.Println("No prompt template was specified for this model.")
|
||||
}
|
||||
default:
|
||||
fmt.Printf("Unknown command '/show %s'. Type /? for help\n", args[1])
|
||||
}
|
||||
} else {
|
||||
usageShow()
|
||||
}
|
||||
case strings.HasPrefix(line, "/help"), strings.HasPrefix(line, "/?"):
|
||||
args := strings.Fields(line)
|
||||
if len(args) > 1 {
|
||||
switch args[1] {
|
||||
case "set", "/set":
|
||||
usageSet()
|
||||
case "show", "/show":
|
||||
usageShow()
|
||||
case "shortcut", "shortcuts":
|
||||
usageShortcuts()
|
||||
}
|
||||
} else {
|
||||
usage()
|
||||
}
|
||||
case strings.HasPrefix(line, "/exit"), strings.HasPrefix(line, "/bye"):
|
||||
return nil
|
||||
case strings.HasPrefix(line, "/"):
|
||||
args := strings.Fields(line)
|
||||
isFile := false
|
||||
|
||||
if opts.MultiModal {
|
||||
for _, f := range extractFileNames(line) {
|
||||
if strings.HasPrefix(f, args[0]) {
|
||||
isFile = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !isFile {
|
||||
fmt.Printf("Unknown command '%s'. Type /? for help\n", args[0])
|
||||
continue
|
||||
}
|
||||
|
||||
sb.WriteString(line)
|
||||
default:
|
||||
sb.WriteString(line)
|
||||
}
|
||||
|
||||
if sb.Len() > 0 && multiline == MultilineNone {
|
||||
newMessage := api.Message{Role: "user", Content: sb.String()}
|
||||
|
||||
if opts.MultiModal {
|
||||
msg, images, err := extractFileData(sb.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// clear all previous images for better responses
|
||||
if len(images) > 0 {
|
||||
for i := range opts.Messages {
|
||||
opts.Messages[i].Images = nil
|
||||
}
|
||||
}
|
||||
|
||||
newMessage.Content = msg
|
||||
newMessage.Images = images
|
||||
}
|
||||
|
||||
opts.Messages = append(opts.Messages, newMessage)
|
||||
|
||||
assistant, err := chat(cmd, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if assistant != nil {
|
||||
opts.Messages = append(opts.Messages, *assistant)
|
||||
}
|
||||
|
||||
sb.Reset()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
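// buildModelfile serializes the current session (base model, system message, template,
// parameters, and messages) into Modelfile syntax so it can be saved with /save.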
func buildModelfile(opts runOptions) string {
|
||||
var mf strings.Builder
|
||||
model := opts.ParentModel
|
||||
if model == "" {
|
||||
model = opts.Model
|
||||
}
|
||||
fmt.Fprintf(&mf, "FROM %s\n", model)
|
||||
if opts.System != "" {
|
||||
fmt.Fprintf(&mf, "SYSTEM \"\"\"%s\"\"\"\n", opts.System)
|
||||
}
|
||||
|
||||
if opts.Template != "" {
|
||||
fmt.Fprintf(&mf, "TEMPLATE \"\"\"%s\"\"\"\n", opts.Template)
|
||||
}
|
||||
|
||||
keys := make([]string, 0)
|
||||
for k := range opts.Options {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
fmt.Fprintf(&mf, "PARAMETER %s %v\n", k, opts.Options[k])
|
||||
}
|
||||
fmt.Fprintln(&mf)
|
||||
|
||||
for _, msg := range opts.Messages {
|
||||
fmt.Fprintf(&mf, "MESSAGE %s \"\"\"%s\"\"\"\n", msg.Role, msg.Content)
|
||||
}
|
||||
|
||||
return mf.String()
|
||||
}
|
||||
|
||||
func normalizeFilePath(fp string) string {
|
||||
// Define a map of escaped characters and their replacements
|
||||
replacements := map[string]string{
|
||||
"\\ ": " ", // Escaped space
|
||||
"\\(": "(", // Escaped left parenthesis
|
||||
"\\)": ")", // Escaped right parenthesis
|
||||
"\\[": "[", // Escaped left square bracket
|
||||
"\\]": "]", // Escaped right square bracket
|
||||
"\\{": "{", // Escaped left curly brace
|
||||
"\\}": "}", // Escaped right curly brace
|
||||
"\\$": "$", // Escaped dollar sign
|
||||
"\\&": "&", // Escaped ampersand
|
||||
"\\;": ";", // Escaped semicolon
|
||||
"\\'": "'", // Escaped single quote
|
||||
"\\\\": "\\", // Escaped backslash
|
||||
"\\*": "*", // Escaped asterisk
|
||||
"\\?": "?", // Escaped question mark
|
||||
}
|
||||
|
||||
for escaped, actual := range replacements {
|
||||
fp = strings.ReplaceAll(fp, escaped, actual)
|
||||
}
|
||||
return fp
|
||||
}
|
||||
|
||||
func extractFileNames(input string) []string {
|
||||
// Regex to match file paths starting with optional drive letter, / ./ \ or .\ and include escaped or unescaped spaces (\ or %20)
|
||||
// and followed by more characters and a file extension
|
||||
// This will capture non filename strings, but we'll check for file existence to remove mismatches
|
||||
regexPattern := `(?:[a-zA-Z]:)?(?:\./|/|\\)[\S\\ ]+?\.(?i:jpg|jpeg|png|svg)\b`
|
||||
re := regexp.MustCompile(regexPattern)
|
||||
|
||||
return re.FindAllString(input, -1)
|
||||
}
|
||||
|
||||
func extractFileData(input string) (string, []api.ImageData, error) {
|
||||
filePaths := extractFileNames(input)
|
||||
var imgs []api.ImageData
|
||||
|
||||
for _, fp := range filePaths {
|
||||
nfp := normalizeFilePath(fp)
|
||||
data, err := getImageData(nfp)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Couldn't process image: %q\n", err)
|
||||
return "", imgs, err
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "Added image '%s'\n", nfp)
|
||||
input = strings.ReplaceAll(input, fp, "")
|
||||
imgs = append(imgs, data)
|
||||
}
|
||||
return input, imgs, nil
|
||||
}
|
||||
|
||||
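// getImageData reads an image file, verifying that it is a JPEG or PNG no larger than 100MB,
// and returns its contents.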
func getImageData(filePath string) ([]byte, error) {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
buf := make([]byte, 512)
|
||||
_, err = file.Read(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
contentType := http.DetectContentType(buf)
|
||||
allowedTypes := []string{"image/jpeg", "image/jpg", "image/png"}
|
||||
if !slices.Contains(allowedTypes, contentType) {
|
||||
return nil, fmt.Errorf("invalid image type: %s", contentType)
|
||||
}
|
||||
|
||||
info, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if the file size exceeds 100MB
|
||||
var maxSize int64 = 100 * 1024 * 1024 // 100MB in bytes
|
||||
if info.Size() > maxSize {
|
||||
return nil, fmt.Errorf("file size exceeds maximum limit (100MB)")
|
||||
}
|
||||
|
||||
buf = make([]byte, info.Size())
|
||||
_, err = file.Seek(0, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = io.ReadFull(file, buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
@@ -1,116 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"text/template"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
)
|
||||
|
||||
func TestExtractFilenames(t *testing.T) {
|
||||
// Unix style paths
|
||||
input := ` some preamble
|
||||
./relative\ path/one.png inbetween1 ./not a valid two.jpg inbetween2
|
||||
/unescaped space /three.jpeg inbetween3 /valid\ path/dir/four.png "./quoted with spaces/five.svg`
|
||||
res := extractFileNames(input)
|
||||
assert.Len(t, res, 5)
|
||||
assert.Contains(t, res[0], "one.png")
|
||||
assert.Contains(t, res[1], "two.jpg")
|
||||
assert.Contains(t, res[2], "three.jpeg")
|
||||
assert.Contains(t, res[3], "four.png")
|
||||
assert.Contains(t, res[4], "five.svg")
|
||||
assert.NotContains(t, res[4], '"')
|
||||
assert.NotContains(t, res, "inbtween")
|
||||
|
||||
// Windows style paths
|
||||
input = ` some preamble
|
||||
c:/users/jdoe/one.png inbetween1 c:/program files/someplace/two.jpg inbetween2
|
||||
/absolute/nospace/three.jpeg inbetween3 /absolute/with space/four.png inbetween4
|
||||
./relative\ path/five.svg inbetween5 "./relative with/spaces/six.png inbetween6
|
||||
d:\path with\spaces\seven.svg inbetween7 c:\users\jdoe\eight.png inbetween8
|
||||
d:\program files\someplace\nine.png inbetween9 "E:\program files\someplace\ten.svg some ending
|
||||
`
|
||||
res = extractFileNames(input)
|
||||
assert.Len(t, res, 10)
|
||||
assert.NotContains(t, res, "inbtween")
|
||||
assert.Contains(t, res[0], "one.png")
|
||||
assert.Contains(t, res[0], "c:")
|
||||
assert.Contains(t, res[1], "two.jpg")
|
||||
assert.Contains(t, res[1], "c:")
|
||||
assert.Contains(t, res[2], "three.jpeg")
|
||||
assert.Contains(t, res[3], "four.png")
|
||||
assert.Contains(t, res[4], "five.svg")
|
||||
assert.Contains(t, res[5], "six.png")
|
||||
assert.Contains(t, res[6], "seven.svg")
|
||||
assert.Contains(t, res[6], "d:")
|
||||
assert.Contains(t, res[7], "eight.png")
|
||||
assert.Contains(t, res[7], "c:")
|
||||
assert.Contains(t, res[8], "nine.png")
|
||||
assert.Contains(t, res[8], "d:")
|
||||
assert.Contains(t, res[9], "ten.svg")
|
||||
assert.Contains(t, res[9], "E:")
|
||||
}
|
||||
|
||||
func TestModelfileBuilder(t *testing.T) {
|
||||
opts := runOptions{
|
||||
Model: "hork",
|
||||
System: "You are part horse and part shark, but all hork. Do horklike things",
|
||||
Template: "This is a template.",
|
||||
Messages: []api.Message{
|
||||
{Role: "user", Content: "Hey there hork!"},
|
||||
{Role: "assistant", Content: "Yes it is true, I am half horse, half shark."},
|
||||
},
|
||||
Options: map[string]interface{}{},
|
||||
}
|
||||
|
||||
opts.Options["temperature"] = 0.9
|
||||
opts.Options["seed"] = 42
|
||||
opts.Options["penalize_newline"] = false
|
||||
opts.Options["stop"] = []string{"hi", "there"}
|
||||
|
||||
mf := buildModelfile(opts)
|
||||
expectedModelfile := `FROM {{.Model}}
|
||||
SYSTEM """{{.System}}"""
|
||||
TEMPLATE """{{.Template}}"""
|
||||
PARAMETER penalize_newline false
|
||||
PARAMETER seed 42
|
||||
PARAMETER stop [hi there]
|
||||
PARAMETER temperature 0.9
|
||||
|
||||
MESSAGE user """Hey there hork!"""
|
||||
MESSAGE assistant """Yes it is true, I am half horse, half shark."""
|
||||
`
|
||||
|
||||
tmpl, err := template.New("").Parse(expectedModelfile)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var buf bytes.Buffer
|
||||
err = tmpl.Execute(&buf, opts)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, buf.String(), mf)
|
||||
|
||||
opts.ParentModel = "horseshark"
|
||||
mf = buildModelfile(opts)
|
||||
expectedModelfile = `FROM {{.ParentModel}}
|
||||
SYSTEM """{{.System}}"""
|
||||
TEMPLATE """{{.Template}}"""
|
||||
PARAMETER penalize_newline false
|
||||
PARAMETER seed 42
|
||||
PARAMETER stop [hi there]
|
||||
PARAMETER temperature 0.9
|
||||
|
||||
MESSAGE user """Hey there hork!"""
|
||||
MESSAGE assistant """Yes it is true, I am half horse, half shark."""
|
||||
`
|
||||
|
||||
tmpl, err = template.New("").Parse(expectedModelfile)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var parentBuf bytes.Buffer
|
||||
err = tmpl.Execute(&parentBuf, opts)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, parentBuf.String(), mf)
|
||||
}
|
44
cmd/spinner.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/jmorganca/ollama/progressbar"
|
||||
)
|
||||
|
||||
type Spinner struct {
|
||||
description string
|
||||
*progressbar.ProgressBar
|
||||
}
|
||||
|
||||
func NewSpinner(description string) *Spinner {
|
||||
return &Spinner{
|
||||
description: description,
|
||||
ProgressBar: progressbar.NewOptions(-1,
|
||||
progressbar.OptionSetWriter(os.Stderr),
|
||||
progressbar.OptionThrottle(60*time.Millisecond),
|
||||
progressbar.OptionSpinnerType(14),
|
||||
progressbar.OptionSetRenderBlankState(true),
|
||||
progressbar.OptionSetElapsedTime(false),
|
||||
progressbar.OptionClearOnFinish(),
|
||||
progressbar.OptionSetDescription(description),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spinner) Spin(tick time.Duration) {
|
||||
for range time.Tick(tick) {
|
||||
if s.IsFinished() {
|
||||
break
|
||||
}
|
||||
|
||||
s.Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spinner) Stop() {
|
||||
s.Finish()
|
||||
fmt.Println(s.description)
|
||||
}
|
@@ -1,30 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
)
|
||||
|
||||
func startApp(ctx context.Context, client *api.Client) error {
|
||||
exe, err := os.Executable()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
link, err := os.Readlink(exe)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.Contains(link, "Ollama.app") {
|
||||
return fmt.Errorf("could not find ollama app")
|
||||
}
|
||||
path := strings.Split(link, "Ollama.app")
|
||||
if err := exec.Command("/usr/bin/open", "-a", path[0]+"Ollama.app").Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return waitForServer(ctx, client)
|
||||
}
|
@@ -1,14 +0,0 @@
|
||||
//go:build !windows && !darwin
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
)
|
||||
|
||||
func startApp(ctx context.Context, client *api.Client) error {
|
||||
return fmt.Errorf("could not connect to ollama server, run 'ollama serve' to start it")
|
||||
}
|
@@ -1,58 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
)
|
||||
|
||||
func startApp(ctx context.Context, client *api.Client) error {
|
||||
// log.Printf("XXX Attempting to find and start ollama app")
|
||||
AppName := "ollama app.exe"
|
||||
exe, err := os.Executable()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
appExe := filepath.Join(filepath.Dir(exe), AppName)
|
||||
_, err = os.Stat(appExe)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// Try the standard install location
|
||||
localAppData := os.Getenv("LOCALAPPDATA")
|
||||
appExe = filepath.Join(localAppData, "Ollama", AppName)
|
||||
_, err := os.Stat(appExe)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
// Finally look in the path
|
||||
appExe, err = exec.LookPath(AppName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not locate ollama app")
|
||||
}
|
||||
}
|
||||
}
|
||||
// log.Printf("XXX attempting to start app %s", appExe)
|
||||
|
||||
cmd_path := "c:\\Windows\\system32\\cmd.exe"
|
||||
cmd := exec.Command(cmd_path, "/c", appExe)
|
||||
// TODO - these hide flags aren't working - still pops up a command window for some reason
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{CreationFlags: 0x08000000, HideWindow: true}
|
||||
|
||||
// TODO this didn't help either...
|
||||
cmd.Stdin = strings.NewReader("")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return fmt.Errorf("unable to start ollama app %w", err)
|
||||
}
|
||||
|
||||
if cmd.Process != nil {
|
||||
defer cmd.Process.Release() //nolint:errcheck
|
||||
}
|
||||
return waitForServer(ctx, client)
|
||||
}
|
@@ -1,25 +1,6 @@
|
||||
# Documentation
|
||||
|
||||
To get started, see the project's **[quickstart](../README.md#quickstart)**.
|
||||
|
||||
Ollama is a tool for running AI models on your hardware. Many users will choose to use the Command Line Interface (CLI) to work with Ollama. Learn more about all the commands in the CLI in the **[Main Readme](../README.md)**.
|
||||
|
||||
Use the RESTful API from any language, including Python, JavaScript, Typescript, Go, Rust, and many more. Learn more about using the API in the **[API Documentation](./api.md)**.
|
||||
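For a quick check from the shell, the generate endpoint can be called with curl (a minimal sketch, assuming the server is running on the default port 11434 and that the model named here has already been pulled):

```bash
# Minimal example; "llama2" is just an illustrative model name
curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "Why is the sky blue?"
}'
```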
|
||||
Create new models or modify models already in the library using the Modelfile. Learn more about the Modelfile syntax in the **[Modelfile Documentation](./modelfile.md)**.
|
||||
|
||||
Import models using source model weights found on Hugging Face and similar sites by referring to the **[Import Documentation](./import.md)**.
|
||||
|
||||
Installing on Linux is easy in most cases using the script at [ollama.com/download](https://ollama.com/download). For more detail about the install, including CUDA drivers, see the **[Linux Documentation](./linux.md)**.
|
||||
|
||||
Many of our users like the flexibility of using our official Docker Image. Learn more about using Docker with Ollama using the **[Docker Documentation](https://hub.docker.com/r/ollama/ollama)**.
|
||||
|
||||
Ollama is easy to install on Linux and Mac, but some users prefer to build it themselves. To do this, refer to the **[Development Documentation](./development.md)**.
|
||||
|
||||
If you encounter a problem with Ollama, the best place to start is the logs. Find more information about them in the **[Troubleshooting Guide](./troubleshooting.md)**.
|
||||
|
||||
Finally, for all the questions that don't fit anywhere else, there is the **[FAQ](./faq.md)**.
|
||||
|
||||
[Tutorials](./tutorials.md) apply the documentation to tasks.
|
||||
|
||||
For working code examples of using Ollama, see [Examples](../examples).
|
||||
- [Modelfile](./modelfile.md)
|
||||
- [How to develop Ollama](./development.md)
|
||||
- [API](./api.md)
|
||||
- [Tutorials](./tutorials.md)
|
||||
|
819
docs/api.md
@@ -1,26 +1,20 @@
|
||||
# Development
|
||||
|
||||
- Install cmake and (optionally) the required tools for GPUs
|
||||
- run `go generate ./...`
|
||||
- run `go build .`
|
||||
|
||||
Install required tools:
|
||||
|
||||
- cmake version 3.24 or higher
|
||||
- go version 1.21 or higher
|
||||
- go version 1.20 or higher
|
||||
- gcc version 11.4.0 or higher
|
||||
|
||||
```bash
|
||||
brew install go cmake gcc
|
||||
```
|
||||
|
||||
Optionally enable debugging and more verbose logging:
|
||||
|
||||
```bash
|
||||
# At build time
|
||||
export CGO_CFLAGS="-g"
|
||||
|
||||
# At runtime
|
||||
export OLLAMA_DEBUG=1
|
||||
```
|
||||
|
||||
Get the required libraries and build the native LLM code:
|
||||
Get the required libraries:
|
||||
|
||||
```bash
|
||||
go generate ./...
|
||||
@@ -38,101 +32,8 @@ Now you can run `ollama`:
|
||||
./ollama
|
||||
```
|
||||
|
||||
### Linux
|
||||
## Building on Linux with GPU support
|
||||
|
||||
#### Linux CUDA (NVIDIA)
|
||||
|
||||
*Your operating system distribution may already have packages for NVIDIA CUDA. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!*
|
||||
|
||||
Install `cmake` and `golang` as well as [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads)
|
||||
development and runtime packages.
|
||||
|
||||
Typically the build scripts will auto-detect CUDA, however, if your Linux distro
|
||||
or installation approach uses unusual paths, you can specify the locations by
|
||||
setting the environment variable `CUDA_LIB_DIR` to the location of the shared
|
||||
libraries, and `CUDACXX` to the location of the nvcc compiler. You can customize
|
||||
the set of target CUDA architectures by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "50;60;70").
|
||||
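As a minimal sketch (the paths are illustrative and depend on your distro and CUDA install; the variables are the ones described above):

```
CUDA_LIB_DIR=/usr/local/cuda/lib64 CUDACXX=/usr/local/cuda/bin/nvcc CMAKE_CUDA_ARCHITECTURES="50;60;70" go generate ./...
```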
|
||||
Then generate dependencies:
|
||||
|
||||
```
|
||||
go generate ./...
|
||||
```
|
||||
|
||||
Then build the binary:
|
||||
|
||||
```
|
||||
go build .
|
||||
```
|
||||
|
||||
#### Linux ROCm (AMD)
|
||||
|
||||
*Your operating system distribution may already have packages for AMD ROCm and CLBlast. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!*
|
||||
|
||||
Install [CLBlast](https://github.com/CNugteren/CLBlast/blob/master/doc/installation.md) and [ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html) development packages first, as well as `cmake` and `golang`.
|
||||
|
||||
Typically the build scripts will auto-detect ROCm, however, if your Linux distro
|
||||
or installation approach uses unusual paths, you can specify the locations by
|
||||
setting the environment variable `ROCM_PATH` to the location of the ROCm
|
||||
install (typically `/opt/rocm`), and `CLBlast_DIR` to the location of the
|
||||
CLBlast install (typically `/usr/lib/cmake/CLBlast`). You can also customize
|
||||
the AMD GPU targets by setting `AMDGPU_TARGETS` (e.g. `AMDGPU_TARGETS="gfx1101;gfx1102"`).
|
||||
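As a minimal sketch using the typical paths mentioned above:

```
ROCM_PATH=/opt/rocm CLBlast_DIR=/usr/lib/cmake/CLBlast AMDGPU_TARGETS="gfx1101;gfx1102" go generate ./...
```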
|
||||
```
|
||||
go generate ./...
|
||||
```
|
||||
|
||||
Then build the binary:
|
||||
|
||||
```
|
||||
go build .
|
||||
```
|
||||
|
||||
ROCm requires elevated privileges to access the GPU at runtime. On most distros you can add your user account to the `render` group, or run as root.
|
||||
|
||||
#### Advanced CPU Settings
|
||||
|
||||
By default, running `go generate ./...` will compile a few different variations
|
||||
of the LLM library based on common CPU families and vector math capabilities,
|
||||
including a lowest-common-denominator build which should run on almost any 64-bit CPU
|
||||
somewhat slowly. At runtime, Ollama will auto-detect the optimal variation to
|
||||
load. If you would like a CPU build customized for your
|
||||
processor, you can set `OLLAMA_CUSTOM_CPU_DEFS` to the llama.cpp flags you would
|
||||
like to use. For example, to compile an optimized binary for an Intel i9-9880H,
|
||||
you might use:
|
||||
|
||||
```
|
||||
OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./...
|
||||
go build .
|
||||
```
|
||||
|
||||
#### Containerized Linux Build
|
||||
|
||||
If you have Docker available, you can build Linux binaries with `./scripts/build_linux.sh`, which has the CUDA and ROCm dependencies included. The resulting binary is placed in `./dist`.
|
||||
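For example (a sketch; the exact artifact names under `./dist` vary by platform and release):

```
./scripts/build_linux.sh
ls ./dist
```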
|
||||
|
||||
### Windows
|
||||
|
||||
Note: The Windows build for Ollama is still under development.
|
||||
|
||||
Install required tools:
|
||||
|
||||
- MSVC toolchain - C/C++ and cmake as minimal requirements
|
||||
- go version 1.21 or higher
|
||||
- MinGW (pick one variant) with GCC.
|
||||
- <https://www.mingw-w64.org/>
|
||||
- <https://www.msys2.org/>
|
||||
|
||||
```powershell
|
||||
$env:CGO_ENABLED="1"
|
||||
|
||||
go generate ./...
|
||||
|
||||
go build .
|
||||
```
|
||||
|
||||
#### Windows CUDA (NVIDIA)
|
||||
|
||||
In addition to the common Windows development tools described above, install:
|
||||
|
||||
- [NVIDIA CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html)
|
||||
- Install cmake and nvidia-cuda-toolkit
|
||||
- run `go generate ./...`
|
||||
- run `go build .`
|
||||
|
193
docs/faq.md
@@ -1,195 +1,18 @@
|
||||
# FAQ
|
||||
|
||||
## How can I upgrade Ollama?
|
||||
## How can I expose the Ollama server?
|
||||
|
||||
Ollama on macOS and Windows will automatically download updates. Click on the taskbar or menubar item and then click "Restart to update" to apply the update. Updates can also be installed by downloading the latest version [manually](https://ollama.com/download/).
|
||||
|
||||
On Linux, re-run the install script:
|
||||
|
||||
```
|
||||
curl -fsSL https://ollama.com/install.sh | sh
|
||||
```bash
|
||||
OLLAMA_HOST=0.0.0.0:11435 ollama serve
|
||||
```
|
||||
|
||||
## How can I view the logs?
|
||||
|
||||
Review the [Troubleshooting](./troubleshooting.md) docs for more about using logs.
|
||||
|
||||
## How can I specify the context window size?
|
||||
|
||||
By default, Ollama uses a context window size of 2048 tokens.
|
||||
|
||||
To change this when using `ollama run`, use `/set parameter`:
|
||||
By default, Ollama allows cross origin requests from `127.0.0.1` and `0.0.0.0`. To support more origins, you can use the `OLLAMA_ORIGINS` environment variable:
|
||||
|
||||
```bash
|
||||
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
|
||||
```
|
||||
/set parameter num_ctx 4096
|
||||
```
|
||||
|
||||
When using the API, specify the `num_ctx` parameter:
|
||||
|
||||
```
|
||||
curl http://localhost:11434/api/generate -d '{
|
||||
"model": "llama2",
|
||||
"prompt": "Why is the sky blue?",
|
||||
"options": {
|
||||
"num_ctx": 4096
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
## How do I configure Ollama server?
|
||||
|
||||
Ollama server can be configured with environment variables.
|
||||
|
||||
### Setting environment variables on Mac
|
||||
|
||||
If Ollama is run as a macOS application, environment variables should be set using `launchctl`:
|
||||
|
||||
1. For each environment variable, call `launchctl setenv`.
|
||||
|
||||
```bash
|
||||
launchctl setenv OLLAMA_HOST "0.0.0.0"
|
||||
```
|
||||
|
||||
2. Restart Ollama application.
|
||||
|
||||
### Setting environment variables on Linux
|
||||
|
||||
If Ollama is run as a systemd service, environment variables should be set using `systemctl`:
|
||||
|
||||
1. Edit the systemd service by calling `systemctl edit ollama.service`. This will open an editor.
|
||||
|
||||
2. For each environment variable, add a line `Environment` under section `[Service]`:
|
||||
|
||||
```ini
|
||||
[Service]
|
||||
Environment="OLLAMA_HOST=0.0.0.0"
|
||||
```
|
||||
|
||||
3. Save and exit.
|
||||
|
||||
4. Reload `systemd` and restart Ollama:
|
||||
|
||||
```bash
|
||||
systemctl daemon-reload
|
||||
systemctl restart ollama
|
||||
```
|
||||
|
||||
### Setting environment variables on Windows
|
||||
|
||||
On Windows, Ollama inherits your user and system environment variables. A command-line alternative is sketched after the steps below.
|
||||
|
||||
1. First, quit Ollama by clicking on it in the taskbar
|
||||
|
||||
2. Edit system environment variables from the control panel
|
||||
|
||||
3. Edit or create new variable(s) for your user account, such as `OLLAMA_HOST`, `OLLAMA_MODELS`, etc.
|
||||
|
||||
4. Click OK/Apply to save
|
||||
|
||||
5. Run `ollama` from a new terminal window
|
||||
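As an alternative sketch (not part of the steps above), the same variables can be set from a terminal with `setx`, which writes them to your user environment; open a new terminal afterwards for the change to take effect:

```powershell
setx OLLAMA_HOST "0.0.0.0"
```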
|
||||
|
||||
## How can I expose Ollama on my network?
|
||||
|
||||
Ollama binds to 127.0.0.1 on port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
|
||||
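For example, a sketch of listening on all interfaces when running the server directly (on managed installs, set the variable as described in the configuration section instead):

```shell
OLLAMA_HOST=0.0.0.0 ollama serve
```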
|
||||
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||
|
||||
## How can I allow additional web origins to access Ollama?
|
||||
|
||||
Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Additional origins can be configured with `OLLAMA_ORIGINS`.
|
||||
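For example, a sketch when running the server directly:

```shell
OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
```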
|
||||
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||
|
||||
## Where are models stored?
|
||||
|
||||
- macOS: `~/.ollama/models`
|
||||
- Linux: `/usr/share/ollama/.ollama/models`
|
||||
- Windows: `C:\Users\<username>\.ollama\models`
|
||||
|
||||
### How do I set them to a different location?
|
||||
|
||||
If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory.
|
||||
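For example, a sketch when running the server directly (the path is a placeholder):

```shell
OLLAMA_MODELS=/path/to/models ollama serve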
|
||||
Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
|
||||
|
||||
## Does Ollama send my prompts and answers back to Ollama.ai to use in any way?
|
||||
|
||||
No, Ollama runs entirely locally, and conversation data will never leave your machine.
|
||||
|
||||
## How can I use Ollama in Visual Studio Code?
|
||||
|
||||
There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/jmorganca/ollama#extensions--plugins) at the bottom of the main repository readme.
|
||||
|
||||
## How do I use Ollama behind a proxy?
|
||||
|
||||
Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` is configured. When using either variable, ensure it is set where `ollama serve` can access the value. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform.
|
||||
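For example, a sketch when running the server directly (the proxy URL is a placeholder):

```shell
HTTPS_PROXY=https://proxy.example.com ollama serve
```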
|
||||
### How do I use Ollama behind a proxy in Docker?
|
||||
|
||||
The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.
|
||||
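For example:

```shell
docker run -d -e HTTPS_PROXY=https://proxy.example.com -p 11434:11434 ollama/ollama
```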
|
||||
Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).
|
||||
|
||||
Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.
|
||||
|
||||
```dockerfile
|
||||
FROM ollama/ollama
|
||||
COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt
|
||||
RUN update-ca-certificates
|
||||
```
|
||||
|
||||
Build and run this image:
|
||||
|
||||
```shell
|
||||
docker build -t ollama-with-ca .
|
||||
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
|
||||
```
|
||||
|
||||
## How do I use Ollama with GPU acceleration in Docker?
|
||||
|
||||
The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
|
||||
|
||||
GPU acceleration is not available for Docker Desktop in macOS due to the lack of GPU passthrough and emulation.
|
||||
|
||||
## Why is networking slow in WSL2 on Windows 10?
|
||||
|
||||
This can impact both installing Ollama and downloading models.
|
||||
|
||||
Open `Control Panel > Networking and Internet > View network status and tasks` and click on `Change adapter settings` on the left panel. Find the `vEthernet (WSL)` adapter, right-click and select `Properties`.
|
||||
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
|
||||
properties.
|
||||
|
||||
## How can I pre-load a model to get faster response times?
|
||||
|
||||
If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
|
||||
|
||||
To preload the mistral model using the generate endpoint, use:
|
||||
```shell
|
||||
curl http://localhost:11434/api/generate -d '{"model": "mistral"}'
|
||||
```
|
||||
|
||||
To use the chat completions endpoint, use:
|
||||
```shell
|
||||
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
|
||||
```
|
||||
|
||||
## How do I keep a model loaded in memory or make it unload immediately?
|
||||
|
||||
By default, models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed, or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` or `/api/chat` API endpoint to control how long the model stays in memory.
|
||||
|
||||
The `keep_alive` parameter can be set to:
|
||||
* a duration string (such as "10m" or "24h")
|
||||
* a number in seconds (such as 3600)
|
||||
* any negative number which will keep the model loaded in memory (e.g. -1 or "-1m")
|
||||
* '0' which will unload the model immediately after generating a response
|
||||
|
||||
For example, to preload a model and leave it in memory use:
|
||||
```shell
|
||||
curl http://localhost:11434/api/generate -d '{"model": "llama2", "keep_alive": -1}'
|
||||
```
|
||||
|
||||
To unload the model and free up memory use:
|
||||
```shell
|
||||
curl http://localhost:11434/api/generate -d '{"model": "llama2", "keep_alive": 0}'
|
||||
```
|
||||
* macOS: Raw model data is stored under `~/.ollama/models`.
|
||||
* Linux: Raw model data is stored under `/usr/share/ollama/.ollama/models`
|
||||
|
165
docs/import.md
@@ -1,165 +0,0 @@
|
||||
# Import a model
|
||||
|
||||
This guide walks through importing a GGUF, PyTorch or Safetensors model.
|
||||
|
||||
## Importing (GGUF)
|
||||
|
||||
### Step 1: Write a `Modelfile`
|
||||
|
||||
Start by creating a `Modelfile`. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.
|
||||
|
||||
```
|
||||
FROM ./mistral-7b-v0.1.Q4_0.gguf
|
||||
```
|
||||
|
||||
(Optional) Many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
|
||||
|
||||
```
|
||||
FROM ./mistral-7b-v0.1.Q4_0.gguf
|
||||
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
|
||||
```
|
||||
|
||||
### Step 2: Create the Ollama model
|
||||
|
||||
Finally, create a model from your `Modelfile`:
|
||||
|
||||
```
|
||||
ollama create example -f Modelfile
|
||||
```
|
||||
|
||||
### Step 3: Run your model
|
||||
|
||||
Next, test the model with `ollama run`:
|
||||
|
||||
```
|
||||
ollama run example "What is your favourite condiment?"
|
||||
```
|
||||
|
||||
## Importing (PyTorch & Safetensors)
|
||||
|
||||
> Importing from PyTorch and Safetensors is a longer process than importing from GGUF. Improvements that make it easier are a work in progress.
|
||||
|
||||
### Setup
|
||||
|
||||
First, clone the `ollama/ollama` repo:
|
||||
|
||||
```
|
||||
git clone git@github.com:ollama/ollama.git ollama
|
||||
cd ollama
|
||||
```
|
||||
|
||||
and then fetch its `llama.cpp` submodule:
|
||||
|
||||
```shell
|
||||
git submodule init
|
||||
git submodule update llm/llama.cpp
|
||||
```
|
||||
|
||||
Next, install the Python dependencies:
|
||||
|
||||
```
|
||||
python3 -m venv llm/llama.cpp/.venv
|
||||
source llm/llama.cpp/.venv/bin/activate
|
||||
pip install -r llm/llama.cpp/requirements.txt
|
||||
```
|
||||
|
||||
Then build the `quantize` tool:
|
||||
|
||||
```
|
||||
make -C llm/llama.cpp quantize
|
||||
```
|
||||
|
||||
### Clone the HuggingFace repository (optional)
|
||||
|
||||
If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.
|
||||
|
||||
Install [Git LFS](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage), verify it's installed, and then clone the model's repository:
|
||||
|
||||
```
|
||||
git lfs install
|
||||
git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1 model
|
||||
```
|
||||
|
||||
### Convert the model
|
||||
|
||||
> Note: some model architectures require using specific convert scripts. For example, Qwen models require running `convert-hf-to-gguf.py` instead of `convert.py`.
|
||||
|
||||
```
|
||||
python llm/llama.cpp/convert.py ./model --outtype f16 --outfile converted.bin
|
||||
```
|
||||
|
||||
### Quantize the model
|
||||
|
||||
```
|
||||
llm/llama.cpp/quantize converted.bin quantized.bin q4_0
|
||||
```
|
||||
|
||||
### Step 3: Write a `Modelfile`
|
||||
|
||||
Next, create a `Modelfile` for your model:
|
||||
|
||||
```
|
||||
FROM quantized.bin
|
||||
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
|
||||
```
|
||||
|
||||
### Step 4: Create the Ollama model
|
||||
|
||||
Finally, create a model from your `Modelfile`:
|
||||
|
||||
```
|
||||
ollama create example -f Modelfile
|
||||
```
|
||||
|
||||
### Step 5: Run your model
|
||||
|
||||
Next, test the model with `ollama run`:
|
||||
|
||||
```
|
||||
ollama run example "What is your favourite condiment?"
|
||||
```
|
||||
|
||||
## Publishing your model (optional – early alpha)
|
||||
|
||||
Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:
|
||||
|
||||
1. Create [an account](https://ollama.com/signup)
|
||||
2. Run `cat ~/.ollama/id_ed25519.pub` (or `type %USERPROFILE%\.ollama\id_ed25519.pub` on Windows) to view your Ollama public key. Copy this to the clipboard.
|
||||
3. Add your public key to your [Ollama account](https://ollama.com/settings/keys)
|
||||
|
||||
Next, copy your model to your username's namespace:
|
||||
|
||||
```
|
||||
ollama cp example <your username>/example
|
||||
```
|
||||
|
||||
Then push the model:
|
||||
|
||||
```
|
||||
ollama push <your username>/example
|
||||
```
|
||||
|
||||
After publishing, your model will be available at `https://ollama.com/<your username>/example`.
|
||||
|
||||
## Quantization reference
|
||||
|
||||
The quantization options are as follows (from highest to lowest levels of quantization). Note: some architectures such as Falcon do not support K quants.
|
||||
|
||||
- `q2_K`
|
||||
- `q3_K`
|
||||
- `q3_K_S`
|
||||
- `q3_K_M`
|
||||
- `q3_K_L`
|
||||
- `q4_0` (recommended)
|
||||
- `q4_1`
|
||||
- `q4_K`
|
||||
- `q4_K_S`
|
||||
- `q4_K_M`
|
||||
- `q5_0`
|
||||
- `q5_1`
|
||||
- `q5_K`
|
||||
- `q5_K_S`
|
||||
- `q5_K_M`
|
||||
- `q6_K`
|
||||
- `q8_0`
|
||||
- `f16`
|
105
docs/linux.md
@@ -1,27 +1,45 @@
|
||||
# Ollama on Linux
|
||||
|
||||
## Install
|
||||
|
||||
Install Ollama by running this one-liner:
|
||||
# Installing Ollama on Linux
|
||||
|
||||
> Note: A one-line installer for Ollama is available by running:
|
||||
>
|
||||
> ```bash
|
||||
> curl https://ollama.ai/install.sh | sh
|
||||
> ```
|
||||
|
||||
```bash
|
||||
curl -fsSL https://ollama.com/install.sh | sh
|
||||
```
|
||||
|
||||
## Manual install
|
||||
|
||||
### Download the `ollama` binary
|
||||
## Download the `ollama` binary
|
||||
|
||||
Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:
|
||||
|
||||
```bash
|
||||
sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
|
||||
sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
|
||||
sudo chmod +x /usr/bin/ollama
|
||||
```
|
||||
|
||||
### Adding Ollama as a startup service (recommended)
|
||||
## Start Ollama
|
||||
|
||||
Start Ollama by running `ollama serve`:
|
||||
|
||||
```bash
|
||||
ollama serve
|
||||
```
|
||||
|
||||
Once Ollama is running, run a model in another terminal session:
|
||||
|
||||
```bash
|
||||
ollama run llama2
|
||||
```
|
||||
|
||||
## Install CUDA drivers (optional – for Nvidia GPUs)
|
||||
|
||||
[Download and install](https://developer.nvidia.com/cuda-downloads) CUDA.
|
||||
|
||||
Verify that the drivers are installed by running the following command, which should print details about your GPU:
|
||||
|
||||
```bash
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
## Adding Ollama as a startup service (optional)
|
||||
|
||||
Create a user for Ollama:
|
||||
|
||||
@@ -42,6 +60,7 @@ User=ollama
|
||||
Group=ollama
|
||||
Restart=always
|
||||
RestartSec=3
|
||||
Environment="HOME=/usr/share/ollama"
|
||||
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
@@ -54,40 +73,7 @@ sudo systemctl daemon-reload
|
||||
sudo systemctl enable ollama
|
||||
```
|
||||
|
||||
### Install CUDA drivers (optional – for Nvidia GPUs)
|
||||
|
||||
[Download and install](https://developer.nvidia.com/cuda-downloads) CUDA.
|
||||
|
||||
Verify that the drivers are installed by running the following command, which should print details about your GPU:
|
||||
|
||||
```bash
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
### Start Ollama
|
||||
|
||||
Start Ollama using `systemd`:
|
||||
|
||||
```bash
|
||||
sudo systemctl start ollama
|
||||
```
|
||||
|
||||
## Update
|
||||
|
||||
Update ollama by running the install script again:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://ollama.com/install.sh | sh
|
||||
```
|
||||
|
||||
Or by downloading the ollama binary:
|
||||
|
||||
```bash
|
||||
sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
|
||||
sudo chmod +x /usr/bin/ollama
|
||||
```
|
||||
|
||||
## Viewing logs
|
||||
### Viewing logs
|
||||
|
||||
To view logs of Ollama running as a startup service, run:
|
||||
|
||||
@@ -95,26 +81,3 @@ To view logs of Ollama running as a startup service, run:
|
||||
journalctl -u ollama
|
||||
```
|
||||
|
||||
## Uninstall
|
||||
|
||||
Remove the ollama service:
|
||||
|
||||
```bash
|
||||
sudo systemctl stop ollama
|
||||
sudo systemctl disable ollama
|
||||
sudo rm /etc/systemd/system/ollama.service
|
||||
```
|
||||
|
||||
Remove the ollama binary from your bin directory (either `/usr/local/bin`, `/usr/bin`, or `/bin`):
|
||||
|
||||
```bash
|
||||
sudo rm $(which ollama)
|
||||
```
|
||||
|
||||
Remove the downloaded models and Ollama service user and group:
|
||||
|
||||
```bash
|
||||
sudo rm -r /usr/share/ollama
|
||||
sudo userdel ollama
|
||||
sudo groupdel ollama
|
||||
```
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# Ollama Model File
|
||||
|
||||
> Note: `Modelfile` syntax is in development
|
||||
> Note: this `Modelfile` syntax is in development
|
||||
|
||||
A model file is the blueprint to create and share models with Ollama.
|
||||
|
||||
@@ -12,6 +12,7 @@ A model file is the blueprint to create and share models with Ollama.
|
||||
- [FROM (Required)](#from-required)
|
||||
- [Build from llama2](#build-from-llama2)
|
||||
- [Build from a bin file](#build-from-a-bin-file)
|
||||
- [EMBED](#embed)
|
||||
- [PARAMETER](#parameter)
|
||||
- [Valid Parameters and Values](#valid-parameters-and-values)
|
||||
- [TEMPLATE](#template)
|
||||
@@ -19,7 +20,6 @@ A model file is the blueprint to create and share models with Ollama.
|
||||
- [SYSTEM](#system)
|
||||
- [ADAPTER](#adapter)
|
||||
- [LICENSE](#license)
|
||||
- [MESSAGE](#message)
|
||||
- [Notes](#notes)
|
||||
|
||||
## Format
|
||||
@@ -31,20 +31,17 @@ The format of the `Modelfile`:
|
||||
INSTRUCTION arguments
|
||||
```
|
||||
|
||||
| Instruction | Description |
|
||||
| ----------------------------------- | -------------------------------------------------------------- |
|
||||
| [`FROM`](#from-required) (required) | Defines the base model to use. |
|
||||
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
|
||||
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
|
||||
| [`SYSTEM`](#system) | Specifies the system message that will be set in the template. |
|
||||
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
|
||||
| [`LICENSE`](#license) | Specifies the legal license. |
|
||||
| [`MESSAGE`](#message) | Specify message history. |
|
||||
| Instruction | Description |
|
||||
| ----------------------------------- | ------------------------------------------------------------- |
|
||||
| [`FROM`](#from-required) (required) | Defines the base model to use. |
|
||||
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
|
||||
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
|
||||
| [`SYSTEM`](#system) | Specifies the system prompt that will be set in the template. |
|
||||
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
|
||||
| [`LICENSE`](#license) | Specifies the legal license. |
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic `Modelfile`
|
||||
|
||||
An example of a `Modelfile` creating a Mario blueprint:
|
||||
|
||||
```modelfile
|
||||
@@ -54,7 +51,7 @@ PARAMETER temperature 1
|
||||
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
|
||||
PARAMETER num_ctx 4096
|
||||
|
||||
# sets a custom system message to specify the behavior of the chat assistant
|
||||
# sets a custom system prompt to specify the behavior of the chat assistant
|
||||
SYSTEM You are Mario from super mario bros, acting as an assistant.
|
||||
```
|
||||
|
||||
@@ -67,35 +64,6 @@ To use this:
|
||||
|
||||
More examples are available in the [examples directory](../examples).
|
||||
|
||||
### `Modelfile`s in [ollama.com/library][1]
|
||||
|
||||
There are two ways to view `Modelfile`s underlying the models in [ollama.com/library][1]:
|
||||
|
||||
- Option 1: view a details page from a model's tags page:
|
||||
1. Go to a particular model's tags (e.g. https://ollama.com/library/llama2/tags)
|
||||
2. Click on a tag (e.g. https://ollama.com/library/llama2:13b)
|
||||
3. Scroll down to "Layers"
|
||||
- Note: if the [`FROM` instruction](#from-required) is not present,
|
||||
it means the model was created from a local file
|
||||
- Option 2: use `ollama show` to print the `Modelfile` for any local models like so:
|
||||
|
||||
```bash
|
||||
> ollama show --modelfile llama2:13b
|
||||
# Modelfile generated by "ollama show"
|
||||
# To build a new Modelfile based on this one, replace the FROM line with:
|
||||
# FROM llama2:13b
|
||||
|
||||
FROM /root/.ollama/models/blobs/sha256:123abc
|
||||
TEMPLATE """[INST] {{ if .System }}<<SYS>>{{ .System }}<</SYS>>
|
||||
|
||||
{{ end }}{{ .Prompt }} [/INST] """
|
||||
SYSTEM """"""
|
||||
PARAMETER stop [INST]
|
||||
PARAMETER stop [/INST]
|
||||
PARAMETER stop <<SYS>>
|
||||
PARAMETER stop <</SYS>>
|
||||
```
|
||||
|
||||
## Instructions
|
||||
|
||||
### FROM (Required)
|
||||
@@ -123,6 +91,17 @@ FROM ./ollama-model.bin
|
||||
|
||||
This bin file location should be specified as an absolute path or relative to the `Modelfile` location.
|
||||
|
||||
### EMBED
|
||||
|
||||
The `EMBED` instruction is used to add embeddings of files to a model. This is useful for adding custom data that the model can reference when generating an answer. Note that currently only text files are supported, formatted with each line as one embedding.
|
||||
|
||||
```modelfile
|
||||
FROM <model name>:<tag>
|
||||
EMBED <file path>.txt
|
||||
EMBED <different file path>.txt
|
||||
EMBED <path to directory>/*.txt
|
||||
```
|
||||
|
||||
### PARAMETER
|
||||
|
||||
The `PARAMETER` instruction defines a parameter that can be set when the model is run.
|
||||
@@ -145,8 +124,8 @@ PARAMETER <parameter> <parametervalue>
|
||||
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
|
||||
| repeat_penalty | Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. (Default: 1.1) | float | repeat_penalty 1.1 |
|
||||
| temperature | The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8) | float | temperature 0.7 |
|
||||
| seed | Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: 0) | int | seed 42 |
|
||||
| stop | Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return. Multiple stop patterns may be set by specifying multiple separate `stop` parameters in a modelfile. | string | stop "AI assistant:" |
|
||||
| seed | Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. | int | seed 42 |
|
||||
| stop | Sets the stop sequences to use. | string | stop "AI assistant:" |
|
||||
| tfs_z | Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1) | float | tfs_z 1 |
|
||||
| num_predict | Maximum number of tokens to predict when generating text. (Default: 128, -1 = infinite generation, -2 = fill context) | int | num_predict 42 |
|
||||
| top_k | Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40) | int | top_k 40 |
|
||||
@@ -154,28 +133,35 @@ PARAMETER <parameter> <parametervalue>
|
||||
|
||||
### TEMPLATE
|
||||
|
||||
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system message, a user's message and the response from the model. Note: syntax may be model specific. Templates use Go [template syntax](https://pkg.go.dev/text/template).
|
||||
`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system prompt and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific. You can usually find the template for a given model in the readme for that model.
|
||||
|
||||
#### Template Variables
|
||||
|
||||
| Variable | Description |
|
||||
| ----------------- | --------------------------------------------------------------------------------------------- |
|
||||
| `{{ .System }}` | The system message used to specify custom behavior. |
|
||||
| `{{ .Prompt }}` | The user prompt message. |
|
||||
| `{{ .Response }}` | The response from the model. When generating a response, text after this variable is omitted. |
|
||||
| Variable | Description |
|
||||
| --------------- | ------------------------------------------------------------------------------------------------------------ |
|
||||
| `{{ .System }}` | The system prompt used to specify custom behavior, this must also be set in the Modelfile as an instruction. |
|
||||
| `{{ .Prompt }}` | The incoming prompt, this is not specified in the model file and will be set based on input. |
|
||||
| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |
|
||||
|
||||
```
|
||||
TEMPLATE """{{ if .System }}<|im_start|>system
|
||||
{{ .System }}<|im_end|>
|
||||
{{ end }}{{ if .Prompt }}<|im_start|>user
|
||||
{{ .Prompt }}<|im_end|>
|
||||
{{ end }}<|im_start|>assistant
|
||||
```modelfile
|
||||
TEMPLATE """
|
||||
{{- if .First }}
|
||||
### System:
|
||||
{{ .System }}
|
||||
{{- end }}
|
||||
|
||||
### User:
|
||||
{{ .Prompt }}
|
||||
|
||||
### Response:
|
||||
"""
|
||||
|
||||
SYSTEM """<system message>"""
|
||||
```
|
||||
|
||||
### SYSTEM
|
||||
|
||||
The `SYSTEM` instruction specifies the system message to be used in the template, if applicable.
|
||||
The `SYSTEM` instruction specifies the system prompt to be used in the template, if applicable.
|
||||
|
||||
```modelfile
|
||||
SYSTEM """<system message>"""
|
||||
@@ -199,22 +185,7 @@ LICENSE """
|
||||
"""
|
||||
```
|
||||
|
||||
### MESSAGE
|
||||
|
||||
The `MESSAGE` instruction allows you to specify a message history for the model to use when responding:
|
||||
|
||||
```modelfile
|
||||
MESSAGE user Is Toronto in Canada?
|
||||
MESSAGE assistant yes
|
||||
MESSAGE user Is Sacramento in Canada?
|
||||
MESSAGE assistant no
|
||||
MESSAGE user Is Ontario in Canada?
|
||||
MESSAGE assistant yes
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- the **`Modelfile` is not case sensitive**. In the examples, uppercase instructions are used to make it easier to distinguish them from arguments.
|
||||
- Instructions can be in any order. In the examples, the `FROM` instruction is first to keep it easily readable.
|
||||
|
||||
[1]: https://ollama.com/library
|
||||
- the **`Modelfile` is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
|
||||
- Instructions can be in any order. In the examples, we start with FROM instruction to keep it easily readable.
|
||||
|
141
docs/openai.md
@@ -1,141 +0,0 @@
|
||||
# OpenAI compatibility
|
||||
|
||||
> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/jmorganca/ollama/blob/main/docs/api.md).
|
||||
|
||||
Ollama provides experimental compatibility with parts of the [OpenAI API](https://platform.openai.com/docs/api-reference) to help connect existing applications to Ollama.
|
||||
|
||||
## Usage
|
||||
|
||||
### OpenAI Python library
|
||||
|
||||
```python
|
||||
from openai import OpenAI
|
||||
|
||||
client = OpenAI(
|
||||
base_url='http://localhost:11434/v1/',
|
||||
|
||||
# required but ignored
|
||||
api_key='ollama',
|
||||
)
|
||||
|
||||
chat_completion = client.chat.completions.create(
|
||||
messages=[
|
||||
{
|
||||
'role': 'user',
|
||||
'content': 'Say this is a test',
|
||||
}
|
||||
],
|
||||
model='llama2',
|
||||
)
|
||||
```
|
||||
|
||||
### OpenAI JavaScript library
|
||||
|
||||
```javascript
|
||||
import OpenAI from 'openai'
|
||||
|
||||
const openai = new OpenAI({
|
||||
baseURL: 'http://localhost:11434/v1/',
|
||||
|
||||
// required but ignored
|
||||
apiKey: 'ollama',
|
||||
})
|
||||
|
||||
const chatCompletion = await openai.chat.completions.create({
|
||||
messages: [{ role: 'user', content: 'Say this is a test' }],
|
||||
model: 'llama2',
|
||||
})
|
||||
```
|
||||
|
||||
### `curl`
|
||||
|
||||
```
|
||||
curl http://localhost:11434/v1/chat/completions \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "llama2",
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are a helpful assistant."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Hello!"
|
||||
}
|
||||
]
|
||||
}'
|
||||
```
|
||||
|
||||
## Endpoints
|
||||
|
||||
### `/v1/chat/completions`
|
||||
|
||||
#### Supported features
|
||||
|
||||
- [x] Chat completions
|
||||
- [x] Streaming
|
||||
- [x] JSON mode
|
||||
- [x] Reproducible outputs
|
||||
- [ ] Vision
|
||||
- [ ] Function calling
|
||||
- [ ] Logprobs
|
||||
|
||||
#### Supported request fields
|
||||
|
||||
- [x] `model`
|
||||
- [x] `messages`
|
||||
- [x] Text `content`
|
||||
- [ ] Array of `content` parts
|
||||
- [x] `frequency_penalty`
|
||||
- [x] `presence_penalty`
|
||||
- [x] `response_format`
|
||||
- [x] `seed`
|
||||
- [x] `stop`
|
||||
- [x] `stream`
|
||||
- [x] `temperature`
|
||||
- [x] `top_p`
|
||||
- [x] `max_tokens`
|
||||
- [ ] `logit_bias`
|
||||
- [ ] `tools`
|
||||
- [ ] `tool_choice`
|
||||
- [ ] `user`
|
||||
- [ ] `n`
|
||||
|
||||
#### Notes
|
||||
|
||||
- Setting `seed` will always set `temperature` to `0`
|
||||
- `finish_reason` will always be `stop`
|
||||
- `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached
|
||||
|
||||
## Models
|
||||
|
||||
Before using a model, pull it locally with `ollama pull`:
|
||||
|
||||
```shell
|
||||
ollama pull llama2
|
||||
```
|
||||
|
||||
### Default model names
|
||||
|
||||
For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name:
|
||||
|
||||
```
|
||||
ollama cp llama2 gpt-3.5-turbo
|
||||
```
|
||||
|
||||
Afterwards, this new model name can be specified in the `model` field:
|
||||
|
||||
```shell
|
||||
curl http://localhost:11434/v1/chat/completions \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "gpt-3.5-turbo",
|
||||
"messages": [
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Hello!"
|
||||
}
|
||||
]
|
||||
}'
|
||||
```
|
111
docs/quantize.md
Normal file
@@ -0,0 +1,111 @@
|
||||
# How to Quantize a Model
|
||||
|
||||
Sometimes the model you want to work with is not available at [https://ollama.ai/library](https://ollama.ai/library).
|
||||
|
||||
## Figure out if we can run the model
|
||||
|
||||
Not all models will work with Ollama. There are a number of factors that go into whether we are able to work with the next cool model. First, it has to work with llama.cpp. Then we have to have implemented the features of llama.cpp that it requires. And then, sometimes, even with both of those, the model might not work...
|
||||
|
||||
1. What is the model you want to convert and upload?
|
||||
2. Visit the model's page on HuggingFace.
|
||||
3. Switch to the **Files and versions** tab.
|
||||
4. Click on the **config.json** file. If there is no config.json file, it may not work.
|
||||
5. Take note of the **architecture** list in the JSON file (a command-line sketch of this check appears after this list).
|
||||
6. Does any entry in the list match one of the following architectures?
|
||||
1. LlamaForCausalLM
|
||||
2. MistralForCausalLM
|
||||
3. RWForCausalLM
|
||||
4. FalconForCausalLM
|
||||
5. GPTNeoXForCausalLM
|
||||
6. GPTBigCodeForCausalLM
|
||||
7. If the answer is yes, then there is a good chance the model will run after being converted and quantized.
|
||||
8. An alternative to this process is to visit [https://caniquant.tvl.st](https://caniquant.tvl.st) and enter the org/modelname in the box and submit.
|
||||
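As a sketch of the check in step 5 (this assumes `jq` is installed; it is not part of the original steps):

```shell
jq '.architectures' config.json
```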
|
||||
At this point there are two processes you can use. You can either use a Docker container to convert and quantize, OR you can manually run the scripts. The Docker container is the easiest way to do it, but it requires you to have Docker installed on your machine. If you don't have Docker installed, you can follow the manual process.
|
||||
|
||||
## Convert and Quantize with Docker
|
||||
|
||||
Run `docker run --rm -v /path/to/model/repo:/repo ollama/quantize -q quantlevel /repo`. For instance, to quantize the latest Mistral 7B model, first clone it to your machine, then change into that directory and run:
|
||||
|
||||
```shell
|
||||
docker run --rm -v .:/repo ollama/quantize -q q4_0 /repo
|
||||
```
|
||||
|
||||
You can find the different quantization levels below under **Quantize the Model**.
|
||||
|
||||
This will output two files into the directory. The first is an f16.bin file, which is the model converted to GGUF. The second is a q4_0.bin file, which is the model quantized to 4 bits. You should rename it to something more descriptive.
|
||||
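For example (the new name is just illustrative):

```shell
mv q4_0.bin mistral-7b-v0.1-q4_0.bin
```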
|
||||
You can find the repository for the Docker container here: [https://github.com/mxyng/quantize](https://github.com/mxyng/quantize)
|
||||
|
||||
For instance, if you wanted to convert the Mistral 7B model to a Q4 quantized model, then you could go through the following steps:
|
||||
|
||||
1. First verify the model will potentially work.
|
||||
2. Now clone Mistral 7B to your machine. You can find the command to run when you click the three vertical dots button on the model page, then click **Clone Repository**.
|
||||
1. For this repo, the command is:
|
||||
|
||||
```shell
|
||||
git lfs install
|
||||
git clone https://huggingface.co/mistralai/Mistral-7B-v0.1
|
||||
```
|
||||
|
||||
2. Navigate into the new directory and run `docker run --rm -v .:/repo ollama/quantize -q q4_0 /repo`
|
||||
3. Now you can create a modelfile using the q4_0.bin file that was created.
|
||||
|
||||
## Convert and Quantize Manually
|
||||
|
||||
### Clone llama.cpp to your machine
|
||||
|
||||
If we know the model has a chance of working, then we need to convert and quantize. This is a matter of running two separate scripts in the llama.cpp project.
|
||||
|
||||
1. Decide where you want the llama.cpp repository on your machine.
|
||||
2. Navigate to that location and then run:
|
||||
[`git clone https://github.com/ggerganov/llama.cpp.git`](https://github.com/ggerganov/llama.cpp.git)
|
||||
1. If you don't have git installed, download this zip file and unzip it to that location: https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.zip
|
||||
3. Install the Python dependencies: `pip install torch transformers sentencepiece`
|
||||
4. Run `make` to build the project and the quantize executable.
|
||||
|
||||
### Convert the model to GGUF
|
||||
|
||||
1. Decide on the right convert script to run, based on the model architecture you found in the first section. A worked example follows this list.
|
||||
1. LlamaForCausalLM or MistralForCausalLM:
|
||||
run `python3 convert.py <modelfilename>`
|
||||
No need to specify fp16 or fp32.
|
||||
2. FalconForCausalLM or RWForCausalLM:
|
||||
run `python3 convert-falcon-hf-to-gguf.py <modelfilename> <fpsize>`
|
||||
fpsize depends on the weight size. 1 for fp16, 0 for fp32
|
||||
3. GPTNeoXForCausalLM:
|
||||
run `python3 convert-gptneox-hf-to-gguf.py <modelfilename> <fpsize>`
|
||||
fpsize depends on the weight size. 1 for fp16, 0 for fp32
|
||||
4. GPTBigCodeForCausalLM:
|
||||
run `python3 convert-starcoder-hf-to-gguf.py <modelfilename> <fpsize>`
|
||||
fpsize depends on the weight size. 1 for fp16, 0 for fp32
|
||||
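As a worked example of option 1 above (the model path is illustrative; substitute the directory or file you downloaded, and run the script from the llama.cpp directory):

```shell
python3 convert.py ./Mistral-7B-v0.1
```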
|
||||
### Quantize the model
|
||||
|
||||
If the model converted successfully, there is a good chance it will also quantize successfully. Now you need to decide on the quantization to use. We will always try to create all the quantizations and upload them to the library. You should decide which level is more important to you and quantize accordingly.
|
||||
|
||||
The quantization options are as follows. Note that some architectures such as Falcon do not support K quants.
|
||||
|
||||
- Q4_0
|
||||
- Q4_1
|
||||
- Q5_0
|
||||
- Q5_1
|
||||
- Q2_K
|
||||
- Q3_K
|
||||
- Q3_K_S
|
||||
- Q3_K_M
|
||||
- Q3_K_L
|
||||
- Q4_K
|
||||
- Q4_K_S
|
||||
- Q4_K_M
|
||||
- Q5_K
|
||||
- Q5_K_S
|
||||
- Q5_K_M
|
||||
- Q6_K
|
||||
- Q8_0
|
||||
|
||||
Run the following command: `quantize <converted model from above> <output file> <quantization type>`.
|
||||
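For example, mirroring the placeholders above (file names are illustrative; run from the llama.cpp directory where `make` built the executable):

```shell
./quantize converted.bin quantized.bin q4_0
```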
|
||||
## Now Create the Model
|
||||
|
||||
Now you can create the Ollama model. Refer to the [modelfile](./modelfile.md) doc for more information on doing that.
|
@@ -1,72 +0,0 @@
|
||||
# How to troubleshoot issues
|
||||
|
||||
Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command:
|
||||
|
||||
```shell
|
||||
cat ~/.ollama/logs/server.log
|
||||
```
|
||||
|
||||
On **Linux** systems with systemd, the logs can be found with this command:
|
||||
|
||||
```shell
|
||||
journalctl -u ollama
|
||||
```
|
||||
|
||||
When you run Ollama in a **container**, the logs go to stdout/stderr in the container:
|
||||
|
||||
```shell
|
||||
docker logs <container-name>
|
||||
```
|
||||
(Use `docker ps` to find the container name)
|
||||
|
||||
If manually running `ollama serve` in a terminal, the logs will be on that terminal.
|
||||
|
||||
When you run Ollama on **Windows**, logs and related files are kept in a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and typing in:
|
||||
- `explorer %LOCALAPPDATA%\Ollama` to view logs
|
||||
- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (The installer adds this to your user PATH)
|
||||
- `explorer %HOMEPATH%\.ollama` to browse where models and configuration is stored
|
||||
- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
|
||||
|
||||
To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu**, then run the following in a PowerShell terminal:
|
||||
```powershell
|
||||
$env:OLLAMA_DEBUG="1"
|
||||
& "ollama app.exe"
|
||||
```
|
||||
|
||||
Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.
|
||||
|
||||
## LLM libraries
|
||||
|
||||
Ollama includes multiple LLM libraries compiled for different GPUs and CPU
|
||||
vector features. Ollama tries to pick the best one based on the capabilities of
|
||||
your system. If this autodetection has problems, or you run into other problems
|
||||
(e.g. crashes involving your GPU), you can work around this by forcing a specific LLM
|
||||
library. `cpu_avx2` will perform the best, followed by `cpu_avx`; the slowest
|
||||
but most compatible is `cpu`. Rosetta emulation under macOS will work with the
|
||||
`cpu` library.
|
||||
|
||||
In the server log, you will see a message that looks something like this (varies
|
||||
from release to release):
|
||||
|
||||
```
|
||||
Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
|
||||
```
|
||||
|
||||
**Experimental LLM Library Override**
|
||||
|
||||
You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass
|
||||
autodetection. For example, if you have a CUDA card but want to force the
|
||||
CPU LLM library with AVX2 vector support, use:
|
||||
|
||||
```
|
||||
OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
|
||||
```
|
||||
|
||||
You can see what features your CPU has with the following command:
|
||||
```
|
||||
cat /proc/cpuinfo | grep flags | head -1
|
||||
```
|
||||
|
||||
## Known issues
|
||||
|
||||
* N/A
|
@@ -4,6 +4,5 @@ Here is a list of ways you can use Ollama with other tools to build interesting
|
||||
|
||||
- [Using LangChain with Ollama in JavaScript](./tutorials/langchainjs.md)
|
||||
- [Using LangChain with Ollama in Python](./tutorials/langchainpy.md)
|
||||
- [Running Ollama on NVIDIA Jetson Devices](./tutorials/nvidia-jetson.md)
|
||||
|
||||
Also be sure to check out the [examples](../examples) directory for more ways to use Ollama.
|
||||
Also be sure to check out the [examples](../examples) directory for more ways to use Ollama.
|
@@ -1,83 +0,0 @@
|
||||
# Running Ollama on Fly.io GPU Instances
|
||||
|
||||
Ollama runs with little to no configuration on [Fly.io GPU instances](https://fly.io/docs/gpus/gpu-quickstart/). If you don't have access to GPUs yet, you'll need to [apply for access](https://fly.io/gpu/) on the waitlist. Once you're accepted, you'll get an email with instructions on how to get started.
|
||||
|
||||
Create a new app with `fly apps create`:
|
||||
|
||||
```bash
|
||||
fly apps create
|
||||
```
|
||||
|
||||
Then create a `fly.toml` file in a new folder that looks like this:
|
||||
|
||||
```toml
|
||||
app = "sparkling-violet-709"
|
||||
primary_region = "ord"
|
||||
vm.size = "a100-40gb" # see https://fly.io/docs/gpus/gpu-quickstart/ for more info
|
||||
|
||||
[build]
|
||||
image = "ollama/ollama"
|
||||
|
||||
[http_service]
|
||||
internal_port = 11434
|
||||
force_https = false
|
||||
auto_stop_machines = true
|
||||
auto_start_machines = true
|
||||
min_machines_running = 0
|
||||
processes = ["app"]
|
||||
|
||||
[mounts]
|
||||
source = "models"
|
||||
destination = "/root/.ollama"
|
||||
initial_size = "100gb"
|
||||
```
|
||||
|
||||
Then create a [new private IPv6 address](https://fly.io/docs/reference/private-networking/#flycast-private-load-balancing) for your app:
|
||||
|
||||
```bash
|
||||
fly ips allocate-v6 --private
|
||||
```
|
||||
|
||||
Then deploy your app:
|
||||
|
||||
```bash
|
||||
fly deploy
|
||||
```
|
||||
|
||||
And finally you can access it interactively with a new Fly.io Machine:
|
||||
|
||||
```
|
||||
fly machine run -e OLLAMA_HOST=http://your-app-name.flycast --shell ollama/ollama
|
||||
```
|
||||
|
||||
```bash
|
||||
$ ollama run openchat:7b-v3.5-fp16
|
||||
>>> How do I bake chocolate chip cookies?
|
||||
To bake chocolate chip cookies, follow these steps:
|
||||
|
||||
1. Preheat the oven to 375°F (190°C) and line a baking sheet with parchment paper or silicone baking mat.
|
||||
|
||||
2. In a large bowl, mix together 1 cup of unsalted butter (softened), 3/4 cup granulated sugar, and 3/4
|
||||
cup packed brown sugar until light and fluffy.
|
||||
|
||||
3. Add 2 large eggs, one at a time, to the butter mixture, beating well after each addition. Stir in 1
|
||||
teaspoon of pure vanilla extract.
|
||||
|
||||
4. In a separate bowl, whisk together 2 cups all-purpose flour, 1/2 teaspoon baking soda, and 1/2 teaspoon
|
||||
salt. Gradually add the dry ingredients to the wet ingredients, stirring until just combined.
|
||||
|
||||
5. Fold in 2 cups of chocolate chips (or chunks) into the dough.
|
||||
|
||||
6. Drop rounded tablespoons of dough onto the prepared baking sheet, spacing them about 2 inches apart.
|
||||
|
||||
7. Bake for 10-12 minutes, or until the edges are golden brown. The centers should still be slightly soft.
|
||||
|
||||
8. Allow the cookies to cool on the baking sheet for a few minutes before transferring them to a wire rack
|
||||
to cool completely.
|
||||
|
||||
Enjoy your homemade chocolate chip cookies!
|
||||
```
|
||||
|
||||
When you set it up like this, it will automatically turn off when you're done using it. Then when you access it again, it will automatically turn back on. This is a great way to save money on GPU instances when you're not using them. If you want a persistent wake-on-use connection to your Ollama instance, you can set up a [connection to your Fly network using WireGuard](https://fly.io/docs/reference/private-networking/#discovering-apps-through-dns-on-a-wireguard-connection). Then you can access your Ollama instance at `http://your-app-name.flycast`.
|
||||
|
||||
And that's it!
|