Compare commits
1 commit (mattw/faq-)

| Author | SHA1 | Date |
|---|---|---|
|  | 1eefebe392 |  |
@@ -2,7 +2,7 @@
 ollama
 app
 dist
-llm/llama.cpp
+llm/llama.cpp/gguf
 .env
 .cache
 test_data
.github/workflows/test.yaml (162 changes; file deleted)

@@ -1,162 +0,0 @@
-name: test
-
-on:
-  pull_request:
-
-jobs:
-  generate:
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
-        arch: [amd64, arm64]
-        exclude:
-          - os: ubuntu-latest
-            arch: arm64
-          - os: windows-latest
-            arch: arm64
-    runs-on: ${{ matrix.os }}
-    env:
-      GOARCH: ${{ matrix.arch }}
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21'
-          cache: true
-      - run: go get ./...
-      - run: go generate -x ./...
-      - uses: actions/upload-artifact@v4
-        with:
-          name: ${{ matrix.os }}-${{ matrix.arch }}-libraries
-          path: llm/llama.cpp/build/**/lib/*
-  generate-cuda:
-    strategy:
-      matrix:
-        cuda-version:
-          - '11.8.0'
-    runs-on: linux
-    container: nvidia/cuda:${{ matrix.cuda-version }}-devel-ubuntu20.04
-    steps:
-      - run: |
-          apt-get update && apt-get install -y git build-essential curl
-          curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
-            | tar -zx -C /usr --strip-components 1
-        env:
-          DEBIAN_FRONTEND: noninteractive
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
-        with:
-          go-version: '1.21'
-          cache: true
-      - run: go get ./...
-      - run: |
-          git config --global --add safe.directory /__w/ollama/ollama
-          go generate -x ./...
-        env:
-          OLLAMA_SKIP_CPU_GENERATE: '1'
-      - uses: actions/upload-artifact@v4
-        with:
-          name: cuda-${{ matrix.cuda-version }}-libraries
-          path: llm/llama.cpp/build/**/lib/*
-  generate-rocm:
-    strategy:
-      matrix:
-        rocm-version:
-          - '5.7.1'
-          - '6.0'
-    runs-on: linux
-    container: rocm/dev-ubuntu-20.04:${{ matrix.rocm-version }}
-    steps:
-      - run: |
-          apt-get update && apt-get install -y git build-essential curl rocm-libs
-          curl -fsSL https://github.com/Kitware/CMake/releases/download/v3.28.1/cmake-3.28.1-linux-x86_64.tar.gz \
-            | tar -zx -C /usr --strip-components 1
-        env:
-          DEBIAN_FRONTEND: noninteractive
-      - uses: actions/checkout@v4
-      - uses: actions/setup-go@v4
-        with:
-          go-version: '1.21'
-          cache: true
-      - run: go get ./...
-      - run: |
-          git config --global --add safe.directory /__w/ollama/ollama
-          go generate -x ./...
-        env:
-          OLLAMA_SKIP_CPU_GENERATE: '1'
-      - uses: actions/upload-artifact@v4
-        with:
-          name: rocm-${{ matrix.rocm-version }}-libraries
-          path: llm/llama.cpp/build/**/lib/*
-  lint:
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
-        arch: [amd64, arm64]
-        exclude:
-          - os: ubuntu-latest
-            arch: arm64
-          - os: windows-latest
-            arch: arm64
-          - os: macos-latest
-            arch: amd64
-    runs-on: ${{ matrix.os }}
-    env:
-      GOARCH: ${{ matrix.arch }}
-      CGO_ENABLED: "1"
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21'
-          cache: false
-      - run: |
-          mkdir -p llm/llama.cpp/build/linux/${{ matrix.arch }}/stub/lib/
-          touch llm/llama.cpp/build/linux/${{ matrix.arch }}/stub/lib/stub.so
-        if: ${{ startsWith(matrix.os, 'ubuntu-') }}
-      - run: |
-          mkdir -p llm/llama.cpp/build/darwin/${{ matrix.arch }}/stub/lib/
-          touch llm/llama.cpp/build/darwin/${{ matrix.arch }}/stub/lib/stub.dylib
-          touch llm/llama.cpp/ggml-metal.metal
-        if: ${{ startsWith(matrix.os, 'macos-') }}
-      - run: |
-          mkdir -p llm/llama.cpp/build/windows/${{ matrix.arch }}/stub/lib/
-          touch llm/llama.cpp/build/windows/${{ matrix.arch }}/stub/lib/stub.dll
-        if: ${{ startsWith(matrix.os, 'windows-') }}
-      - uses: golangci/golangci-lint-action@v3
-  test:
-    needs: generate
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
-        arch: [amd64]
-        exclude:
-          - os: ubuntu-latest
-            arch: arm64
-          - os: windows-latest
-            arch: arm64
-    runs-on: ${{ matrix.os }}
-    env:
-      GOARCH: ${{ matrix.arch }}
-      CGO_ENABLED: "1"
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21'
-          cache: true
-      - run: go get
-      - uses: actions/download-artifact@v4
-        with:
-          name: ${{ matrix.os }}-${{ matrix.arch }}-libraries
-          path: llm/llama.cpp/build
-      - run: go build
-      - run: go test -v ./...
-      - uses: actions/upload-artifact@v4
-        with:
-          name: ${{ matrix.os }}-binaries
-          path: ollama
.gitignore (3 changes)

@@ -9,5 +9,4 @@ ggml-metal.metal
 .cache
 *.exe
 .idea
 test_data
-*.crt
.gitmodules (9 changes)

@@ -1,4 +1,5 @@
-[submodule "llama.cpp"]
-	path = llm/llama.cpp
+[submodule "llm/llama.cpp/gguf"]
+	path = llm/llama.cpp/gguf
 	url = https://github.com/ggerganov/llama.cpp.git
-	shallow = true
+	ignore = dirty
+	shallow = true
@@ -1,27 +0,0 @@
-run:
-  timeout: 5m
-linters:
-  enable:
-    - asasalint
-    - bidichk
-    - bodyclose
-    - containedctx
-    - contextcheck
-    - exportloopref
-    - gocheckcompilerdirectives
-    # FIXME: for some reason this errors on windows
-    # - gofmt
-    # - goimports
-    - misspell
-    - nilerr
-    - unused
-linters-settings:
-  errcheck:
-    # exclude the following functions since we don't generally
-    # need to be concerned with the returned errors
-    exclude-functions:
-      - encoding/binary.Read
-      - (*os.File).Seek
-      - (*bufio.Writer).WriteString
-      - (*github.com/spf13/pflag.FlagSet).Set
-      - (*github.com/jmorganca/ollama/llm.readSeekOffset).Seek
Dockerfile (138 changes)

@@ -1,135 +1,27 @@
-ARG GOLANG_VERSION=1.21.3
-ARG CMAKE_VERSION=3.22.1
-ARG CUDA_VERSION=11.3.1
+FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
 
-# Copy the minimal context we need to run the generate scripts
-FROM scratch AS llm-code
-COPY .git .git
-COPY .gitmodules .gitmodules
-COPY llm llm
+ARG TARGETARCH
+ARG GOFLAGS="'-ldflags=-w -s'"
 
-FROM --platform=linux/amd64 nvidia/cuda:$CUDA_VERSION-devel-centos7 AS cuda-build-amd64
-ARG CMAKE_VERSION
-COPY ./scripts/rh_linux_deps.sh /
-RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
-ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
-ARG CGO_CFLAGS
-RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
-
-FROM --platform=linux/arm64 nvidia/cuda:$CUDA_VERSION-devel-rockylinux8 AS cuda-build-arm64
-ARG CMAKE_VERSION
-COPY ./scripts/rh_linux_deps.sh /
-RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
-ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
-ARG CGO_CFLAGS
-RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
-
-FROM --platform=linux/amd64 rocm/dev-centos-7:5.7.1-complete AS rocm-5-build-amd64
-ARG CMAKE_VERSION
-COPY ./scripts/rh_linux_deps.sh /
-RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
-ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-ENV LIBRARY_PATH /opt/amdgpu/lib64
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
-ARG CGO_CFLAGS
-ARG AMDGPU_TARGETS
-RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
-
-FROM --platform=linux/amd64 rocm/dev-centos-7:6.0-complete AS rocm-6-build-amd64
-ARG CMAKE_VERSION
-COPY ./scripts/rh_linux_deps.sh /
-RUN CMAKE_VERSION=${CMAKE_VERSION} sh /rh_linux_deps.sh
-ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-ENV LIBRARY_PATH /opt/amdgpu/lib64
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
-ARG CGO_CFLAGS
-ARG AMDGPU_TARGETS
-RUN OLLAMA_SKIP_CPU_GENERATE=1 sh gen_linux.sh
-
-FROM --platform=linux/amd64 centos:7 AS cpu-builder-amd64
-ARG CMAKE_VERSION
-ARG GOLANG_VERSION
-COPY ./scripts/rh_linux_deps.sh /
-RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
-ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-ARG OLLAMA_CUSTOM_CPU_DEFS
-ARG CGO_CFLAGS
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
-
-FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu-build-amd64
-RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
-FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx-build-amd64
-RUN OLLAMA_CPU_TARGET="cpu_avx" sh gen_linux.sh
-FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx2-build-amd64
-RUN OLLAMA_CPU_TARGET="cpu_avx2" sh gen_linux.sh
-
-FROM --platform=linux/arm64 centos:7 AS cpu-build-arm64
-ARG CMAKE_VERSION
-ARG GOLANG_VERSION
-COPY ./scripts/rh_linux_deps.sh /
-RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh
-ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH
-COPY --from=llm-code / /go/src/github.com/jmorganca/ollama/
-WORKDIR /go/src/github.com/jmorganca/ollama/llm/generate
-# Note, we only build the "base" CPU variant on arm since avx/avx2 are x86 features
-ARG OLLAMA_CUSTOM_CPU_DEFS
-ARG CGO_CFLAGS
-RUN OLLAMA_CPU_TARGET="cpu" sh gen_linux.sh
-
-# Intermediate stage used for ./scripts/build_linux.sh
-FROM --platform=linux/amd64 cpu-build-amd64 AS build-amd64
-ENV CGO_ENABLED 1
 WORKDIR /go/src/github.com/jmorganca/ollama
+
+RUN apt-get update && apt-get install -y git build-essential cmake
+ADD https://dl.google.com/go/go1.21.3.linux-$TARGETARCH.tar.gz /tmp/go1.21.3.tar.gz
+RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
+
 COPY . .
-COPY --from=cpu_avx-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=cpu_avx2-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=cuda-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=rocm-5-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-COPY --from=rocm-6-build-amd64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-ARG GOFLAGS
-ARG CGO_CFLAGS
-RUN go build .
-
-# Intermediate stage used for ./scripts/build_linux.sh
-FROM --platform=linux/arm64 cpu-build-arm64 AS build-arm64
-ENV CGO_ENABLED 1
-ARG GOLANG_VERSION
-WORKDIR /go/src/github.com/jmorganca/ollama
-COPY . .
-COPY --from=cuda-build-arm64 /go/src/github.com/jmorganca/ollama/llm/llama.cpp/build/linux/ llm/llama.cpp/build/linux/
-ARG GOFLAGS
-ARG CGO_CFLAGS
-RUN go build .
-
-# Runtime stages
-FROM --platform=linux/amd64 ubuntu:22.04 as runtime-amd64
+ENV GOARCH=$TARGETARCH
+ENV GOFLAGS=$GOFLAGS
+RUN /usr/local/go/bin/go generate ./... \
+    && /usr/local/go/bin/go build .
+
+FROM ubuntu:22.04
 RUN apt-get update && apt-get install -y ca-certificates
-COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
-FROM --platform=linux/arm64 ubuntu:22.04 as runtime-arm64
-RUN apt-get update && apt-get install -y ca-certificates
-COPY --from=build-arm64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
-
-# Radeon images are much larger so we keep it distinct from the CPU/CUDA image
-FROM --platform=linux/amd64 rocm/dev-centos-7:5.7.1-complete as runtime-rocm
-RUN update-pciids
-COPY --from=build-amd64 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
+COPY --from=0 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
 EXPOSE 11434
 ENV OLLAMA_HOST 0.0.0.0
 
-ENTRYPOINT ["/bin/ollama"]
-CMD ["serve"]
-
-FROM runtime-$TARGETARCH
-EXPOSE 11434
-ENV OLLAMA_HOST 0.0.0.0
-ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+# set some environment variable for better NVIDIA compatibility
+ENV PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
 ENV LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64
 ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
Dockerfile.build (74 changes; new file)

@@ -0,0 +1,74 @@
+# Ubuntu 20.04 amd64 dependencies
+FROM --platform=linux/amd64 ubuntu:20.04 AS base-amd64
+ARG CUDA_VERSION=11.3.1-1
+ARG CMAKE_VERSION=3.22.1
+# ROCm only supports amd64
+ARG ROCM_VERSION=6.0
+ARG CLBLAST_VER=1.6.1
+
+# Note: https://rocm.docs.amd.com/en/latest/release/user_kernel_space_compat_matrix.html
+RUN apt-get update && \
+    apt-get install -y wget gnupg && \
+    wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin && \
+    mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 && \
+    apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub && \
+    echo "deb [by-hash=no] https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /" > /etc/apt/sources.list.d/cuda.list && \
+    wget "https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.sh" -O /tmp/cmake-installer.sh && \
+    chmod +x /tmp/cmake-installer.sh && /tmp/cmake-installer.sh --skip-license --prefix=/usr && \
+    mkdir --parents --mode=0755 /etc/apt/keyrings && \
+    wget https://repo.radeon.com/rocm/rocm.gpg.key -O - | gpg --dearmor > /etc/apt/keyrings/rocm.gpg && \
+    echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/rocm.gpg] https://repo.radeon.com/rocm/apt/${ROCM_VERSION} focal main" > /etc/apt/sources.list.d/rocm.list && \
+    echo "Package: *" > /etc/apt/preferences.d/rocm-pin-600 && \
+    echo "Pin: release o=repo.radeon.com" >> /etc/apt/preferences.d/rocm-pin-600 && \
+    echo "Pin-Priority: 600" >> /etc/apt/preferences.d/rocm-pin-600 && \
+    apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get -y install cuda=${CUDA_VERSION} rocm-hip-libraries rocm-device-libs rocm-libs rocm-ocl-icd rocm-hip-sdk rocm-hip-libraries rocm-cmake rocm-clang-ocl rocm-dev
+
+# CLBlast
+RUN wget -qO- https://github.com/CNugteren/CLBlast/archive/refs/tags/${CLBLAST_VER}.tar.gz | tar zxv -C /tmp/ && \
+    cd /tmp/CLBlast-${CLBLAST_VER} && mkdir build && cd build && cmake .. && make && make install
+
+ENV ROCM_PATH=/opt/rocm
+
+# Ubuntu 22.04 arm64 dependencies
+FROM --platform=linux/arm64 ubuntu:20.04 AS base-arm64
+ARG CUDA_VERSION=11.3.1-1
+ARG CMAKE_VERSION=3.27.6
+RUN apt-get update && \
+    apt-get install -y wget gnupg && \
+    wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/sbsa/cuda-ubuntu2004.pin && \
+    mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 && \
+    apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/sbsa//3bf863cc.pub && \
+    echo "deb [by-hash=no] https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/sbsa/ /" > /etc/apt/sources.list.d/cuda.list && \
+    wget "https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-aarch64.sh" -O /tmp/cmake-installer.sh && \
+    chmod +x /tmp/cmake-installer.sh && /tmp/cmake-installer.sh --skip-license --prefix=/usr && \
+    apt-get update && \
+    apt-cache madison cuda && \
+    DEBIAN_FRONTEND=noninteractive apt-get -y install cuda=${CUDA_VERSION}
+
+FROM base-${TARGETARCH}
+ARG TARGETARCH
+ARG GOFLAGS="'-ldflags -w -s'"
+ARG CGO_CFLAGS
+ARG GOLANG_VERSION=1.21.3
+
+# Common toolchain
+RUN apt-get update && \
+    DEBIAN_FRONTEND=noninteractive apt-get install -y gcc-10 g++-10 cpp-10 git ocl-icd-opencl-dev && \
+    update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10
+
+# install go
+ADD https://dl.google.com/go/go${GOLANG_VERSION}.linux-$TARGETARCH.tar.gz /tmp/go${GOLANG_VERSION}.tar.gz
+RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go${GOLANG_VERSION}.tar.gz
+
+# build the final binary
+WORKDIR /go/src/github.com/jmorganca/ollama
+COPY . .
+
+ENV GOOS=linux
+ENV GOARCH=$TARGETARCH
+ENV GOFLAGS=$GOFLAGS
+ENV CGO_CFLAGS=${CGO_CFLAGS}
+
+RUN /usr/local/go/bin/go generate ./... && \
+    /usr/local/go/bin/go build .
README.md (56 changes)

@@ -1,5 +1,8 @@
 <div align="center">
-  <img alt="ollama" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
+  <picture>
+    <source media="(prefers-color-scheme: dark)" height="200px" srcset="https://github.com/jmorganca/ollama/assets/3325447/56ea1849-1284-4645-8970-956de6e51c3c">
+    <img alt="logo" height="200px" src="https://github.com/jmorganca/ollama/assets/3325447/0d0b44e2-8f4a-4e99-9b52-a5c1c741c8f7">
+  </picture>
 </div>
 
 # Ollama

@@ -10,16 +13,16 @@ Get up and running with large language models locally.
 
 ### macOS
 
-[Download](https://ollama.com/download/Ollama-darwin.zip)
+[Download](https://ollama.ai/download/Ollama-darwin.zip)
 
-### Windows preview
+### Windows
 
-[Download](https://ollama.com/download/OllamaSetup.exe)
+Coming soon! For now, you can install Ollama on Windows via WSL2.
 
-### Linux
+### Linux & WSL2
 
 ```
-curl -fsSL https://ollama.com/install.sh | sh
+curl https://ollama.ai/install.sh | sh
 ```
 
 [Manual install instructions](https://github.com/jmorganca/ollama/blob/main/docs/linux.md)

@@ -28,14 +31,9 @@ curl -fsSL https://ollama.com/install.sh | sh
 
 The official [Ollama Docker image](https://hub.docker.com/r/ollama/ollama) `ollama/ollama` is available on Docker Hub.
 
-### Libraries
-
-- [ollama-python](https://github.com/ollama/ollama-python)
-- [ollama-js](https://github.com/ollama/ollama-js)
-
 ## Quickstart
 
-To run and chat with [Llama 2](https://ollama.com/library/llama2):
+To run and chat with [Llama 2](https://ollama.ai/library/llama2):
 
 ```
 ollama run llama2

@@ -43,15 +41,14 @@ ollama run llama2
 
 ## Model library
 
-Ollama supports a list of models available on [ollama.com/library](https://ollama.com/library 'ollama model library')
+Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library 'ollama model library')
 
-Here are some example models that can be downloaded:
+Here are some example open-source models that can be downloaded:
 
 | Model              | Parameters | Size  | Download                       |
 | ------------------ | ---------- | ----- | ------------------------------ |
 | Llama 2            | 7B         | 3.8GB | `ollama run llama2`            |
 | Mistral            | 7B         | 4.1GB | `ollama run mistral`           |
-| Dolphin Phi        | 2.7B       | 1.6GB | `ollama run dolphin-phi`       |
 | Phi-2              | 2.7B       | 1.7GB | `ollama run phi`               |
 | Neural Chat        | 7B         | 4.1GB | `ollama run neural-chat`       |
 | Starling           | 7B         | 4.1GB | `ollama run starling-lm`       |

@@ -65,7 +62,7 @@ Here are some example models that can be downloaded:
 
 > Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
 
-## Customize a model
+## Customize your own model
 
 ### Import from GGUF
 

@@ -200,21 +197,18 @@ brew install cmake go
 ```
 
 Then generate dependencies:
 
 ```
 go generate ./...
 ```
 
 Then build the binary:
 
 ```
 go build .
 ```
 
 More detailed instructions can be found in the [developer guide](https://github.com/jmorganca/ollama/blob/main/docs/development.md)
 
-### Running local builds
-
+### Running local builds
 Next, start the server:
 
 ```

@@ -256,21 +250,17 @@ See the [API documentation](./docs/api.md) for all endpoints.
 ## Community Integrations
 
 ### Web & Desktop
 
 - [Bionic GPT](https://github.com/bionic-gpt/bionic-gpt)
 - [HTML UI](https://github.com/rtcfirefly/ollama-ui)
 - [Chatbot UI](https://github.com/ivanfioravanti/chatbot-ollama)
 - [Typescript UI](https://github.com/ollama-interface/Ollama-Gui?tab=readme-ov-file)
 - [Minimalistic React UI for Ollama Models](https://github.com/richawo/minimal-llm-ui)
-- [Open WebUI](https://github.com/open-webui/open-webui)
+- [Web UI](https://github.com/ollama-webui/ollama-webui)
 - [Ollamac](https://github.com/kevinhermawan/Ollamac)
 - [big-AGI](https://github.com/enricoros/big-agi/blob/main/docs/config-ollama.md)
 - [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)
 - [Amica](https://github.com/semperai/amica)
 - [chatd](https://github.com/BruceMacD/chatd)
-- [Ollama-SwiftUI](https://github.com/kghandour/Ollama-SwiftUI)
-- [MindMac](https://mindmac.app)
-- [NextJS Web Interface for Ollama](https://github.com/jakobhoeg/nextjs-ollama-llm-ui)
 
 ### Terminal
 

@@ -279,14 +269,10 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Emacs client](https://github.com/zweifisch/ollama)
 - [gen.nvim](https://github.com/David-Kunz/gen.nvim)
 - [ollama.nvim](https://github.com/nomnivore/ollama.nvim)
-- [ollama-chat.nvim](https://github.com/gerazov/ollama-chat.nvim)
 - [ogpt.nvim](https://github.com/huynle/ogpt.nvim)
 - [gptel Emacs client](https://github.com/karthink/gptel)
 - [Oatmeal](https://github.com/dustinblackman/oatmeal)
 - [cmdh](https://github.com/pgibler/cmdh)
-- [tenere](https://github.com/pythops/tenere)
-- [llm-ollama](https://github.com/taketwo/llm-ollama) for [Datasette's LLM CLI](https://llm.datasette.io/en/stable/).
-- [ShellOracle](https://github.com/djcopley/ShellOracle)
 
 ### Database
 

@@ -295,17 +281,14 @@ See the [API documentation](./docs/api.md) for all endpoints.
 ### Package managers
 
 - [Pacman](https://archlinux.org/packages/extra/x86_64/ollama/)
-- [Helm Chart](https://artifacthub.io/packages/helm/ollama-helm/ollama)
 
 ### Libraries
 
 - [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
 - [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
 - [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
-- [LangChain4j](https://github.com/langchain4j/langchain4j/tree/main/langchain4j-ollama)
 - [LiteLLM](https://github.com/BerriAI/litellm)
 - [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
-- [Ollama for Ruby](https://github.com/gbaptista/ollama-ai)
 - [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
 - [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
 - [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)

@@ -313,15 +296,11 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Ollama for Dart](https://github.com/breitburg/dart-ollama)
 - [Ollama for Laravel](https://github.com/cloudstudio/ollama-laravel)
 - [LangChainDart](https://github.com/davidmigloz/langchain_dart)
-- [Semantic Kernel - Python](https://github.com/microsoft/semantic-kernel/tree/main/python/semantic_kernel/connectors/ai/ollama)
-- [Haystack](https://github.com/deepset-ai/haystack-integrations/blob/main/integrations/ollama.md)
-- [Ollama for R - rollama](https://github.com/JBGruber/rollama)
-- [Ollama-ex for Elixir](https://github.com/lebrunel/ollama-ex)
 
 ### Mobile
 
 - [Enchanted](https://github.com/AugustDev/enchanted)
-- [Maid](https://github.com/Mobile-Artificial-Intelligence/maid)
+- [Maid](https://github.com/danemadsen/Maid)
 
 ### Extensions & Plugins
 

@@ -337,6 +316,3 @@ See the [API documentation](./docs/api.md) for all endpoints.
 - [Rivet plugin](https://github.com/abrenneke/rivet-plugin-ollama)
 - [Llama Coder](https://github.com/ex3ndr/llama-coder) (Copilot alternative using Ollama)
 - [Obsidian BMO Chatbot plugin](https://github.com/longy2k/obsidian-bmo-chatbot)
-- [Open Interpreter](https://docs.openinterpreter.com/language-model-setup/local-models/ollama)
-- [twinny](https://github.com/rjmacarthy/twinny) (Copilot and Copilot chat alternative using Ollama)
-- [Wingman-AI](https://github.com/RussellCanfield/wingman-ai) (Copilot code and chat alternative using Ollama and HuggingFace)
@@ -309,13 +309,6 @@ func (c *Client) Heartbeat(ctx context.Context) error {
 	}
 	return nil
 }
 
-func (c *Client) Embeddings(ctx context.Context, req *EmbeddingRequest) (*EmbeddingResponse, error) {
-	var resp EmbeddingResponse
-	if err := c.do(ctx, http.MethodPost, "/api/embeddings", req, &resp); err != nil {
-		return nil, err
-	}
-	return &resp, nil
-}
 func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
 	if err := c.do(ctx, http.MethodHead, fmt.Sprintf("/api/blobs/%s", digest), nil, nil); err != nil {
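The `-` side of this hunk carries an `Embeddings` method that posts an `EmbeddingRequest` to `/api/embeddings`. A minimal sketch of calling it; `ClientFromEnvironment` and the `Embedding` field on the response are assumptions about the surrounding `api` package, not shown in this hunk:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// ClientFromEnvironment is an assumption; the hunk shows only the
	// Embeddings method, not how a Client is constructed.
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}

	// This POSTs to /api/embeddings, as in the method above.
	resp, err := client.Embeddings(context.Background(), &api.EmbeddingRequest{
		Model:  "llama2",
		Prompt: "Why is the sky blue?",
	})
	if err != nil {
		log.Fatal(err)
	}
	// Embedding as a slice of floats is assumed here.
	fmt.Println(len(resp.Embedding))
}
```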
api/client.py (284 changes; new file)

@@ -0,0 +1,284 @@
+import os
+import json
+import requests
+import os
+import hashlib
+import json
+from pathlib import Path
+
+BASE_URL = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
+
+# Generate a response for a given prompt with a provided model. This is a streaming endpoint, so will be a series of responses.
+# The final response object will include statistics and additional data from the request. Use the callback function to override
+# the default handler.
+def generate(model_name, prompt, system=None, template=None, format="", context=None, options=None, callback=None):
+    try:
+        url = f"{BASE_URL}/api/generate"
+        payload = {
+            "model": model_name,
+            "prompt": prompt,
+            "system": system,
+            "template": template,
+            "context": context,
+            "options": options,
+            "format": format,
+        }
+
+        # Remove keys with None values
+        payload = {k: v for k, v in payload.items() if v is not None}
+
+        with requests.post(url, json=payload, stream=True) as response:
+            response.raise_for_status()
+
+            # Creating a variable to hold the context history of the final chunk
+            final_context = None
+
+            # Variable to hold concatenated response strings if no callback is provided
+            full_response = ""
+
+            # Iterating over the response line by line and displaying the details
+            for line in response.iter_lines():
+                if line:
+                    # Parsing each line (JSON chunk) and extracting the details
+                    chunk = json.loads(line)
+
+                    # If a callback function is provided, call it with the chunk
+                    if callback:
+                        callback(chunk)
+                    else:
+                        # If this is not the last chunk, add the "response" field value to full_response and print it
+                        if not chunk.get("done"):
+                            response_piece = chunk.get("response", "")
+                            full_response += response_piece
+                            print(response_piece, end="", flush=True)
+
+                    # Check if it's the last chunk (done is true)
+                    if chunk.get("done"):
+                        final_context = chunk.get("context")
+
+            # Return the full response and the final context
+            return full_response, final_context
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return None, None
+
+
+# Create a blob file on the server if it doesn't exist.
+def create_blob(digest, file_path):
+    url = f"{BASE_URL}/api/blobs/{digest}"
+
+    # Check if the blob exists
+    response = requests.head(url)
+    if response.status_code != 404:
+        return  # Blob already exists, no need to upload
+    response.raise_for_status()
+
+    # Upload the blob
+    with open(file_path, 'rb') as file_data:
+        requests.post(url, data=file_data)
+
+
+# Create a model from a Modelfile. Use the callback function to override the default handler.
+def create(model_name, filename, callback=None):
+    try:
+        file_path = Path(filename).expanduser().resolve()
+        processed_lines = []
+
+        # Read and process the modelfile
+        with open(file_path, 'r') as f:
+            for line in f:
+                # Skip empty or whitespace-only lines
+                if not line.strip():
+                    continue
+
+                command, args = line.split(maxsplit=1)
+
+                if command.upper() in ["FROM", "ADAPTER"]:
+                    path = Path(args.strip()).expanduser()
+
+                    # Check if path is relative and resolve it
+                    if not path.is_absolute():
+                        path = (file_path.parent / path)
+
+                    # Skip if file does not exist for "model", this is handled by the server
+                    if not path.exists():
+                        processed_lines.append(line)
+                        continue
+
+                    # Calculate SHA-256 hash
+                    with open(path, 'rb') as bin_file:
+                        hash = hashlib.sha256()
+                        hash.update(bin_file.read())
+                        blob = f"sha256:{hash.hexdigest()}"
+
+                    # Add the file to the remote server
+                    create_blob(blob, path)
+
+                    # Replace path with digest in the line
+                    line = f"{command} @{blob}\n"
+
+                processed_lines.append(line)
+
+        # Combine processed lines back into a single string
+        modelfile_content = '\n'.join(processed_lines)
+
+        url = f"{BASE_URL}/api/create"
+        payload = {"name": model_name, "modelfile": modelfile_content}
+
+        # Making a POST request with the stream parameter set to True to handle streaming responses
+        with requests.post(url, json=payload, stream=True) as response:
+            response.raise_for_status()
+            # Iterating over the response line by line and displaying the status
+            for line in response.iter_lines():
+                if line:
+                    chunk = json.loads(line)
+                    if callback:
+                        callback(chunk)
+                    else:
+                        print(f"Status: {chunk.get('status')}")
+
+    except Exception as e:
+        print(f"An error occurred: {e}")
+
+
+# Pull a model from a the model registry. Cancelled pulls are resumed from where they left off, and multiple
+# calls to will share the same download progress. Use the callback function to override the default handler.
+def pull(model_name, insecure=False, callback=None):
+    try:
+        url = f"{BASE_URL}/api/pull"
+        payload = {
+            "name": model_name,
+            "insecure": insecure
+        }
+
+        # Making a POST request with the stream parameter set to True to handle streaming responses
+        with requests.post(url, json=payload, stream=True) as response:
+            response.raise_for_status()
+
+            # Iterating over the response line by line and displaying the details
+            for line in response.iter_lines():
+                if line:
+                    # Parsing each line (JSON chunk) and extracting the details
+                    chunk = json.loads(line)
+
+                    # If a callback function is provided, call it with the chunk
+                    if callback:
+                        callback(chunk)
+                    else:
+                        # Print the status message directly to the console
+                        print(chunk.get('status', ''), end='', flush=True)
+
+                        # If there's layer data, you might also want to print that (adjust as necessary)
+                        if 'digest' in chunk:
+                            print(f" - Digest: {chunk['digest']}", end='', flush=True)
+                            print(f" - Total: {chunk['total']}", end='', flush=True)
+                            print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
+                        else:
+                            print()
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+
+# Push a model to the model registry. Use the callback function to override the default handler.
+def push(model_name, insecure=False, callback=None):
+    try:
+        url = f"{BASE_URL}/api/push"
+        payload = {
+            "name": model_name,
+            "insecure": insecure
+        }
+
+        # Making a POST request with the stream parameter set to True to handle streaming responses
+        with requests.post(url, json=payload, stream=True) as response:
+            response.raise_for_status()
+
+            # Iterating over the response line by line and displaying the details
+            for line in response.iter_lines():
+                if line:
+                    # Parsing each line (JSON chunk) and extracting the details
+                    chunk = json.loads(line)
+
+                    # If a callback function is provided, call it with the chunk
+                    if callback:
+                        callback(chunk)
+                    else:
+                        # Print the status message directly to the console
+                        print(chunk.get('status', ''), end='', flush=True)
+
+                        # If there's layer data, you might also want to print that (adjust as necessary)
+                        if 'digest' in chunk:
+                            print(f" - Digest: {chunk['digest']}", end='', flush=True)
+                            print(f" - Total: {chunk['total']}", end='', flush=True)
+                            print(f" - Completed: {chunk['completed']}", end='\n', flush=True)
+                        else:
+                            print()
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+
+# List models that are available locally.
+def list():
+    try:
+        response = requests.get(f"{BASE_URL}/api/tags")
+        response.raise_for_status()
+        data = response.json()
+        models = data.get('models', [])
+        return models
+
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return None
+
+# Copy a model. Creates a model with another name from an existing model.
+def copy(source, destination):
+    try:
+        # Create the JSON payload
+        payload = {
+            "source": source,
+            "destination": destination
+        }
+
+        response = requests.post(f"{BASE_URL}/api/copy", json=payload)
+        response.raise_for_status()
+
+        # If the request was successful, return a message indicating that the copy was successful
+        return "Copy successful"
+
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return None
+
+# Delete a model and its data.
+def delete(model_name):
+    try:
+        url = f"{BASE_URL}/api/delete"
+        payload = {"name": model_name}
+        response = requests.delete(url, json=payload)
+        response.raise_for_status()
+        return "Delete successful"
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return None
+
+# Show info about a model.
+def show(model_name):
+    try:
+        url = f"{BASE_URL}/api/show"
+        payload = {"name": model_name}
+        response = requests.post(url, json=payload)
+        response.raise_for_status()
+
+        # Parse the JSON response and return it
+        data = response.json()
+        return data
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return None
+
+def heartbeat():
+    try:
+        url = f"{BASE_URL}/"
+        response = requests.head(url)
+        response.raise_for_status()
+        return "Ollama is running"
+    except requests.exceptions.RequestException as e:
+        print(f"An error occurred: {e}")
+        return "Ollama is not running"
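The `generate` function above also documents the wire protocol: `/api/generate` streams newline-delimited JSON chunks, each carrying a `response` fragment, until a chunk reports `"done": true`. A sketch of the same call from Go using only the standard library; the model name and prompt are placeholders:

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	payload, _ := json.Marshal(map[string]any{
		"model":  "llama2",
		"prompt": "Why is the sky blue?",
	})
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each line is one JSON chunk, mirroring the iter_lines loop above.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		var chunk map[string]any
		if err := json.Unmarshal(scanner.Bytes(), &chunk); err != nil {
			panic(err)
		}
		if piece, ok := chunk["response"].(string); ok {
			fmt.Print(piece)
		}
		if done, _ := chunk["done"].(bool); done {
			break
		}
	}
}
```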
api/types.go (70 changes)

@@ -34,26 +34,24 @@ func (e StatusError) Error() string {
 type ImageData []byte
 
 type GenerateRequest struct {
 	Model    string `json:"model"`
 	Prompt   string `json:"prompt"`
 	System   string `json:"system"`
 	Template string `json:"template"`
 	Context  []int  `json:"context,omitempty"`
 	Stream   *bool  `json:"stream,omitempty"`
 	Raw      bool   `json:"raw,omitempty"`
 	Format   string `json:"format"`
-	KeepAlive *Duration   `json:"keep_alive,omitempty"`
-	Images    []ImageData `json:"images,omitempty"`
+	Images []ImageData `json:"images,omitempty"`
 
 	Options map[string]interface{} `json:"options"`
 }
 
 type ChatRequest struct {
 	Model    string    `json:"model"`
 	Messages []Message `json:"messages"`
 	Stream   *bool     `json:"stream,omitempty"`
 	Format   string    `json:"format"`
-	KeepAlive *Duration `json:"keep_alive,omitempty"`
 
 	Options map[string]interface{} `json:"options"`
 }
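The `-` side of this hunk carries a `KeepAlive *Duration` field (tagged `keep_alive,omitempty`) on both request types. A minimal sketch of constructing a request with it; it assumes `Duration` embeds `time.Duration`, which the `UnmarshalJSON` hunk later in this file suggests, and the model name is a placeholder:

```go
package main

import (
	"fmt"
	"time"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// KeepAlive, judging by its name and the Duration hunk below, controls
	// how long a value of time the server associates with this request;
	// Duration embedding time.Duration is an assumption here.
	req := api.GenerateRequest{
		Model:     "llama2",
		Prompt:    "Why is the sky blue?",
		KeepAlive: &api.Duration{Duration: 10 * time.Minute},
	}
	fmt.Println(req.Model, req.KeepAlive.Duration)
}
```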
@@ -128,9 +126,8 @@ type Runner struct {
 }
 
 type EmbeddingRequest struct {
 	Model  string `json:"model"`
 	Prompt string `json:"prompt"`
-	KeepAlive *Duration `json:"keep_alive,omitempty"`
 
 	Options map[string]interface{} `json:"options"`
 }

@@ -140,30 +137,17 @@ type EmbeddingResponse struct {
 }
 
 type CreateRequest struct {
-	Model     string `json:"model"`
+	Name      string `json:"name"`
 	Path      string `json:"path"`
 	Modelfile string `json:"modelfile"`
 	Stream    *bool  `json:"stream,omitempty"`
-
-	// Name is deprecated, see Model
-	Name string `json:"name"`
 }
 
 type DeleteRequest struct {
-	Model string `json:"model"`
-
-	// Name is deprecated, see Model
 	Name string `json:"name"`
 }
 
 type ShowRequest struct {
-	Model    string `json:"model"`
-	System   string `json:"system"`
-	Template string `json:"template"`
-
-	Options map[string]interface{} `json:"options"`
-
-	// Name is deprecated, see Model
 	Name string `json:"name"`
 }
 

@@ -174,7 +158,6 @@ type ShowResponse struct {
 	Template string       `json:"template,omitempty"`
 	System   string       `json:"system,omitempty"`
 	Details  ModelDetails `json:"details,omitempty"`
-	Messages []Message    `json:"messages,omitempty"`
 }
 
 type CopyRequest struct {

@@ -183,14 +166,11 @@ type CopyRequest struct {
 }
 
 type PullRequest struct {
-	Model    string `json:"model"`
+	Name     string `json:"name"`
 	Insecure bool   `json:"insecure,omitempty"`
 	Username string `json:"username"`
 	Password string `json:"password"`
 	Stream   *bool  `json:"stream,omitempty"`
-
-	// Name is deprecated, see Model
-	Name string `json:"name"`
 }
 
 type ProgressResponse struct {

@@ -201,14 +181,11 @@ type ProgressResponse struct {
 }
 
 type PushRequest struct {
-	Model    string `json:"model"`
+	Name     string `json:"name"`
 	Insecure bool   `json:"insecure,omitempty"`
 	Username string `json:"username"`
 	Password string `json:"password"`
 	Stream   *bool  `json:"stream,omitempty"`
-
-	// Name is deprecated, see Model
-	Name string `json:"name"`
 }
 
 type ListResponse struct {

@@ -217,7 +194,6 @@ type ListResponse struct {
 
 type ModelResponse struct {
 	Name       string    `json:"name"`
-	Model      string    `json:"model"`
 	ModifiedAt time.Time `json:"modified_at"`
 	Size       int64     `json:"size"`
 	Digest     string    `json:"digest"`

@@ -240,7 +216,6 @@ type GenerateResponse struct {
 }
 
 type ModelDetails struct {
-	ParentModel string   `json:"parent_model"`
 	Format      string   `json:"format"`
 	Family      string   `json:"family"`
 	Families    []string `json:"families"`

@@ -415,18 +390,15 @@ func (d *Duration) UnmarshalJSON(b []byte) (err error) {
 	switch t := v.(type) {
 	case float64:
 		if t < 0 {
-			d.Duration = time.Duration(math.MaxInt64)
-		} else {
-			d.Duration = time.Duration(t * float64(time.Second))
+			t = math.MaxFloat64
 		}
+
+		d.Duration = time.Duration(t)
 	case string:
 		d.Duration, err = time.ParseDuration(t)
 		if err != nil {
 			return err
 		}
-		if d.Duration < 0 {
-			d.Duration = time.Duration(math.MaxInt64)
-		}
 	}
 
 	return nil
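The two sides of the `Duration` hunk interpret numeric `keep_alive` values differently: the `-` side treats a bare JSON number as seconds and maps any negative value (and any negative parsed string) to `math.MaxInt64` nanoseconds, effectively "never expire", while the `+` side clamps negatives to `math.MaxFloat64` and passes the number straight to `time.Duration`, i.e. nanoseconds. A sketch of the `-` side's behavior, assuming `Duration` embeds `time.Duration` as the hunk suggests:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	for _, in := range []string{`"10m"`, `300`, `-1`} {
		var d api.Duration
		if err := json.Unmarshal([]byte(in), &d); err != nil {
			panic(err)
		}
		// "10m" parses via time.ParseDuration on both sides of the diff;
		// 300 becomes 5m0s (seconds) on the "-" side;
		// -1 saturates to math.MaxInt64 nanoseconds on the "-" side.
		fmt.Printf("%s -> %s\n", in, d.Duration)
	}
}
```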
app/.gitignore (93 changes)

@@ -1 +1,92 @@
-ollama.syso
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+.DS_Store
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# TypeScript v1 declaration files
+typings/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variables file
+.env
+.env.test
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+
+# next.js build output
+.next
+
+# nuxt.js build output
+.nuxt
+
+# vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# Webpack
+.webpack/
+
+# Vite
+.vite/
+
+# Electron-Forge
+out/
@@ -1,22 +1,21 @@
-# Ollama App
+# Desktop
 
-## Linux
+This app builds upon Ollama to provide a desktop experience for running models.
 
-TODO
+## Developing
 
-## MacOS
+First, build the `ollama` binary:
 
-TODO
-
-## Windows
-
-If you want to build the installer, you'll need to install
-- https://jrsoftware.org/isinfo.php
-
-In the top directory of this repo, run the following powershell script
-to build the ollama CLI, ollama app, and ollama installer.
-
 ```
-powershell -ExecutionPolicy Bypass -File .\scripts\build_windows.ps1
+cd ..
+go build .
 ```
 
+Then run the desktop app with `npm start`:
+
+```
+cd app
+npm install
+npm start
+```
(binary image removed: 7.3 KiB)
@@ -1,17 +0,0 @@
-package assets
-
-import (
-	"embed"
-	"io/fs"
-)
-
-//go:embed *.ico
-var icons embed.FS
-
-func ListIcons() ([]string, error) {
-	return fs.Glob(icons, "*")
-}
-
-func GetIcon(filename string) ([]byte, error) {
-	return icons.ReadFile(filename)
-}
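The deleted file embeds the app's `.ico` tray icons with `go:embed` and exposes them via `ListIcons` and `GetIcon`. A hypothetical caller, with an assumed import path for the removed package:

```go
package main

import (
	"fmt"
	"log"

	// Hypothetical import path for the removed assets package.
	"github.com/jmorganca/ollama/app/assets"
)

func main() {
	// List every embedded icon, then read each one back.
	names, err := assets.ListIcons()
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		icon, err := assets.GetIcon(name)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s: %d bytes\n", name, len(icon))
	}
}
```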
(8 binary images moved or renamed unchanged: 402 B, 741 B, 440 B, 763 B, 447 B, 891 B, 443 B, 844 B)
(3 binary images removed: 76 KiB, 89 KiB, 91 KiB)
@@ -1,9 +0,0 @@
//go:build !windows

package lifecycle

import "fmt"

func GetStarted() error {
	return fmt.Errorf("GetStarted not implemented")
}
@@ -1,44 +0,0 @@
package lifecycle

import (
	"fmt"
	"log/slog"
	"os"
	"os/exec"
	"path/filepath"
	"syscall"
)

func GetStarted() error {
	const CREATE_NEW_CONSOLE = 0x00000010
	var err error
	bannerScript := filepath.Join(AppDir, "ollama_welcome.ps1")
	args := []string{
		// TODO once we're signed, the execution policy bypass should be removed
		"powershell", "-noexit", "-ExecutionPolicy", "Bypass", "-nologo", "-file", bannerScript,
	}
	args[0], err = exec.LookPath(args[0])
	if err != nil {
		return err
	}

	// Make sure the script actually exists
	_, err = os.Stat(bannerScript)
	if err != nil {
		return fmt.Errorf("getting started banner script error %s", err)
	}

	slog.Info(fmt.Sprintf("opening getting started terminal with %v", args))
	attrs := &os.ProcAttr{
		Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
		Sys:   &syscall.SysProcAttr{CreationFlags: CREATE_NEW_CONSOLE, HideWindow: false},
	}
	proc, err := os.StartProcess(args[0], args, attrs)

	if err != nil {
		return fmt.Errorf("unable to start getting started shell %w", err)
	}

	slog.Debug(fmt.Sprintf("getting started terminal PID: %d", proc.Pid))
	return proc.Release()
}
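For context, the pattern in the deleted file above — `os.StartProcess` with a `SysProcAttr` carrying `CREATE_NEW_CONSOLE` — is the standard way to give a spawned process its own console window on Windows. A minimal standalone sketch (Windows-only; the `cmd.exe` target is just an arbitrary example):

```go
//go:build windows

package main

import (
	"os"
	"syscall"
)

func main() {
	const CREATE_NEW_CONSOLE = 0x00000010
	attrs := &os.ProcAttr{
		Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
		Sys:   &syscall.SysProcAttr{CreationFlags: CREATE_NEW_CONSOLE},
	}
	// The child gets its own console window instead of inheriting ours.
	proc, err := os.StartProcess(`C:\Windows\System32\cmd.exe`, []string{"cmd"}, attrs)
	if err != nil {
		panic(err)
	}
	// Detach: let the new console live on independently of this process.
	_ = proc.Release()
}
```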
@@ -1,92 +0,0 @@
package lifecycle

import (
	"context"
	"fmt"
	"log"
	"log/slog"
	"os"
	"os/signal"
	"syscall"

	"github.com/jmorganca/ollama/app/store"
	"github.com/jmorganca/ollama/app/tray"
)

func Run() {
	InitLogging()

	ctx, cancel := context.WithCancel(context.Background())
	var done chan int

	t, err := tray.NewTray()
	if err != nil {
		log.Fatalf("Failed to start: %s", err)
	}
	callbacks := t.GetCallbacks()

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		slog.Debug("starting callback loop")
		for {
			select {
			case <-callbacks.Quit:
				slog.Debug("quit called")
				t.Quit()
			case <-signals:
				slog.Debug("shutting down due to signal")
				t.Quit()
			case <-callbacks.Update:
				err := DoUpgrade(cancel, done)
				if err != nil {
					slog.Warn(fmt.Sprintf("upgrade attempt failed: %s", err))
				}
			case <-callbacks.ShowLogs:
				ShowLogs()
			case <-callbacks.DoFirstUse:
				err := GetStarted()
				if err != nil {
					slog.Warn(fmt.Sprintf("Failed to launch getting started shell: %s", err))
				}
			}
		}
	}()

	// Are we first use?
	if !store.GetFirstTimeRun() {
		slog.Debug("First time run")
		err = t.DisplayFirstUseNotification()
		if err != nil {
			slog.Debug(fmt.Sprintf("XXX failed to display first use notification %v", err))
		}
		store.SetFirstTimeRun(true)
	} else {
		slog.Debug("Not first time, skipping first run notification")
	}

	if IsServerRunning(ctx) {
		slog.Info("Detected another instance of ollama running, exiting")
		os.Exit(1)
	} else {
		done, err = SpawnServer(ctx, CLIName)
		if err != nil {
			// TODO - should we retry in a backoff loop?
			// TODO - should we pop up a warning and maybe add a menu item to view application logs?
			slog.Error(fmt.Sprintf("Failed to spawn ollama server %s", err))
			done = make(chan int, 1)
			done <- 1
		}
	}

	StartBackgroundUpdaterChecker(ctx, t.UpdateAvailable)

	t.Run()
	cancel()
	slog.Info("Waiting for ollama server to shutdown...")
	if done != nil {
		<-done
	}
	slog.Info("Ollama app exiting")
}
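The `Run` function above multiplexes tray callbacks and OS signals through a single `select` loop, so all reactions run on one goroutine and never race each other. A minimal, self-contained sketch of that fan-in pattern (hypothetical channel names, no Ollama dependencies):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	quit := make(chan struct{})   // e.g. a tray "Quit" menu item
	update := make(chan struct{}) // e.g. a tray "Update" menu item

	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)

	go func() { quit <- struct{}{} }() // simulate a click so the demo exits

	// One goroutine owns all reactions to every event source.
	for {
		select {
		case <-quit:
			fmt.Println("quit requested")
			return
		case <-signals:
			fmt.Println("signal received, shutting down")
			return
		case <-update:
			fmt.Println("update requested")
		}
	}
}
```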
@@ -1,46 +0,0 @@
package lifecycle

import (
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
)

func InitLogging() {
	level := slog.LevelInfo

	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		level = slog.LevelDebug
	}

	var logFile *os.File
	var err error
	// Detect if we're a GUI app on windows, and if not, send logs to console
	if os.Stderr.Fd() != 0 {
		// Console app detected
		logFile = os.Stderr
		// TODO - write one-line to the app.log file saying we're running in console mode to help avoid confusion
	} else {
		logFile, err = os.OpenFile(AppLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
		if err != nil {
			slog.Error(fmt.Sprintf("failed to create server log %v", err))
			return
		}
	}
	handler := slog.NewTextHandler(logFile, &slog.HandlerOptions{
		Level:     level,
		AddSource: true,
		ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr {
			if attr.Key == slog.SourceKey {
				source := attr.Value.Any().(*slog.Source)
				source.File = filepath.Base(source.File)
			}
			return attr
		},
	})

	slog.SetDefault(slog.New(handler))

	slog.Info("ollama app started")
}
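The `ReplaceAttr` hook above trims `source.File` to its base name so that `AddSource` does not leak full build paths into the log. The same technique as a standalone program:

```go
package main

import (
	"log/slog"
	"os"
	"path/filepath"
)

func main() {
	handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		AddSource: true,
		// Rewrite the source attribute so logs show "main.go:21"
		// instead of the absolute path of the source file.
		ReplaceAttr: func(_ []string, attr slog.Attr) slog.Attr {
			if attr.Key == slog.SourceKey {
				source := attr.Value.Any().(*slog.Source)
				source.File = filepath.Base(source.File)
			}
			return attr
		},
	})
	slog.SetDefault(slog.New(handler))
	slog.Info("hello") // source=main.go:<line> rather than /home/user/...
}
```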
@@ -1,9 +0,0 @@
//go:build !windows

package lifecycle

import "log/slog"

func ShowLogs() {
	slog.Warn("ShowLogs not yet implemented")
}
@@ -1,19 +0,0 @@
package lifecycle

import (
	"fmt"
	"log/slog"
	"os/exec"
	"syscall"
)

func ShowLogs() {
	cmd_path := "c:\\Windows\\system32\\cmd.exe"
	slog.Debug(fmt.Sprintf("viewing logs with start %s", AppDataDir))
	cmd := exec.Command(cmd_path, "/c", "start", AppDataDir)
	cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: false, CreationFlags: 0x08000000}
	err := cmd.Start()
	if err != nil {
		slog.Error(fmt.Sprintf("Failed to open log dir: %s", err))
	}
}
@@ -1,79 +0,0 @@
package lifecycle

import (
	"errors"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

var (
	AppName    = "ollama app"
	CLIName    = "ollama"
	AppDir     = "/opt/Ollama"
	AppDataDir = "/opt/Ollama"
	// TODO - should there be a distinct log dir?
	UpdateStageDir = "/tmp"
	AppLogFile     = "/tmp/ollama_app.log"
	ServerLogFile  = "/tmp/ollama.log"
	UpgradeLogFile = "/tmp/ollama_update.log"
	Installer      = "OllamaSetup.exe"
)

func init() {
	if runtime.GOOS == "windows" {
		AppName += ".exe"
		CLIName += ".exe"
		// Logs, configs, downloads go to LOCALAPPDATA
		localAppData := os.Getenv("LOCALAPPDATA")
		AppDataDir = filepath.Join(localAppData, "Ollama")
		UpdateStageDir = filepath.Join(AppDataDir, "updates")
		AppLogFile = filepath.Join(AppDataDir, "app.log")
		ServerLogFile = filepath.Join(AppDataDir, "server.log")
		UpgradeLogFile = filepath.Join(AppDataDir, "upgrade.log")

		// Executables are stored in APPDATA
		AppDir = filepath.Join(localAppData, "Programs", "Ollama")

		// Make sure we have PATH set correctly for any spawned children
		paths := strings.Split(os.Getenv("PATH"), ";")
		// Start with whatever we find in the PATH/LD_LIBRARY_PATH
		found := false
		for _, path := range paths {
			d, err := filepath.Abs(path)
			if err != nil {
				continue
			}
			if strings.EqualFold(AppDir, d) {
				found = true
			}
		}
		if !found {
			paths = append(paths, AppDir)

			pathVal := strings.Join(paths, ";")
			slog.Debug("setting PATH=" + pathVal)
			err := os.Setenv("PATH", pathVal)
			if err != nil {
				slog.Error(fmt.Sprintf("failed to update PATH: %s", err))
			}
		}

		// Make sure our logging dir exists
		_, err := os.Stat(AppDataDir)
		if errors.Is(err, os.ErrNotExist) {
			if err := os.MkdirAll(AppDataDir, 0o755); err != nil {
				slog.Error(fmt.Sprintf("create ollama dir %s: %v", AppDataDir, err))
			}
		}

	} else if runtime.GOOS == "darwin" {
		// TODO
		AppName += ".app"
		// } else if runtime.GOOS == "linux" {
		// TODO
	}
}
@@ -1,139 +0,0 @@
package lifecycle

import (
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"os"
	"os/exec"
	"path/filepath"
	"time"

	"github.com/jmorganca/ollama/api"
)

func getCLIFullPath(command string) string {
	cmdPath := ""
	appExe, err := os.Executable()
	if err == nil {
		cmdPath = filepath.Join(filepath.Dir(appExe), command)
		_, err := os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
	}
	cmdPath, err = exec.LookPath(command)
	if err == nil {
		_, err := os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
	}
	pwd, err := os.Getwd()
	if err == nil {
		cmdPath = filepath.Join(pwd, command)
		_, err = os.Stat(cmdPath)
		if err == nil {
			return cmdPath
		}
	}

	return command
}

func SpawnServer(ctx context.Context, command string) (chan int, error) {
	done := make(chan int)

	logDir := filepath.Dir(ServerLogFile)
	_, err := os.Stat(logDir)
	if errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(logDir, 0o755); err != nil {
			return done, fmt.Errorf("create ollama server log dir %s: %v", logDir, err)
		}
	}

	cmd := getCmd(ctx, getCLIFullPath(command))
	// send stdout and stderr to a file
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return done, fmt.Errorf("failed to spawn server stdout pipe %s", err)
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return done, fmt.Errorf("failed to spawn server stderr pipe %s", err)
	}
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return done, fmt.Errorf("failed to spawn server stdin pipe %s", err)
	}

	// TODO - rotation
	logFile, err := os.OpenFile(ServerLogFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0755)
	if err != nil {
		return done, fmt.Errorf("failed to create server log %w", err)
	}
	go func() {
		defer logFile.Close()
		io.Copy(logFile, stdout) //nolint:errcheck
	}()
	go func() {
		defer logFile.Close()
		io.Copy(logFile, stderr) //nolint:errcheck
	}()

	// run the command and wait for it to finish
	if err := cmd.Start(); err != nil {
		return done, fmt.Errorf("failed to start server %w", err)
	}
	if cmd.Process != nil {
		slog.Info(fmt.Sprintf("started ollama server with pid %d", cmd.Process.Pid))
	}
	slog.Info(fmt.Sprintf("ollama server logs %s", ServerLogFile))

	go func() {
		// Keep the server running unless we're shutting down the app
		crashCount := 0
		for {
			cmd.Wait() //nolint:errcheck
			stdin.Close()
			var code int
			if cmd.ProcessState != nil {
				code = cmd.ProcessState.ExitCode()
			}

			select {
			case <-ctx.Done():
				slog.Debug(fmt.Sprintf("server shutdown with exit code %d", code))
				done <- code
				return
			default:
				crashCount++
				slog.Warn(fmt.Sprintf("server crash %d - exit code %d - respawning", crashCount, code))
				time.Sleep(500 * time.Millisecond)
				if err := cmd.Start(); err != nil {
					slog.Error(fmt.Sprintf("failed to restart server %s", err))
					// Keep trying, but back off if we keep failing
					time.Sleep(time.Duration(crashCount) * time.Second)
				}
			}
		}
	}()
	return done, nil
}

func IsServerRunning(ctx context.Context) bool {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		slog.Info("unable to connect to server")
		return false
	}
	err = client.Heartbeat(ctx)
	if err != nil {
		slog.Debug(fmt.Sprintf("heartbeat from server: %s", err))
		slog.Info("unable to connect to server")
		return false
	}
	return true
}
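One caveat worth noting about the respawn loop above: a Go `exec.Cmd` can only be started once, so calling `cmd.Start()` again on the same value after `Wait` has returned fails with "exec: already started". A supervisor that actually restarts the child needs to construct a fresh `Cmd` on each iteration; a minimal sketch of that pattern (the `"ollama", "serve"` target is illustrative):

```go
package main

import (
	"context"
	"log/slog"
	"os/exec"
	"time"
)

// supervise restarts the command whenever it exits, until ctx is cancelled.
func supervise(ctx context.Context, name string, args ...string) {
	crashCount := 0
	for {
		cmd := exec.CommandContext(ctx, name, args...) // fresh Cmd every time
		if err := cmd.Run(); err != nil {
			slog.Warn("child exited", "error", err)
		}
		select {
		case <-ctx.Done():
			return // deliberate shutdown, don't respawn
		default:
			crashCount++
			// Linear backoff so a crash loop doesn't spin the CPU.
			time.Sleep(time.Duration(crashCount) * time.Second)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	supervise(ctx, "ollama", "serve")
}
```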
@@ -1,12 +0,0 @@
//go:build !windows

package lifecycle

import (
	"context"
	"os/exec"
)

func getCmd(ctx context.Context, cmd string) *exec.Cmd {
	return exec.CommandContext(ctx, cmd, "serve")
}
@@ -1,13 +0,0 @@
package lifecycle

import (
	"context"
	"os/exec"
	"syscall"
)

func getCmd(ctx context.Context, exePath string) *exec.Cmd {
	cmd := exec.CommandContext(ctx, exePath, "serve")
	cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true, CreationFlags: 0x08000000}
	return cmd
}
@@ -1,238 +0,0 @@
package lifecycle

import (
	"context"
	"crypto/rand"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"mime"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"github.com/jmorganca/ollama/auth"
	"github.com/jmorganca/ollama/version"
)

var (
	UpdateCheckURLBase  = "https://ollama.com/api/update"
	UpdateDownloaded    = false
	UpdateCheckInterval = 60 * 60 * time.Second
)

// TODO - maybe move up to the API package?
type UpdateResponse struct {
	UpdateURL     string `json:"url"`
	UpdateVersion string `json:"version"`
}

func getClient(req *http.Request) http.Client {
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil {
		slog.Warn(fmt.Sprintf("failed to handle proxy: %s", err))
		return http.Client{}
	}

	return http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyURL(proxyURL),
		},
	}
}

func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
	var updateResp UpdateResponse

	requestURL, err := url.Parse(UpdateCheckURLBase)
	if err != nil {
		return false, updateResp
	}

	query := requestURL.Query()
	query.Add("os", runtime.GOOS)
	query.Add("arch", runtime.GOARCH)
	query.Add("version", version.Version)
	query.Add("ts", fmt.Sprintf("%d", time.Now().Unix()))

	nonce, err := auth.NewNonce(rand.Reader, 16)
	if err != nil {
		return false, updateResp
	}

	query.Add("nonce", nonce)
	requestURL.RawQuery = query.Encode()

	data := []byte(fmt.Sprintf("%s,%s", http.MethodGet, requestURL.RequestURI()))
	signature, err := auth.Sign(ctx, data)
	if err != nil {
		return false, updateResp
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestURL.String(), nil)
	if err != nil {
		slog.Warn(fmt.Sprintf("failed to check for update: %s", err))
		return false, updateResp
	}
	req.Header.Set("Authorization", signature)
	req.Header.Set("User-Agent", fmt.Sprintf("ollama/%s (%s %s) Go/%s", version.Version, runtime.GOARCH, runtime.GOOS, runtime.Version()))
	client := getClient(req)

	slog.Debug("checking for available update", "requestURL", requestURL)
	resp, err := client.Do(req)
	if err != nil {
		slog.Warn(fmt.Sprintf("failed to check for update: %s", err))
		return false, updateResp
	}
	defer resp.Body.Close()

	if resp.StatusCode == 204 {
		slog.Debug("check update response 204 (current version is up to date)")
		return false, updateResp
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		slog.Warn(fmt.Sprintf("failed to read body response: %s", err))
	}
	err = json.Unmarshal(body, &updateResp)
	if err != nil {
		slog.Warn(fmt.Sprintf("malformed response checking for update: %s", err))
		return false, updateResp
	}
	// Extract the version string from the URL in the github release artifact path
	updateResp.UpdateVersion = path.Base(path.Dir(updateResp.UpdateURL))

	slog.Info("New update available at " + updateResp.UpdateURL)
	return true, updateResp
}

func DownloadNewRelease(ctx context.Context, updateResp UpdateResponse) error {
	// Do a head first to check etag info
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, updateResp.UpdateURL, nil)
	if err != nil {
		return err
	}
	client := getClient(req)
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("error checking update: %w", err)
	}
	if resp.StatusCode != 200 {
		return fmt.Errorf("unexpected status attempting to download update %d", resp.StatusCode)
	}
	resp.Body.Close()
	etag := strings.Trim(resp.Header.Get("etag"), "\"")
	if etag == "" {
		slog.Debug("no etag detected, falling back to filename based dedup")
		etag = "_"
	}
	filename := Installer
	_, params, err := mime.ParseMediaType(resp.Header.Get("content-disposition"))
	if err == nil {
		filename = params["filename"]
	}

	stageFilename := filepath.Join(UpdateStageDir, etag, filename)

	// Check to see if we already have it downloaded
	_, err = os.Stat(stageFilename)
	if err == nil {
		slog.Info("update already downloaded")
		return nil
	}

	cleanupOldDownloads()

	req.Method = http.MethodGet
	resp, err = client.Do(req)
	if err != nil {
		return fmt.Errorf("error checking update: %w", err)
	}
	defer resp.Body.Close()
	etag = strings.Trim(resp.Header.Get("etag"), "\"")
	if etag == "" {
		slog.Debug("no etag detected, falling back to filename based dedup") // TODO probably can get rid of this redundant log
		etag = "_"
	}

	stageFilename = filepath.Join(UpdateStageDir, etag, filename)

	_, err = os.Stat(filepath.Dir(stageFilename))
	if errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(filepath.Dir(stageFilename), 0o755); err != nil {
			return fmt.Errorf("create ollama dir %s: %v", filepath.Dir(stageFilename), err)
		}
	}

	payload, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read body response: %w", err)
	}
	fp, err := os.OpenFile(stageFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
	if err != nil {
		return fmt.Errorf("write payload %s: %w", stageFilename, err)
	}
	defer fp.Close()
	if n, err := fp.Write(payload); err != nil || n != len(payload) {
		return fmt.Errorf("write payload %s: %d vs %d -- %w", stageFilename, n, len(payload), err)
	}
	slog.Info("new update downloaded " + stageFilename)

	UpdateDownloaded = true
	return nil
}

func cleanupOldDownloads() {
	files, err := os.ReadDir(UpdateStageDir)
	if err != nil && errors.Is(err, os.ErrNotExist) {
		// Expected behavior on first run
		return
	} else if err != nil {
		slog.Warn(fmt.Sprintf("failed to list stage dir: %s", err))
		return
	}
	for _, file := range files {
		fullname := filepath.Join(UpdateStageDir, file.Name())
		slog.Debug("cleaning up old download: " + fullname)
		err = os.RemoveAll(fullname)
		if err != nil {
			slog.Warn(fmt.Sprintf("failed to cleanup stale update download %s", err))
		}
	}
}

func StartBackgroundUpdaterChecker(ctx context.Context, cb func(string) error) {
	go func() {
		// Don't blast an update message immediately after startup
		// time.Sleep(30 * time.Second)
		time.Sleep(3 * time.Second)

		for {
			available, resp := IsNewReleaseAvailable(ctx)
			if available {
				err := DownloadNewRelease(ctx, resp)
				if err != nil {
					slog.Error(fmt.Sprintf("failed to download new release: %s", err))
				}
				err = cb(resp.UpdateVersion)
				if err != nil {
					slog.Warn(fmt.Sprintf("failed to register update available with tray: %s", err))
				}
			}
			select {
			case <-ctx.Done():
				slog.Debug("stopping background update checker")
				return
			default:
				time.Sleep(UpdateCheckInterval)
			}
		}
	}()
}
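The update check above signs the string `"GET,<request-URI>"` (with a random nonce folded into the query string) and sends the signature in the `Authorization` header; the `auth.NewNonce` / `auth.Sign` helpers come from Ollama's own `auth` package and are not shown in this diff. A rough stand-in for the signing step using `crypto/ed25519`, just to illustrate the shape of the scheme (keypair and URI are made up):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	// Stand-in keypair; the real app signs with the user's Ollama key.
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	requestURI := "/api/update?arch=amd64&nonce=abc123&os=linux"
	payload := []byte(fmt.Sprintf("%s,%s", http.MethodGet, requestURI))

	sig := ed25519.Sign(priv, payload)
	fmt.Println("Authorization:", base64.StdEncoding.EncodeToString(sig))

	// The server recomputes the payload from the request line and verifies it.
	fmt.Println("verified:", ed25519.Verify(pub, payload, sig))
}
```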
@@ -1,12 +0,0 @@
//go:build !windows

package lifecycle

import (
	"context"
	"fmt"
)

func DoUpgrade(cancel context.CancelFunc, done chan int) error {
	return fmt.Errorf("DoUpgrade not yet implemented")
}
@@ -1,80 +0,0 @@
package lifecycle

import (
	"context"
	"fmt"
	"log/slog"
	"os"
	"os/exec"
	"path/filepath"
)

func DoUpgrade(cancel context.CancelFunc, done chan int) error {
	files, err := filepath.Glob(filepath.Join(UpdateStageDir, "*", "*.exe")) // TODO generalize for multiplatform
	if err != nil {
		return fmt.Errorf("failed to lookup downloads: %s", err)
	}
	if len(files) == 0 {
		return fmt.Errorf("no update downloads found")
	} else if len(files) > 1 {
		// Shouldn't happen
		slog.Warn(fmt.Sprintf("multiple downloads found, using first one %v", files))
	}
	installerExe := files[0]

	slog.Info("starting upgrade with " + installerExe)
	slog.Info("upgrade log file " + UpgradeLogFile)

	// When running in debug mode, we'll be "verbose" and let the installer pop up and prompt
	installArgs := []string{
		"/CLOSEAPPLICATIONS",                    // Quit the tray app if it's still running
		"/LOG=" + filepath.Base(UpgradeLogFile), // Only relative seems reliable, so set pwd
		"/FORCECLOSEAPPLICATIONS",               // Force close the tray app - might be needed
	}
	// When we're not in debug mode, make the upgrade as quiet as possible (no GUI, no prompts)
	// TODO - temporarily disable since we're pinning in debug mode for the preview
	// if debug := os.Getenv("OLLAMA_DEBUG"); debug == "" {
	installArgs = append(installArgs,
		"/SP", // Skip the "This will install... Do you wish to continue" prompt
		"/SUPPRESSMSGBOXES",
		"/SILENT",
		"/VERYSILENT",
	)
	// }

	// Safeguard in case we have requests in flight that need to drain...
	slog.Info("Waiting for server to shutdown")
	cancel()
	if done != nil {
		<-done
	} else {
		// Shouldn't happen
		slog.Warn("done chan was nil, not actually waiting")
	}

	slog.Debug(fmt.Sprintf("starting installer: %s %v", installerExe, installArgs))
	os.Chdir(filepath.Dir(UpgradeLogFile)) //nolint:errcheck
	cmd := exec.Command(installerExe, installArgs...)

	if err := cmd.Start(); err != nil {
		return fmt.Errorf("unable to start ollama app %w", err)
	}

	if cmd.Process != nil {
		err = cmd.Process.Release()
		if err != nil {
			slog.Error(fmt.Sprintf("failed to release server process: %s", err))
		}
	} else {
		// TODO - some details about why it didn't start, or is this a pedantic error case?
		return fmt.Errorf("installer process did not start")
	}

	// TODO should we linger for a moment and check to make sure it's actually running by checking the pid?

	slog.Info("Installer started in background, exiting")

	os.Exit(0)
	// Not reached
	return nil
}
12
app/main.go
@@ -1,12 +0,0 @@
package main

// Compile with the following to get rid of the cmd pop up on windows
// go build -ldflags="-H windowsgui" .

import (
	"github.com/jmorganca/ollama/app/lifecycle"
)

func main() {
	lifecycle.Run()
}
153
app/ollama.iss
@@ -1,153 +0,0 @@
; Inno Setup Installer for Ollama
;
; To build the installer use the build script invoked from the top of the source tree
;
; powershell -ExecutionPolicy Bypass -File .\scripts\build_windows.ps1


#define MyAppName "Ollama"
#if GetEnv("PKG_VERSION") != ""
  #define MyAppVersion GetEnv("PKG_VERSION")
#else
  #define MyAppVersion "0.0.0"
#endif
#define MyAppPublisher "Ollama"
#define MyAppURL "https://ollama.com/"
#define MyAppExeName "ollama app.exe"
#define MyIcon ".\assets\app.ico"

[Setup]
; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{44E83376-CE68-45EB-8FC1-393500EB558C}
AppName={#MyAppName}
AppVersion={#MyAppVersion}
VersionInfoVersion={#MyAppVersion}
;AppVerName={#MyAppName} {#MyAppVersion}
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
ArchitecturesAllowed=x64
ArchitecturesInstallIn64BitMode=x64
DefaultDirName={localappdata}\Programs\{#MyAppName}
DefaultGroupName={#MyAppName}
DisableProgramGroupPage=yes
PrivilegesRequired=lowest
OutputBaseFilename="OllamaSetup"
SetupIconFile={#MyIcon}
UninstallDisplayIcon={uninstallexe}
Compression=lzma2
SolidCompression=no
WizardStyle=modern
ChangesEnvironment=yes
OutputDir=..\dist\

; Disable logging once everything's battle tested
; Filename will be %TEMP%\Setup Log*.txt
SetupLogging=yes
CloseApplications=yes
RestartApplications=no

; Make sure they can at least download llama2 as a minimum
ExtraDiskSpaceRequired=3826806784

; https://jrsoftware.org/ishelp/index.php?topic=setup_wizardimagefile
WizardSmallImageFile=.\assets\setup.bmp

; TODO verify actual min windows version...
; OG Win 10
MinVersion=10.0.10240

; First release that supports WinRT UI Composition for win32 apps
; MinVersion=10.0.17134
; First release with XAML Islands - possible UI path forward
; MinVersion=10.0.18362

; quiet...
DisableDirPage=yes
DisableFinishedPage=yes
DisableReadyMemo=yes
DisableReadyPage=yes
DisableStartupPrompt=yes
DisableWelcomePage=yes

; TODO - percentage can't be set less than 100, so how to make it shorter?
; WizardSizePercent=100,80

#if GetEnv("KEY_CONTAINER")
SignTool=MySignTool
SignedUninstaller=yes
#endif

SetupMutex=OllamaSetupMutex

[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"

[LangOptions]
DialogFontSize=12

[Files]
Source: ".\app.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}" ; Flags: ignoreversion 64bit
Source: "..\ollama.exe"; DestDir: "{app}"; Flags: ignoreversion 64bit
Source: "..\dist\windeps\*.dll"; DestDir: "{app}"; Flags: ignoreversion 64bit
Source: "..\dist\ollama_welcome.ps1"; DestDir: "{app}"; Flags: ignoreversion
Source: ".\assets\app.ico"; DestDir: "{app}"; Flags: ignoreversion

[Icons]
Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
Name: "{userstartup}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"
Name: "{userprograms}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; IconFilename: "{app}\app.ico"

[Run]
Filename: "{cmd}"; Parameters: "/C set PATH={app};%PATH% & ""{app}\{#MyAppExeName}"""; Flags: postinstall nowait runhidden

[UninstallRun]
; Filename: "{cmd}"; Parameters: "/C ""taskkill /im ''{#MyAppExeName}'' /f /t"; Flags: runhidden
; Filename: "{cmd}"; Parameters: "/C ""taskkill /im ollama.exe /f /t"; Flags: runhidden
Filename: "taskkill"; Parameters: "/im ""{#MyAppExeName}"" /f /t"; Flags: runhidden
Filename: "taskkill"; Parameters: "/im ""ollama.exe"" /f /t"; Flags: runhidden
; HACK! need to give the server and app enough time to exit
; TODO - convert this to a Pascal code script so it waits until they're no longer running, then completes
Filename: "{cmd}"; Parameters: "/c timeout 5"; Flags: runhidden

[UninstallDelete]
Type: filesandordirs; Name: "{%TEMP}\ollama*"
Type: filesandordirs; Name: "{%LOCALAPPDATA}\Ollama"
Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama"
Type: filesandordirs; Name: "{%USERPROFILE}\.ollama"
; NOTE: if the user has a custom OLLAMA_MODELS it will be preserved

[Messages]
WizardReady=Ollama Windows Preview
ReadyLabel1=%nLet's get you up and running with your own large language models.
SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or finish the other installer, then click OK to continue with this install, or Cancel to exit.


;FinishedHeadingLabel=Run your first model
;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n    ollama run llama2
;ClickFinish=%n

[Registry]
Root: HKCU; Subkey: "Environment"; \
    ValueType: expandsz; ValueName: "Path"; ValueData: "{olddata};{app}"; \
    Check: NeedsAddPath('{app}')

[Code]

function NeedsAddPath(Param: string): boolean;
var
  OrigPath: string;
begin
  if not RegQueryStringValue(HKEY_CURRENT_USER,
    'Environment',
    'Path', OrigPath)
  then begin
    Result := True;
    exit;
  end;
  { look for the path with leading and trailing semicolon }
  { Pos() returns 0 if not found }
  Result := Pos(';' + ExpandConstant(Param) + ';', ';' + OrigPath + ';') = 0;
end;
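The `NeedsAddPath` check above wraps both the candidate directory and the existing registry value in semicolons so that a substring match can only hit a whole PATH entry. The same idea expressed in Go, purely for illustration (hypothetical helper, not part of this diff):

```go
package main

import (
	"fmt"
	"strings"
)

// needsAddPath reports whether dir is absent from a semicolon-separated
// PATH value. Wrapping both sides in ';' makes the substring test match
// whole entries only, so `C:\Ollama` does not false-positive on `C:\OllamaOld`.
func needsAddPath(pathValue, dir string) bool {
	return !strings.Contains(";"+pathValue+";", ";"+dir+";")
}

func main() {
	fmt.Println(needsAddPath(`C:\Windows;C:\OllamaOld`, `C:\Ollama`)) // true
	fmt.Println(needsAddPath(`C:\Windows;C:\Ollama`, `C:\Ollama`))    // false
}
```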
@@ -1,29 +0,0 @@
#include <winver.h>

VS_VERSION_INFO VERSIONINFO
 FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
 FILEFLAGS 0x1L
#else
 FILEFLAGS 0x0L
#endif
 FILEOS 0x40004L
 FILETYPE 0x1L
 FILESUBTYPE 0x0L
BEGIN
    BLOCK "StringFileInfo"
    BEGIN
        BLOCK "040904b0"
        BEGIN
            VALUE "FileDescription", "Ollama"
            VALUE "InternalName", "Ollama"
            VALUE "OriginalFilename", "ollama app.exe"
            VALUE "ProductName", "Ollama"
        END
    END

    BLOCK "VarFileInfo"
    BEGIN
        VALUE "Translation", 0x409, 1200
    END
END
@@ -1,8 +0,0 @@
# TODO - consider ANSI colors and maybe ASCII art...
write-host ""
write-host "Welcome to Ollama!"
write-host ""
write-host "Run your first model:"
write-host ""
write-host "`tollama run llama2"
write-host ""
(binary image changed: 17 KiB before and after)
@@ -1,98 +0,0 @@
package store

import (
	"encoding/json"
	"errors"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"sync"

	"github.com/google/uuid"
)

type Store struct {
	ID           string `json:"id"`
	FirstTimeRun bool   `json:"first-time-run"`
}

var (
	lock  sync.Mutex
	store Store
)

func GetID() string {
	lock.Lock()
	defer lock.Unlock()
	if store.ID == "" {
		initStore()
	}
	return store.ID
}

func GetFirstTimeRun() bool {
	lock.Lock()
	defer lock.Unlock()
	if store.ID == "" {
		initStore()
	}
	return store.FirstTimeRun
}

func SetFirstTimeRun(val bool) {
	lock.Lock()
	defer lock.Unlock()
	if store.FirstTimeRun == val {
		return
	}
	store.FirstTimeRun = val
	writeStore(getStorePath())
}

// lock must be held
func initStore() {
	storeFile, err := os.Open(getStorePath())
	if err == nil {
		defer storeFile.Close()
		err = json.NewDecoder(storeFile).Decode(&store)
		if err == nil {
			slog.Debug(fmt.Sprintf("loaded existing store %s - ID: %s", getStorePath(), store.ID))
			return
		}
	} else if !errors.Is(err, os.ErrNotExist) {
		slog.Debug(fmt.Sprintf("unexpected error searching for store: %s", err))
	}
	slog.Debug("initializing new store")
	store.ID = uuid.New().String()
	writeStore(getStorePath())
}

func writeStore(storeFilename string) {
	ollamaDir := filepath.Dir(storeFilename)
	_, err := os.Stat(ollamaDir)
	if errors.Is(err, os.ErrNotExist) {
		if err := os.MkdirAll(ollamaDir, 0o755); err != nil {
			slog.Error(fmt.Sprintf("create ollama dir %s: %v", ollamaDir, err))
			return
		}
	}
	payload, err := json.Marshal(store)
	if err != nil {
		slog.Error(fmt.Sprintf("failed to marshal store: %s", err))
		return
	}
	fp, err := os.OpenFile(storeFilename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o755)
	if err != nil {
		slog.Error(fmt.Sprintf("write store payload %s: %v", storeFilename, err))
		return
	}
	defer fp.Close()
	if n, err := fp.Write(payload); err != nil || n != len(payload) {
		slog.Error(fmt.Sprintf("write store payload %s: %d vs %d -- %v", storeFilename, n, len(payload), err))
		return
	}
	slog.Debug("Store contents: " + string(payload))
	slog.Info(fmt.Sprintf("wrote store: %s", storeFilename))
}
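`writeStore` above truncates the config file in place, so a crash mid-write can leave the store corrupt. A common hardening (not what the diffed code does) is to write to a sibling temp file and rename it into place, since a same-directory `os.Rename` is atomic on POSIX filesystems; a minimal sketch:

```go
package main

import (
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a sibling temp file, then renames it over
// the target, so readers see either the old contents or the new, never a mix.
func writeFileAtomic(filename string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(filename)
	tmp, err := os.CreateTemp(dir, ".store-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op after a successful rename
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filename)
}

func main() {
	_ = writeFileAtomic("config.json", []byte(`{"id":"example"}`), 0o644)
}
```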
@@ -1,13 +0,0 @@
package store

import (
	"os"
	"path/filepath"
)

func getStorePath() string {
	// TODO - system wide location?

	home := os.Getenv("HOME")
	return filepath.Join(home, "Library", "Application Support", "Ollama", "config.json")
}
@@ -1,16 +0,0 @@
package store

import (
	"os"
	"path/filepath"
)

func getStorePath() string {
	if os.Geteuid() == 0 {
		// TODO where should we store this on linux for system-wide operation?
		return "/etc/ollama/config.json"
	}

	home := os.Getenv("HOME")
	return filepath.Join(home, ".ollama", "config.json")
}
@@ -1,11 +0,0 @@
package store

import (
	"os"
	"path/filepath"
)

func getStorePath() string {
	localAppData := os.Getenv("LOCALAPPDATA")
	return filepath.Join(localAppData, "Ollama", "config.json")
}
@@ -1,24 +0,0 @@
package commontray

var (
	Title   = "Ollama"
	ToolTip = "Ollama"

	UpdateIconName = "tray_upgrade"
	IconName       = "tray"
)

type Callbacks struct {
	Quit       chan struct{}
	Update     chan struct{}
	DoFirstUse chan struct{}
	ShowLogs   chan struct{}
}

type OllamaTray interface {
	GetCallbacks() Callbacks
	Run()
	UpdateAvailable(ver string) error
	DisplayFirstUseNotification() error
	Quit()
}
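Since `OllamaTray` is a plain interface over the `Callbacks` channel struct, a headless stand-in is easy to write, for example to exercise the lifecycle loop without any OS tray. A hypothetical sketch in the same package:

```go
package commontray

// mockTray is a hypothetical headless OllamaTray for tests: it draws nothing
// and just exposes the channels the lifecycle loop listens on.
type mockTray struct {
	cb   Callbacks
	done chan struct{}
}

func NewMockTray() OllamaTray {
	return &mockTray{
		cb: Callbacks{
			Quit:       make(chan struct{}),
			Update:     make(chan struct{}),
			DoFirstUse: make(chan struct{}),
			ShowLogs:   make(chan struct{}),
		},
		done: make(chan struct{}),
	}
}

func (m *mockTray) GetCallbacks() Callbacks            { return m.cb }
func (m *mockTray) Run()                               { <-m.done } // block like a real message loop
func (m *mockTray) UpdateAvailable(ver string) error   { return nil }
func (m *mockTray) DisplayFirstUseNotification() error { return nil }
func (m *mockTray) Quit()                              { close(m.done) }
```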
@@ -1,33 +0,0 @@
package tray

import (
	"fmt"
	"runtime"

	"github.com/jmorganca/ollama/app/assets"
	"github.com/jmorganca/ollama/app/tray/commontray"
)

func NewTray() (commontray.OllamaTray, error) {
	extension := ".png"
	if runtime.GOOS == "windows" {
		extension = ".ico"
	}
	iconName := commontray.UpdateIconName + extension
	updateIcon, err := assets.GetIcon(iconName)
	if err != nil {
		return nil, fmt.Errorf("failed to load icon %s: %w", iconName, err)
	}
	iconName = commontray.IconName + extension
	icon, err := assets.GetIcon(iconName)
	if err != nil {
		return nil, fmt.Errorf("failed to load icon %s: %w", iconName, err)
	}

	tray, err := InitPlatformTray(icon, updateIcon)
	if err != nil {
		return nil, err
	}

	return tray, nil
}
@@ -1,13 +0,0 @@
//go:build !windows

package tray

import (
	"fmt"

	"github.com/jmorganca/ollama/app/tray/commontray"
)

func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {
	return nil, fmt.Errorf("NOT IMPLEMENTED YET")
}
@@ -1,10 +0,0 @@
package tray

import (
	"github.com/jmorganca/ollama/app/tray/commontray"
	"github.com/jmorganca/ollama/app/tray/wintray"
)

func InitPlatformTray(icon, updateIcon []byte) (commontray.OllamaTray, error) {
	return wintray.InitTray(icon, updateIcon)
}
@@ -1,184 +0,0 @@
//go:build windows

package wintray

import (
	"fmt"
	"log/slog"
	"sync"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	quitOnce sync.Once
)

func (t *winTray) Run() {
	nativeLoop()
}

func nativeLoop() {
	// Main message pump.
	slog.Debug("starting event handling loop")
	m := &struct {
		WindowHandle windows.Handle
		Message      uint32
		Wparam       uintptr
		Lparam       uintptr
		Time         uint32
		Pt           point
		LPrivate     uint32
	}{}
	for {
		ret, _, err := pGetMessage.Call(uintptr(unsafe.Pointer(m)), 0, 0, 0)

		// If the function retrieves a message other than WM_QUIT, the return value is nonzero.
		// If the function retrieves the WM_QUIT message, the return value is zero.
		// If there is an error, the return value is -1
		// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644936(v=vs.85).aspx
		switch int32(ret) {
		case -1:
			slog.Error(fmt.Sprintf("get message failure: %v", err))
			return
		case 0:
			return
		default:
			pTranslateMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
			pDispatchMessage.Call(uintptr(unsafe.Pointer(m)))  //nolint:errcheck
		}
	}
}

// WindowProc callback function that processes messages sent to a window.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms633573(v=vs.85).aspx
func (t *winTray) wndProc(hWnd windows.Handle, message uint32, wParam, lParam uintptr) (lResult uintptr) {
	const (
		WM_RBUTTONUP   = 0x0205
		WM_LBUTTONUP   = 0x0202
		WM_COMMAND     = 0x0111
		WM_ENDSESSION  = 0x0016
		WM_CLOSE       = 0x0010
		WM_DESTROY     = 0x0002
		WM_MOUSEMOVE   = 0x0200
		WM_LBUTTONDOWN = 0x0201
	)
	switch message {
	case WM_COMMAND:
		menuItemId := int32(wParam)
		// https://docs.microsoft.com/en-us/windows/win32/menurc/wm-command#menus
		switch menuItemId {
		case quitMenuID:
			select {
			case t.callbacks.Quit <- struct{}{}:
			// should not happen but in case not listening
			default:
				slog.Error("no listener on Quit")
			}
		case updateMenuID:
			select {
			case t.callbacks.Update <- struct{}{}:
			// should not happen but in case not listening
			default:
				slog.Error("no listener on Update")
			}
		case diagLogsMenuID:
			select {
			case t.callbacks.ShowLogs <- struct{}{}:
			// should not happen but in case not listening
			default:
				slog.Error("no listener on ShowLogs")
			}
		default:
			slog.Debug(fmt.Sprintf("Unexpected menu item id: %d", menuItemId))
		}
	case WM_CLOSE:
		boolRet, _, err := pDestroyWindow.Call(uintptr(t.window))
		if boolRet == 0 {
			slog.Error(fmt.Sprintf("failed to destroy window: %s", err))
		}
		err = t.wcex.unregister()
		if err != nil {
			slog.Error(fmt.Sprintf("failed to unregister window %s", err))
		}
	case WM_DESTROY:
		// same as WM_ENDSESSION, but throws 0 exit code after all
		defer pPostQuitMessage.Call(uintptr(int32(0))) //nolint:errcheck
		fallthrough
	case WM_ENDSESSION:
		t.muNID.Lock()
		if t.nid != nil {
			err := t.nid.delete()
			if err != nil {
				slog.Error(fmt.Sprintf("failed to delete nid: %s", err))
			}
		}
		t.muNID.Unlock()
	case t.wmSystrayMessage:
		switch lParam {
		case WM_MOUSEMOVE, WM_LBUTTONDOWN:
			// Ignore these...
		case WM_RBUTTONUP, WM_LBUTTONUP:
			err := t.showMenu()
			if err != nil {
				slog.Error(fmt.Sprintf("failed to show menu: %s", err))
			}
		case 0x405: // TODO - how is this magic value derived for the notification left click
			if t.pendingUpdate {
				select {
				case t.callbacks.Update <- struct{}{}:
				// should not happen but in case not listening
				default:
					slog.Error("no listener on Update")
				}
			} else {
				select {
				case t.callbacks.DoFirstUse <- struct{}{}:
				// should not happen but in case not listening
				default:
					slog.Error("no listener on DoFirstUse")
				}
			}
		case 0x404: // Middle click or close notification
			// slog.Debug("doing nothing on close of first time notification")
		default:
			// 0x402 also seems common - what is it?
			slog.Debug(fmt.Sprintf("unmanaged app message, lParm: 0x%x", lParam))
		}
	case t.wmTaskbarCreated: // on explorer.exe restarts
		t.muNID.Lock()
		err := t.nid.add()
		if err != nil {
			slog.Error(fmt.Sprintf("failed to refresh the taskbar on explorer restart: %s", err))
		}
		t.muNID.Unlock()
	default:
		// Calls the default window procedure to provide default processing for any window messages that an application does not process.
		// https://msdn.microsoft.com/en-us/library/windows/desktop/ms633572(v=vs.85).aspx
		lResult, _, _ = pDefWindowProc.Call(
			uintptr(hWnd),
			uintptr(message),
			uintptr(wParam),
			uintptr(lParam),
		)
	}
	return
}

func (t *winTray) Quit() {
	quitOnce.Do(quit)
}

func quit() {
	boolRet, _, err := pPostMessage.Call(
		uintptr(wt.window),
		WM_CLOSE,
		0,
		0,
	)
	if boolRet == 0 {
		slog.Error(fmt.Sprintf("failed to post close message on shutdown %s", err))
	}
}
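The repeated `select` with a `default` arm in `wndProc` above is the idiomatic non-blocking channel send: the window procedure runs on the message-pump thread and must never stall, so when nobody is draining a callback channel the event is dropped and logged instead of blocking. The pattern in isolation:

```go
package main

import "fmt"

func main() {
	events := make(chan struct{}) // unbuffered: a send blocks unless a receiver is ready

	// Non-blocking send: if no listener is ready right now, drop the event
	// rather than freezing the caller (here, standing in for a window proc).
	select {
	case events <- struct{}{}:
		fmt.Println("delivered")
	default:
		fmt.Println("no listener, dropped")
	}
}
```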
@@ -1,71 +0,0 @@
//go:build windows

package wintray

import (
	"fmt"
	"log/slog"
	"unsafe"

	"golang.org/x/sys/windows"
)

const (
	updatAvailableMenuID = 1
	updateMenuID         = updatAvailableMenuID + 1
	separatorMenuID      = updateMenuID + 1
	diagLogsMenuID       = separatorMenuID + 1
	diagSeparatorMenuID  = diagLogsMenuID + 1
	quitMenuID           = diagSeparatorMenuID + 1
)

func (t *winTray) initMenus() error {
	if err := t.addOrUpdateMenuItem(diagLogsMenuID, 0, diagLogsMenuTitle, false); err != nil {
		return fmt.Errorf("unable to create menu entries %w\n", err)
	}
	if err := t.addSeparatorMenuItem(diagSeparatorMenuID, 0); err != nil {
		return fmt.Errorf("unable to create menu entries %w", err)
	}
	if err := t.addOrUpdateMenuItem(quitMenuID, 0, quitMenuTitle, false); err != nil {
		return fmt.Errorf("unable to create menu entries %w\n", err)
	}
	return nil
}

func (t *winTray) UpdateAvailable(ver string) error {
	if !t.updateNotified {
		slog.Debug("updating menu and sending notification for new update")
		if err := t.addOrUpdateMenuItem(updatAvailableMenuID, 0, updateAvailableMenuTitle, true); err != nil {
			return fmt.Errorf("unable to create menu entries %w", err)
		}
		if err := t.addOrUpdateMenuItem(updateMenuID, 0, updateMenutTitle, false); err != nil {
			return fmt.Errorf("unable to create menu entries %w", err)
		}
		if err := t.addSeparatorMenuItem(separatorMenuID, 0); err != nil {
			return fmt.Errorf("unable to create menu entries %w", err)
		}
		iconFilePath, err := iconBytesToFilePath(wt.updateIcon)
		if err != nil {
			return fmt.Errorf("unable to write icon data to temp file: %w", err)
		}
		if err := wt.setIcon(iconFilePath); err != nil {
			return fmt.Errorf("unable to set icon: %w", err)
		}
		t.updateNotified = true

		t.pendingUpdate = true
		// Now pop up the notification
		t.muNID.Lock()
		defer t.muNID.Unlock()
		copy(t.nid.InfoTitle[:], windows.StringToUTF16(updateTitle))
		copy(t.nid.Info[:], windows.StringToUTF16(fmt.Sprintf(updateMessage, ver)))
		t.nid.Flags |= NIF_INFO
		t.nid.Timeout = 10
		t.nid.Size = uint32(unsafe.Sizeof(*wt.nid))
		err = t.nid.modify()
		if err != nil {
			return err
		}
	}
	return nil
}
@@ -1,15 +0,0 @@
//go:build windows

package wintray

const (
	firstTimeTitle   = "Ollama is running"
	firstTimeMessage = "Click here to get started"
	updateTitle      = "Update available"
	updateMessage    = "Ollama version %s is ready to install"

	quitMenuTitle            = "Quit Ollama"
	updateAvailableMenuTitle = "An update is available"
	updateMenutTitle         = "Restart to update"
	diagLogsMenuTitle        = "View logs"
)
@@ -1,66 +0,0 @@
//go:build windows

package wintray

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// Contains information that the system needs to display notifications in the notification area.
// Used by Shell_NotifyIcon.
// https://msdn.microsoft.com/en-us/library/windows/desktop/bb773352(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159
type notifyIconData struct {
	Size                       uint32
	Wnd                        windows.Handle
	ID, Flags, CallbackMessage uint32
	Icon                       windows.Handle
	Tip                        [128]uint16
	State, StateMask           uint32
	Info                       [256]uint16
	// Timeout, Version           uint32
	Timeout uint32

	InfoTitle   [64]uint16
	InfoFlags   uint32
	GuidItem    windows.GUID
	BalloonIcon windows.Handle
}

func (nid *notifyIconData) add() error {
	const NIM_ADD = 0x00000000
	res, _, err := pShellNotifyIcon.Call(
		uintptr(NIM_ADD),
		uintptr(unsafe.Pointer(nid)),
	)
	if res == 0 {
		return err
	}
	return nil
}

func (nid *notifyIconData) modify() error {
	const NIM_MODIFY = 0x00000001
	res, _, err := pShellNotifyIcon.Call(
		uintptr(NIM_MODIFY),
		uintptr(unsafe.Pointer(nid)),
	)
	if res == 0 {
		return err
	}
	return nil
}

func (nid *notifyIconData) delete() error {
	const NIM_DELETE = 0x00000002
	res, _, err := pShellNotifyIcon.Call(
		uintptr(NIM_DELETE),
		uintptr(unsafe.Pointer(nid)),
	)
	if res == 0 {
		return err
	}
	return nil
}
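The `pShellNotifyIcon` handle used above is declared elsewhere in the package and does not appear in this hunk. Bindings like it are conventionally created with `golang.org/x/sys/windows` lazy DLL lookups; a sketch of what such a declaration typically looks like (Windows-only, variable names assumed):

```go
//go:build windows

package wintray

import "golang.org/x/sys/windows"

// Lazy bindings resolve the DLL and symbol on first Call, so merely
// importing the package never fails even if a symbol is missing.
var (
	shell32          = windows.NewLazySystemDLL("shell32.dll")
	pShellNotifyIcon = shell32.NewProc("Shell_NotifyIconW")
)
```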
@@ -1,485 +0,0 @@
//go:build windows

package wintray

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"sort"
	"sync"
	"unsafe"

	"github.com/jmorganca/ollama/app/tray/commontray"
	"golang.org/x/sys/windows"
)

// Helpful sources: https://github.com/golang/exp/blob/master/shiny/driver/internal/win32

// Contains information about loaded resources
type winTray struct {
	instance,
	icon,
	cursor,
	window windows.Handle

	loadedImages   map[string]windows.Handle
	muLoadedImages sync.RWMutex

	// menus keeps track of the submenus keyed by the menu item ID, plus 0
	// which corresponds to the main popup menu.
	menus    map[uint32]windows.Handle
	muMenus  sync.RWMutex
	menuOf   map[uint32]windows.Handle
	muMenuOf sync.RWMutex
	// menuItemIcons maintains the bitmap of each menu item (if applies). It's
	// needed to show the icon correctly when showing a previously hidden menu
	// item again.
	// menuItemIcons map[uint32]windows.Handle
	// muMenuItemIcons sync.RWMutex
	visibleItems   map[uint32][]uint32
	muVisibleItems sync.RWMutex

	nid   *notifyIconData
	muNID sync.RWMutex
	wcex  *wndClassEx

	wmSystrayMessage,
	wmTaskbarCreated uint32

	pendingUpdate  bool
	updateNotified bool // Only pop up the notification once - TODO consider daily nag?
	// Callbacks
	callbacks  commontray.Callbacks
	normalIcon []byte
	updateIcon []byte
}

var wt winTray

func (t *winTray) GetCallbacks() commontray.Callbacks {
	return t.callbacks
}

func InitTray(icon, updateIcon []byte) (*winTray, error) {
	wt.callbacks.Quit = make(chan struct{})
	wt.callbacks.Update = make(chan struct{})
	wt.callbacks.ShowLogs = make(chan struct{})
	wt.callbacks.DoFirstUse = make(chan struct{})
	wt.normalIcon = icon
	wt.updateIcon = updateIcon
	if err := wt.initInstance(); err != nil {
		return nil, fmt.Errorf("unable to init instance: %w", err)
	}

	if err := wt.createMenu(); err != nil {
		return nil, fmt.Errorf("unable to create menu: %w", err)
	}

	iconFilePath, err := iconBytesToFilePath(wt.normalIcon)
	if err != nil {
		return nil, fmt.Errorf("unable to write icon data to temp file: %w", err)
	}
	if err := wt.setIcon(iconFilePath); err != nil {
		return nil, fmt.Errorf("unable to set icon: %w", err)
	}

	return &wt, wt.initMenus()
}

func (t *winTray) initInstance() error {
	const (
		className  = "OllamaClass"
		windowName = ""
	)

	t.wmSystrayMessage = WM_USER + 1
	t.visibleItems = make(map[uint32][]uint32)
	t.menus = make(map[uint32]windows.Handle)
	t.menuOf = make(map[uint32]windows.Handle)

	t.loadedImages = make(map[string]windows.Handle)

	taskbarEventNamePtr, _ := windows.UTF16PtrFromString("TaskbarCreated")
	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644947
	res, _, err := pRegisterWindowMessage.Call(
		uintptr(unsafe.Pointer(taskbarEventNamePtr)),
	)
	if res == 0 { // success 0xc000-0xfff
		return fmt.Errorf("failed to register window: %w", err)
	}
	t.wmTaskbarCreated = uint32(res)

	instanceHandle, _, err := pGetModuleHandle.Call(0)
	if instanceHandle == 0 {
		return err
	}
	t.instance = windows.Handle(instanceHandle)

	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms648072(v=vs.85).aspx
	iconHandle, _, err := pLoadIcon.Call(0, uintptr(IDI_APPLICATION))
	if iconHandle == 0 {
		return err
	}
	t.icon = windows.Handle(iconHandle)

	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms648391(v=vs.85).aspx
	cursorHandle, _, err := pLoadCursor.Call(0, uintptr(IDC_ARROW))
	if cursorHandle == 0 {
		return err
	}
	t.cursor = windows.Handle(cursorHandle)

	classNamePtr, err := windows.UTF16PtrFromString(className)
	if err != nil {
		return err
	}

	windowNamePtr, err := windows.UTF16PtrFromString(windowName)
	if err != nil {
		return err
	}

	t.wcex = &wndClassEx{
		Style:      CS_HREDRAW | CS_VREDRAW,
		WndProc:    windows.NewCallback(t.wndProc),
		Instance:   t.instance,
		Icon:       t.icon,
		Cursor:     t.cursor,
		Background: windows.Handle(6), // (COLOR_WINDOW + 1)
		ClassName:  classNamePtr,
		IconSm:     t.icon,
	}
	if err := t.wcex.register(); err != nil {
		return err
	}

	windowHandle, _, err := pCreateWindowEx.Call(
		uintptr(0),
		uintptr(unsafe.Pointer(classNamePtr)),
		uintptr(unsafe.Pointer(windowNamePtr)),
		uintptr(WS_OVERLAPPEDWINDOW),
		uintptr(CW_USEDEFAULT),
		uintptr(CW_USEDEFAULT),
		uintptr(CW_USEDEFAULT),
		uintptr(CW_USEDEFAULT),
		uintptr(0),
		uintptr(0),
		uintptr(t.instance),
		uintptr(0),
	)
	if windowHandle == 0 {
		return err
	}
	t.window = windows.Handle(windowHandle)

	pShowWindow.Call(uintptr(t.window), uintptr(SW_HIDE)) //nolint:errcheck

	boolRet, _, err := pUpdateWindow.Call(uintptr(t.window))
	if boolRet == 0 {
		slog.Error(fmt.Sprintf("failed to update window: %s", err))
	}

	t.muNID.Lock()
	defer t.muNID.Unlock()
	t.nid = &notifyIconData{
		Wnd:             windows.Handle(t.window),
		ID:              100,
		Flags:           NIF_MESSAGE,
		CallbackMessage: t.wmSystrayMessage,
	}
	t.nid.Size = uint32(unsafe.Sizeof(*t.nid))

	return t.nid.add()
}

func (t *winTray) createMenu() error {

	menuHandle, _, err := pCreatePopupMenu.Call()
	if menuHandle == 0 {
		return err
	}
	t.menus[0] = windows.Handle(menuHandle)

	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647575(v=vs.85).aspx
	mi := struct {
		Size, Mask, Style, Max uint32
		Background             windows.Handle
		ContextHelpID          uint32
		MenuData               uintptr
	}{
		Mask: MIM_APPLYTOSUBMENUS,
	}
	mi.Size = uint32(unsafe.Sizeof(mi))

	res, _, err := pSetMenuInfo.Call(
		uintptr(t.menus[0]),
		uintptr(unsafe.Pointer(&mi)),
	)
	if res == 0 {
		return err
	}
	return nil
}

// Contains information about a menu item.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms647578(v=vs.85).aspx
type menuItemInfo struct {
	Size, Mask, Type, State     uint32
	ID                          uint32
	SubMenu, Checked, Unchecked windows.Handle
	ItemData                    uintptr
	TypeData                    *uint16
	Cch                         uint32
	BMPItem                     windows.Handle
}

func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title string, disabled bool) error {
	titlePtr, err := windows.UTF16PtrFromString(title)
	if err != nil {
		return err
	}

	mi := menuItemInfo{
		Mask:     MIIM_FTYPE | MIIM_STRING | MIIM_ID | MIIM_STATE,
		Type:     MFT_STRING,
		ID:       uint32(menuItemId),
		TypeData: titlePtr,
		Cch:      uint32(len(title)),
	}
	mi.Size = uint32(unsafe.Sizeof(mi))
	if disabled {
		mi.State |= MFS_DISABLED
	}

	var res uintptr
	t.muMenus.RLock()
	menu := t.menus[parentId]
	t.muMenus.RUnlock()
	if t.getVisibleItemIndex(parentId, menuItemId) != -1 {
		// We set the menu item info based on the menuID
		boolRet, _, err := pSetMenuItemInfo.Call(
			uintptr(menu),
			uintptr(menuItemId),
			0,
			uintptr(unsafe.Pointer(&mi)),
		)
		if boolRet == 0 {
			return fmt.Errorf("failed to set menu item: %w", err)
		}
	}

	if res == 0 {
		// Menu item does not already exist, create it
		t.muMenus.RLock()
		submenu, exists := t.menus[menuItemId]
		t.muMenus.RUnlock()
		if exists {
			mi.Mask |= MIIM_SUBMENU
			mi.SubMenu = submenu
		}
		t.addToVisibleItems(parentId, menuItemId)
		position := t.getVisibleItemIndex(parentId, menuItemId)
		res, _, err = pInsertMenuItem.Call(
			uintptr(menu),
			uintptr(position),
			1,
			uintptr(unsafe.Pointer(&mi)),
		)
		if res == 0 {
			t.delFromVisibleItems(parentId, menuItemId)
			return err
		}
		t.muMenuOf.Lock()
		t.menuOf[menuItemId] = menu
		t.muMenuOf.Unlock()
	}

	return nil
}

func (t *winTray) addSeparatorMenuItem(menuItemId, parentId uint32) error {

	mi := menuItemInfo{
		Mask: MIIM_FTYPE | MIIM_ID | MIIM_STATE,
		Type: MFT_SEPARATOR,
		ID:   uint32(menuItemId),
	}

	mi.Size = uint32(unsafe.Sizeof(mi))

	t.addToVisibleItems(parentId, menuItemId)
	position := t.getVisibleItemIndex(parentId, menuItemId)
	t.muMenus.RLock()
	menu := uintptr(t.menus[parentId])
	t.muMenus.RUnlock()
	res, _, err := pInsertMenuItem.Call(
		menu,
		uintptr(position),
		1,
		uintptr(unsafe.Pointer(&mi)),
	)
	if res == 0 {
		return err
	}

	return nil
}

// func (t *winTray) hideMenuItem(menuItemId, parentId uint32) error {
// 	const ERROR_SUCCESS syscall.Errno = 0

// 	t.muMenus.RLock()
// 	menu := uintptr(t.menus[parentId])
// 	t.muMenus.RUnlock()
// 	res, _, err := pRemoveMenu.Call(
// 		menu,
// 		uintptr(menuItemId),
// 		MF_BYCOMMAND,
// 	)
// 	if res == 0 && err.(syscall.Errno) != ERROR_SUCCESS {
// 		return err
// 	}
// 	t.delFromVisibleItems(parentId, menuItemId)

// 	return nil
// }

func (t *winTray) showMenu() error {
	p := point{}
	boolRet, _, err := pGetCursorPos.Call(uintptr(unsafe.Pointer(&p)))
	if boolRet == 0 {
		return err
	}
	boolRet, _, err = pSetForegroundWindow.Call(uintptr(t.window))
	if boolRet == 0 {
		slog.Warn(fmt.Sprintf("failed to bring menu to foreground: %s", err))
	}

	boolRet, _, err = pTrackPopupMenu.Call(
		uintptr(t.menus[0]),
		TPM_BOTTOMALIGN|TPM_LEFTALIGN,
		uintptr(p.X),
		uintptr(p.Y),
		0,
		uintptr(t.window),
		0,
	)
	if boolRet == 0 {
		return err
	}

	return nil
}

func (t *winTray) delFromVisibleItems(parent, val uint32) {
	t.muVisibleItems.Lock()
	defer t.muVisibleItems.Unlock()
	visibleItems := t.visibleItems[parent]
	for i, itemval := range visibleItems {
		if val == itemval {
			t.visibleItems[parent] = append(visibleItems[:i], visibleItems[i+1:]...)
			break
		}
	}
}

func (t *winTray) addToVisibleItems(parent, val uint32) {
	t.muVisibleItems.Lock()
	defer t.muVisibleItems.Unlock()
	if visibleItems, exists := t.visibleItems[parent]; !exists {
		t.visibleItems[parent] = []uint32{val}
	} else {
		newvisible := append(visibleItems, val)
		sort.Slice(newvisible, func(i, j int) bool { return newvisible[i] < newvisible[j] })
		t.visibleItems[parent] = newvisible
	}
}

func (t *winTray) getVisibleItemIndex(parent, val uint32) int {
	t.muVisibleItems.RLock()
	defer t.muVisibleItems.RUnlock()
	for i, itemval := range t.visibleItems[parent] {
		if val == itemval {
			return i
		}
	}
	return -1
}

func iconBytesToFilePath(iconBytes []byte) (string, error) {
	bh := md5.Sum(iconBytes)
	dataHash := hex.EncodeToString(bh[:])
	iconFilePath := filepath.Join(os.TempDir(), "ollama_temp_icon_"+dataHash)

	if _, err := os.Stat(iconFilePath); os.IsNotExist(err) {
		if err := os.WriteFile(iconFilePath, iconBytes, 0644); err != nil {
			return "", err
		}
	}
	return iconFilePath, nil
}

// Loads an image from file and shows it in tray.
// Shell_NotifyIcon: https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159(v=vs.85).aspx
func (t *winTray) setIcon(src string) error {

	h, err := t.loadIconFrom(src)
	if err != nil {
		return err
	}

	t.muNID.Lock()
	defer t.muNID.Unlock()
	t.nid.Icon = h
	t.nid.Flags |= NIF_ICON
	t.nid.Size = uint32(unsafe.Sizeof(*t.nid))

	return t.nid.modify()
}

// Loads an image from file to be shown in tray or menu item.
// LoadImage: https://msdn.microsoft.com/en-us/library/windows/desktop/ms648045(v=vs.85).aspx
func (t *winTray) loadIconFrom(src string) (windows.Handle, error) {

	// Save and reuse handles of loaded images
	t.muLoadedImages.RLock()
	h, ok := t.loadedImages[src]
	t.muLoadedImages.RUnlock()
	if !ok {
		srcPtr, err := windows.UTF16PtrFromString(src)
		if err != nil {
			return 0, err
		}
		res, _, err := pLoadImage.Call(
			0,
			uintptr(unsafe.Pointer(srcPtr)),
			IMAGE_ICON,
			0,
			0,
			LR_LOADFROMFILE|LR_DEFAULTSIZE,
		)
		if res == 0 {
			return 0, err
		}
		h = windows.Handle(res)
		t.muLoadedImages.Lock()
		t.loadedImages[src] = h
		t.muLoadedImages.Unlock()
	}
	return h, nil
}

func (t *winTray) DisplayFirstUseNotification() error {
	t.muNID.Lock()
	defer t.muNID.Unlock()
	copy(t.nid.InfoTitle[:], windows.StringToUTF16(firstTimeTitle))
	copy(t.nid.Info[:], windows.StringToUTF16(firstTimeMessage))
	t.nid.Flags |= NIF_INFO
	t.nid.Size = uint32(unsafe.Sizeof(*t.nid))

	return t.nid.modify()
}
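Note how the icon path works end to end: LoadImageW can only read icons from disk, so iconBytesToFilePath writes the embedded bytes to a temp file named after their MD5 hash (written once, stable across runs), and loadIconFrom memoizes the resulting handle. A sketch of the update-icon path built from these pieces (the method name is hypothetical; the calls are the ones defined above):

// Hypothetical sketch: swap the tray to the update icon.
func (t *winTray) showUpdateIcon() error {
	path, err := iconBytesToFilePath(t.updateIcon) // content-addressed temp file
	if err != nil {
		return err
	}
	return t.setIcon(path) // LoadImageW (cached) + NIM_MODIFY via nid.modify()
}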
@@ -1,89 +0,0 @@
//go:build windows

package wintray

import (
	"runtime"

	"golang.org/x/sys/windows"
)

var (
	k32 = windows.NewLazySystemDLL("Kernel32.dll")
	u32 = windows.NewLazySystemDLL("User32.dll")
	s32 = windows.NewLazySystemDLL("Shell32.dll")

	pCreatePopupMenu       = u32.NewProc("CreatePopupMenu")
	pCreateWindowEx        = u32.NewProc("CreateWindowExW")
	pDefWindowProc         = u32.NewProc("DefWindowProcW")
	pDestroyWindow         = u32.NewProc("DestroyWindow")
	pDispatchMessage       = u32.NewProc("DispatchMessageW")
	pGetCursorPos          = u32.NewProc("GetCursorPos")
	pGetMessage            = u32.NewProc("GetMessageW")
	pGetModuleHandle       = k32.NewProc("GetModuleHandleW")
	pInsertMenuItem        = u32.NewProc("InsertMenuItemW")
	pLoadCursor            = u32.NewProc("LoadCursorW")
	pLoadIcon              = u32.NewProc("LoadIconW")
	pLoadImage             = u32.NewProc("LoadImageW")
	pPostMessage           = u32.NewProc("PostMessageW")
	pPostQuitMessage       = u32.NewProc("PostQuitMessage")
	pRegisterClass         = u32.NewProc("RegisterClassExW")
	pRegisterWindowMessage = u32.NewProc("RegisterWindowMessageW")
	pSetForegroundWindow   = u32.NewProc("SetForegroundWindow")
	pSetMenuInfo           = u32.NewProc("SetMenuInfo")
	pSetMenuItemInfo       = u32.NewProc("SetMenuItemInfoW")
	pShellNotifyIcon       = s32.NewProc("Shell_NotifyIconW")
	pShowWindow            = u32.NewProc("ShowWindow")
	pTrackPopupMenu        = u32.NewProc("TrackPopupMenu")
	pTranslateMessage      = u32.NewProc("TranslateMessage")
	pUnregisterClass       = u32.NewProc("UnregisterClassW")
	pUpdateWindow          = u32.NewProc("UpdateWindow")
)

const (
	CS_HREDRAW          = 0x0002
	CS_VREDRAW          = 0x0001
	CW_USEDEFAULT       = 0x80000000
	IDC_ARROW           = 32512 // Standard arrow
	IDI_APPLICATION     = 32512
	IMAGE_ICON          = 1          // Loads an icon
	LR_DEFAULTSIZE      = 0x00000040 // Loads default-size icon for windows(SM_CXICON x SM_CYICON) if cx, cy are set to zero
	LR_LOADFROMFILE     = 0x00000010 // Loads the stand-alone image from the file
	MF_BYCOMMAND        = 0x00000000
	MFS_DISABLED        = 0x00000003
	MFT_SEPARATOR       = 0x00000800
	MFT_STRING          = 0x00000000
	MIIM_BITMAP         = 0x00000080
	MIIM_FTYPE          = 0x00000100
	MIIM_ID             = 0x00000002
	MIIM_STATE          = 0x00000001
	MIIM_STRING         = 0x00000040
	MIIM_SUBMENU        = 0x00000004
	MIM_APPLYTOSUBMENUS = 0x80000000
	NIF_ICON            = 0x00000002
	NIF_INFO            = 0x00000010
	NIF_MESSAGE         = 0x00000001
	SW_HIDE             = 0
	TPM_BOTTOMALIGN     = 0x0020
	TPM_LEFTALIGN       = 0x0000
	WM_CLOSE            = 0x0010
	WM_USER             = 0x0400
	WS_CAPTION          = 0x00C00000
	WS_MAXIMIZEBOX      = 0x00010000
	WS_MINIMIZEBOX      = 0x00020000
	WS_OVERLAPPED       = 0x00000000
	WS_OVERLAPPEDWINDOW = WS_OVERLAPPED | WS_CAPTION | WS_SYSMENU | WS_THICKFRAME | WS_MINIMIZEBOX | WS_MAXIMIZEBOX
	WS_SYSMENU          = 0x00080000
	WS_THICKFRAME       = 0x00040000
)

// Not sure if this is actually needed on windows
func init() {
	runtime.LockOSThread()
}

// The POINT structure defines the x- and y- coordinates of a point.
// https://msdn.microsoft.com/en-us/library/windows/desktop/dd162805(v=vs.85).aspx
type point struct {
	X, Y int32
}
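Every Win32 call in this package goes through the same LazyProc convention: Call returns (r1, r2, lastErr), lastErr is always non-nil, and it is only meaningful when r1 signals failure (zero, for most of these APIs). A minimal sketch of the pattern with one proc declared above (the helper is illustrative, not from the repo):

// Minimal sketch of the Proc.Call convention used throughout this package.
func callUpdateWindow(wnd windows.Handle) error {
	res, _, lastErr := pUpdateWindow.Call(uintptr(wnd))
	if res == 0 {
		return lastErr // wraps GetLastError; ignore it when res != 0
	}
	return nil
}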
@@ -1,45 +0,0 @@
//go:build windows

package wintray

import (
	"unsafe"

	"golang.org/x/sys/windows"
)

// Contains window class information.
// It is used with the RegisterClassEx and GetClassInfoEx functions.
// https://msdn.microsoft.com/en-us/library/ms633577.aspx
type wndClassEx struct {
	Size, Style                        uint32
	WndProc                            uintptr
	ClsExtra, WndExtra                 int32
	Instance, Icon, Cursor, Background windows.Handle
	MenuName, ClassName                *uint16
	IconSm                             windows.Handle
}

// Registers a window class for subsequent use in calls to the CreateWindow or CreateWindowEx function.
// https://msdn.microsoft.com/en-us/library/ms633587.aspx
func (w *wndClassEx) register() error {
	w.Size = uint32(unsafe.Sizeof(*w))
	res, _, err := pRegisterClass.Call(uintptr(unsafe.Pointer(w)))
	if res == 0 {
		return err
	}
	return nil
}

// Unregisters a window class, freeing the memory required for the class.
// https://msdn.microsoft.com/en-us/library/ms644899.aspx
func (w *wndClassEx) unregister() error {
	res, _, err := pUnregisterClass.Call(
		uintptr(unsafe.Pointer(w.ClassName)),
		uintptr(w.Instance),
	)
	if res == 0 {
		return err
	}
	return nil
}
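register and unregister bracket the window class's lifetime: the class must be registered before CreateWindowEx references its name, and may only be unregistered once every window created from it has been destroyed. A hedged sketch of that pairing (hypothetical caller; error handling elided):

// Hypothetical pairing: register once, create windows, unregister last.
func exampleClassLifecycle(w *wndClassEx) error {
	if err := w.register(); err != nil { // RegisterClassExW
		return err
	}
	defer w.unregister() //nolint:errcheck // UnregisterClassW after DestroyWindow
	// ... pCreateWindowEx.Call(...) using w.ClassName, then run the message loop ...
	return nil
}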
61
auth/auth.go
@@ -1,61 +0,0 @@
package auth

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"log/slog"
	"os"
	"path/filepath"

	"golang.org/x/crypto/ssh"
)

const defaultPrivateKey = "id_ed25519"

func NewNonce(r io.Reader, length int) (string, error) {
	nonce := make([]byte, length)
	if _, err := io.ReadFull(r, nonce); err != nil {
		return "", err
	}

	return base64.RawURLEncoding.EncodeToString(nonce), nil
}

func Sign(ctx context.Context, bts []byte) (string, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return "", err
	}

	keyPath := filepath.Join(home, ".ollama", defaultPrivateKey)

	privateKeyFile, err := os.ReadFile(keyPath)
	if err != nil {
		slog.Info(fmt.Sprintf("Failed to load private key: %v", err))
		return "", err
	}

	privateKey, err := ssh.ParsePrivateKey(privateKeyFile)
	if err != nil {
		return "", err
	}

	// get the pubkey, but remove the type
	publicKey := ssh.MarshalAuthorizedKey(privateKey.PublicKey())
	parts := bytes.Split(publicKey, []byte(" "))
	if len(parts) < 2 {
		return "", fmt.Errorf("malformed public key")
	}

	signedData, err := privateKey.Sign(rand.Reader, bts)
	if err != nil {
		return "", err
	}

	// signature is <pubkey>:<signature>
	return fmt.Sprintf("%s:%s", bytes.TrimSpace(parts[1]), base64.StdEncoding.EncodeToString(signedData.Blob)), nil
}
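Sign emits a self-describing token, <base64 pubkey>:<base64 signature>, so the server can verify the signature without a prior key exchange. A sketch of how a caller might sign a locally generated nonce (the helper and its use of the nonce are assumptions for illustration; the calls are the two functions above):

// Hypothetical sketch: sign a fresh nonce with the local ollama key.
func signChallenge(ctx context.Context) (string, error) {
	nonce, err := NewNonce(rand.Reader, 16) // 16 random bytes, base64url-encoded
	if err != nil {
		return "", err
	}
	return Sign(ctx, []byte(nonce)) // "<pubkey>:<signature>"
}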
843
cmd/cmd.go
@@ -14,15 +14,15 @@ import (
 	"net"
 	"net/http"
 	"os"
+	"os/exec"
 	"os/signal"
 	"path/filepath"
+	"regexp"
 	"runtime"
 	"strings"
 	"syscall"
 	"time"

-	"github.com/containerd/console"
-
 	"github.com/olekukonko/tablewriter"
 	"github.com/spf13/cobra"
 	"golang.org/x/crypto/ssh"
@@ -33,10 +33,13 @@ import (
 	"github.com/jmorganca/ollama/format"
 	"github.com/jmorganca/ollama/parser"
 	"github.com/jmorganca/ollama/progress"
+	"github.com/jmorganca/ollama/readline"
 	"github.com/jmorganca/ollama/server"
 	"github.com/jmorganca/ollama/version"
 )

+type ImageData []byte
+
 func CreateHandler(cmd *cobra.Command, args []string) error {
 	filename, _ := cmd.Flags().GetString("file")
 	filename, err := filepath.Abs(filename)
@@ -148,68 +151,19 @@ func RunHandler(cmd *cobra.Command, args []string) error {
 	}

 	name := args[0]

 	// check if the model exists on the server
-	show, err := client.Show(cmd.Context(), &api.ShowRequest{Name: name})
+	_, err = client.Show(cmd.Context(), &api.ShowRequest{Name: name})
 	var statusError api.StatusError
 	switch {
 	case errors.As(err, &statusError) && statusError.StatusCode == http.StatusNotFound:
-		if err := PullHandler(cmd, []string{name}); err != nil {
-			return err
-		}
-
-		show, err = client.Show(cmd.Context(), &api.ShowRequest{Name: name})
-		if err != nil {
+		if err := PullHandler(cmd, args); err != nil {
 			return err
 		}
 	case err != nil:
 		return err
 	}

-	interactive := true
-
-	opts := runOptions{
-		Model:       args[0],
-		WordWrap:    os.Getenv("TERM") == "xterm-256color",
-		Options:     map[string]interface{}{},
-		MultiModal:  slices.Contains(show.Details.Families, "clip"),
-		ParentModel: show.Details.ParentModel,
-	}
-
-	format, err := cmd.Flags().GetString("format")
-	if err != nil {
-		return err
-	}
-	opts.Format = format
-
-	prompts := args[1:]
-	// prepend stdin to the prompt if provided
-	if !term.IsTerminal(int(os.Stdin.Fd())) {
-		in, err := io.ReadAll(os.Stdin)
-		if err != nil {
-			return err
-		}
-
-		prompts = append([]string{string(in)}, prompts...)
-		opts.WordWrap = false
-		interactive = false
-	}
-	opts.Prompt = strings.Join(prompts, " ")
-	if len(prompts) > 0 {
-		interactive = false
-	}
-
-	nowrap, err := cmd.Flags().GetBool("nowordwrap")
-	if err != nil {
-		return err
-	}
-	opts.WordWrap = !nowrap
-
-	if !interactive {
-		return generate(cmd, opts)
-	}
-
-	return generateInteractive(cmd, opts)
+	return RunGenerate(cmd, args)
 }

 func PushHandler(cmd *cobra.Command, args []string) error {
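The switch above is the standard way to branch on server responses in this client: api.StatusError carries the HTTP status code, and errors.As unwraps it from whatever error the API client returned. The same check in isolation (the helper name is ours, not the repo's):

// Hypothetical helper isolating the StatusError pattern used by RunHandler.
func isNotFound(err error) bool {
	var statusError api.StatusError
	return errors.As(err, &statusError) && statusError.StatusCode == http.StatusNotFound
}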
@@ -461,139 +415,66 @@ func PullHandler(cmd *cobra.Command, args []string) error {
 	return nil
 }

+func RunGenerate(cmd *cobra.Command, args []string) error {
+	interactive := true
+
+	opts := generateOptions{
+		Model:    args[0],
+		WordWrap: os.Getenv("TERM") == "xterm-256color",
+		Options:  map[string]interface{}{},
+		Images:   []ImageData{},
+	}
+
+	format, err := cmd.Flags().GetString("format")
+	if err != nil {
+		return err
+	}
+	opts.Format = format
+
+	prompts := args[1:]
+	// prepend stdin to the prompt if provided
+	if !term.IsTerminal(int(os.Stdin.Fd())) {
+		in, err := io.ReadAll(os.Stdin)
+		if err != nil {
+			return err
+		}
+
+		prompts = append([]string{string(in)}, prompts...)
+		opts.WordWrap = false
+		interactive = false
+	}
+	opts.Prompt = strings.Join(prompts, " ")
+	if len(prompts) > 0 {
+		interactive = false
+	}
+
+	nowrap, err := cmd.Flags().GetBool("nowordwrap")
+	if err != nil {
+		return err
+	}
+	opts.WordWrap = !nowrap
+
+	if !interactive {
+		return generate(cmd, opts)
+	}
+
+	return generateInteractive(cmd, opts)
+}
+
 type generateContextKey string

-type runOptions struct {
-	Model       string
-	ParentModel string
-	Prompt      string
-	Messages    []api.Message
-	WordWrap    bool
-	Format      string
-	System      string
-	Template    string
-	Images      []api.ImageData
-	Options     map[string]interface{}
-	MultiModal  bool
+type generateOptions struct {
+	Model    string
+	Prompt   string
+	WordWrap bool
+	Format   string
+	System   string
+	Template string
+	Images   []ImageData
+	Options  map[string]interface{}
 }

-type displayResponseState struct {
-	lineLength int
-	wordBuffer string
-}
-
-func displayResponse(content string, wordWrap bool, state *displayResponseState) {
-	termWidth, _, _ := term.GetSize(int(os.Stdout.Fd()))
-	if wordWrap && termWidth >= 10 {
-		for _, ch := range content {
-			if state.lineLength+1 > termWidth-5 {
-				if len(state.wordBuffer) > termWidth-10 {
-					fmt.Printf("%s%c", state.wordBuffer, ch)
-					state.wordBuffer = ""
-					state.lineLength = 0
-					continue
-				}
-
-				// backtrack the length of the last word and clear to the end of the line
-				fmt.Printf("\x1b[%dD\x1b[K\n", len(state.wordBuffer))
-				fmt.Printf("%s%c", state.wordBuffer, ch)
-				state.lineLength = len(state.wordBuffer) + 1
-			} else {
-				fmt.Print(string(ch))
-				state.lineLength += 1
-
-				switch ch {
-				case ' ':
-					state.wordBuffer = ""
-				case '\n':
-					state.lineLength = 0
-				default:
-					state.wordBuffer += string(ch)
-				}
-			}
-		}
-	} else {
-		fmt.Printf("%s%s", state.wordBuffer, content)
-		if len(state.wordBuffer) > 0 {
-			state.wordBuffer = ""
-		}
-	}
-}
-
-func chat(cmd *cobra.Command, opts runOptions) (*api.Message, error) {
-	client, err := api.ClientFromEnvironment()
-	if err != nil {
-		return nil, err
-	}
-
-	p := progress.NewProgress(os.Stderr)
-	defer p.StopAndClear()
-
-	spinner := progress.NewSpinner("")
-	p.Add("", spinner)
-
-	cancelCtx, cancel := context.WithCancel(cmd.Context())
-	defer cancel()
-
-	sigChan := make(chan os.Signal, 1)
-	signal.Notify(sigChan, syscall.SIGINT)
-
-	go func() {
-		<-sigChan
-		cancel()
-	}()
-
-	var state *displayResponseState = &displayResponseState{}
-	var latest api.ChatResponse
-	var fullResponse strings.Builder
-	var role string
-
-	fn := func(response api.ChatResponse) error {
-		p.StopAndClear()
-
-		latest = response
-
-		role = response.Message.Role
-		content := response.Message.Content
-		fullResponse.WriteString(content)
-
-		displayResponse(content, opts.WordWrap, state)
-
-		return nil
-	}
-
-	req := &api.ChatRequest{
-		Model:    opts.Model,
-		Messages: opts.Messages,
-		Format:   opts.Format,
-		Options:  opts.Options,
-	}
-
-	if err := client.Chat(cancelCtx, req, fn); err != nil {
-		if errors.Is(err, context.Canceled) {
-			return nil, nil
-		}
-		return nil, err
-	}
-
-	if len(opts.Messages) > 0 {
-		fmt.Println()
-		fmt.Println()
-	}
-
-	verbose, err := cmd.Flags().GetBool("verbose")
-	if err != nil {
-		return nil, err
-	}
-
-	if verbose {
-		latest.Summary()
-	}
-
-	return &api.Message{Role: role, Content: fullResponse.String()}, nil
-}
-
-func generate(cmd *cobra.Command, opts runOptions) error {
+func generate(cmd *cobra.Command, opts generateOptions) error {
 	client, err := api.ClientFromEnvironment()
 	if err != nil {
 		return err
@@ -612,6 +493,11 @@ func generate(cmd *cobra.Command, opts runOptions) error {
 		generateContext = []int{}
 	}

+	termWidth, _, err := term.GetSize(int(os.Stdout.Fd()))
+	if err != nil {
+		opts.WordWrap = false
+	}
+
 	ctx, cancel := context.WithCancel(cmd.Context())
 	defer cancel()

@@ -623,44 +509,94 @@ func generate(cmd *cobra.Command, opts runOptions) error {
 		cancel()
 	}()

-	var state *displayResponseState = &displayResponseState{}
+	var currentLineLength int
+	var wordBuffer string

 	fn := func(response api.GenerateResponse) error {
 		p.StopAndClear()

 		latest = response
-		content := response.Response

-		displayResponse(content, opts.WordWrap, state)
+		termWidth, _, _ = term.GetSize(int(os.Stdout.Fd()))
+		if opts.WordWrap && termWidth >= 10 {
+			for _, ch := range response.Response {
+				if currentLineLength+1 > termWidth-5 {
+					if len(wordBuffer) > termWidth-10 {
+						fmt.Printf("%s%c", wordBuffer, ch)
+						wordBuffer = ""
+						currentLineLength = 0
+						continue
+					}
+
+					// backtrack the length of the last word and clear to the end of the line
+					fmt.Printf("\x1b[%dD\x1b[K\n", len(wordBuffer))
+					fmt.Printf("%s%c", wordBuffer, ch)
+					currentLineLength = len(wordBuffer) + 1
+				} else {
+					fmt.Print(string(ch))
+					currentLineLength += 1
+
+					switch ch {
+					case ' ':
+						wordBuffer = ""
+					case '\n':
+						currentLineLength = 0
+					default:
+						wordBuffer += string(ch)
+					}
+				}
+			}
+		} else {
+			fmt.Printf("%s%s", wordBuffer, response.Response)
+			if len(wordBuffer) > 0 {
+				wordBuffer = ""
+			}
+		}

 		return nil
 	}

-	if opts.MultiModal {
-		opts.Prompt, opts.Images, err = extractFileData(opts.Prompt)
-		if err != nil {
-			return err
-		}
+	images := make([]api.ImageData, 0)
+	for _, i := range opts.Images {
+		images = append(images, api.ImageData(i))
 	}

 	request := api.GenerateRequest{
 		Model:    opts.Model,
 		Prompt:   opts.Prompt,
 		Context:  generateContext,
-		Images:   opts.Images,
 		Format:   opts.Format,
 		System:   opts.System,
 		Template: opts.Template,
 		Options:  opts.Options,
+		Images:   images,
 	}

 	if err := client.Generate(ctx, &request, fn); err != nil {
-		if errors.Is(err, context.Canceled) {
+		switch {
+		case errors.Is(err, context.Canceled):
 			return nil
+		case strings.Contains(err.Error(), "unsupported model format"):
+			// pull and retry to see if the model has been updated
+			parts := strings.Split(opts.Model, string(os.PathSeparator))
+			if len(parts) == 1 {
+				// this is a library model, log some info
+				fmt.Fprintln(os.Stderr, "This model is no longer compatible with Ollama. Pulling a new version...")
+			}
+			if err := PullHandler(cmd, []string{opts.Model}); err != nil {
+				fmt.Printf("Error: %s\n", err)
+				return fmt.Errorf("unsupported model, please update this model to gguf format") // relay the original error
+			}
+			// retry
+			if err := client.Generate(ctx, &request, fn); err != nil {
+				if errors.Is(err, context.Canceled) {
+					return nil
+				}
+				return err
+			}
+		default:
+			return err
 		}
-		return err
 	}

 	if opts.Prompt != "" {
 		fmt.Println()
 		fmt.Println()
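The streaming word-wrap works because the terminal can un-print: the escape sequence \x1b[ND moves the cursor N columns left and \x1b[K clears to end of line, so a word that would overflow is erased from the current line and replayed on the next one. Just that trick in isolation (an illustrative helper, not from the diff):

// Hypothetical sketch: move a partially printed word onto the next line.
func wrapLastWord(wordBuffer string) {
	fmt.Printf("\x1b[%dD\x1b[K\n", len(wordBuffer)) // erase the echoed word, newline
	fmt.Print(wordBuffer)                           // replay it on the fresh line
}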
@@ -685,8 +621,443 @@ func generate(cmd *cobra.Command, opts runOptions) error {
 	return nil
 }

+type MultilineState int
+
+const (
+	MultilineNone MultilineState = iota
+	MultilinePrompt
+	MultilineSystem
+	MultilineTemplate
+)
+
+func modelIsMultiModal(cmd *cobra.Command, name string) bool {
+	// get model details
+	client, err := api.ClientFromEnvironment()
+	if err != nil {
+		fmt.Println("error: couldn't connect to ollama server")
+		return false
+	}
+
+	req := api.ShowRequest{Name: name}
+	resp, err := client.Show(cmd.Context(), &req)
+	if err != nil {
+		return false
+	}
+
+	return slices.Contains(resp.Details.Families, "clip")
+}
+
+func generateInteractive(cmd *cobra.Command, opts generateOptions) error {
+	multiModal := modelIsMultiModal(cmd, opts.Model)
+
+	// load the model
+	loadOpts := generateOptions{
+		Model:  opts.Model,
+		Prompt: "",
+		Images: []ImageData{},
+	}
+	if err := generate(cmd, loadOpts); err != nil {
+		return err
+	}
+
+	usage := func() {
+		fmt.Fprintln(os.Stderr, "Available Commands:")
+		fmt.Fprintln(os.Stderr, "  /set         Set session variables")
+		fmt.Fprintln(os.Stderr, "  /show        Show model information")
+		fmt.Fprintln(os.Stderr, "  /bye         Exit")
+		fmt.Fprintln(os.Stderr, "  /?, /help    Help for a command")
+		fmt.Fprintln(os.Stderr, "")
+		fmt.Fprintln(os.Stderr, "Use \"\"\" to begin a multi-line message.")
+		fmt.Fprintln(os.Stderr, "")
+	}
+
+	usageSet := func() {
+		fmt.Fprintln(os.Stderr, "Available Commands:")
+		fmt.Fprintln(os.Stderr, "  /set parameter ...     Set a parameter")
+		fmt.Fprintln(os.Stderr, "  /set system <string>   Set system message")
+		fmt.Fprintln(os.Stderr, "  /set template <string> Set prompt template")
+		fmt.Fprintln(os.Stderr, "  /set history           Enable history")
+		fmt.Fprintln(os.Stderr, "  /set nohistory         Disable history")
+		fmt.Fprintln(os.Stderr, "  /set wordwrap          Enable wordwrap")
+		fmt.Fprintln(os.Stderr, "  /set nowordwrap        Disable wordwrap")
+		fmt.Fprintln(os.Stderr, "  /set format json       Enable JSON mode")
+		fmt.Fprintln(os.Stderr, "  /set noformat          Disable formatting")
+		fmt.Fprintln(os.Stderr, "  /set verbose           Show LLM stats")
+		fmt.Fprintln(os.Stderr, "  /set quiet             Disable LLM stats")
+		fmt.Fprintln(os.Stderr, "")
+	}
+
+	usageShow := func() {
+		fmt.Fprintln(os.Stderr, "Available Commands:")
+		fmt.Fprintln(os.Stderr, "  /show license      Show model license")
+		fmt.Fprintln(os.Stderr, "  /show modelfile    Show Modelfile for this model")
+		fmt.Fprintln(os.Stderr, "  /show parameters   Show parameters for this model")
+		fmt.Fprintln(os.Stderr, "  /show system       Show system message")
+		fmt.Fprintln(os.Stderr, "  /show template     Show prompt template")
+		fmt.Fprintln(os.Stderr, "")
+	}
+
+	// only list out the most common parameters
+	usageParameters := func() {
+		fmt.Fprintln(os.Stderr, "Available Parameters:")
+		fmt.Fprintln(os.Stderr, "  /set parameter seed <int>             Random number seed")
+		fmt.Fprintln(os.Stderr, "  /set parameter num_predict <int>      Max number of tokens to predict")
+		fmt.Fprintln(os.Stderr, "  /set parameter top_k <int>            Pick from top k num of tokens")
+		fmt.Fprintln(os.Stderr, "  /set parameter top_p <float>          Pick token based on sum of probabilities")
+		fmt.Fprintln(os.Stderr, "  /set parameter num_ctx <int>          Set the context size")
+		fmt.Fprintln(os.Stderr, "  /set parameter temperature <float>    Set creativity level")
+		fmt.Fprintln(os.Stderr, "  /set parameter repeat_penalty <float> How strongly to penalize repetitions")
+		fmt.Fprintln(os.Stderr, "  /set parameter repeat_last_n <int>    Set how far back to look for repetitions")
+		fmt.Fprintln(os.Stderr, "  /set parameter num_gpu <int>          The number of layers to send to the GPU")
+		fmt.Fprintln(os.Stderr, "  /set parameter stop \"<string>\", ...   Set the stop parameters")
+		fmt.Fprintln(os.Stderr, "")
+	}
+
+	scanner, err := readline.New(readline.Prompt{
+		Prompt:         ">>> ",
+		AltPrompt:      "... ",
+		Placeholder:    "Send a message (/? for help)",
+		AltPlaceholder: `Use """ to end multi-line input`,
+	})
+	if err != nil {
+		return err
+	}
+
+	fmt.Print(readline.StartBracketedPaste)
+	defer fmt.Printf(readline.EndBracketedPaste)
+
+	var multiline MultilineState
+	var prompt string
+
+	for {
+		line, err := scanner.Readline()
+		switch {
+		case errors.Is(err, io.EOF):
+			fmt.Println()
+			return nil
+		case errors.Is(err, readline.ErrInterrupt):
+			if line == "" {
+				fmt.Println("\nUse Ctrl-D or /bye to exit.")
+			}
+
+			scanner.Prompt.UseAlt = false
+			prompt = ""
+
+			continue
+		case err != nil:
+			return err
+		}
+
+		switch {
+		case strings.HasPrefix(prompt, `"""`):
+			// if the prompt so far starts with """ then we're in multiline mode
+			// and we need to keep reading until we find a line that ends with """
+			cut, found := strings.CutSuffix(line, `"""`)
+			prompt += cut
+
+			if !found {
+				prompt += "\n"
+				continue
+			}
+
+			prompt = strings.TrimPrefix(prompt, `"""`)
+			scanner.Prompt.UseAlt = false
+
+			switch multiline {
+			case MultilineSystem:
+				opts.System = prompt
+				prompt = ""
+				fmt.Println("Set system message.")
+			case MultilineTemplate:
+				opts.Template = prompt
+				prompt = ""
+				fmt.Println("Set prompt template.")
+			}
+			multiline = MultilineNone
+		case strings.HasPrefix(line, `"""`) && len(prompt) == 0:
+			scanner.Prompt.UseAlt = true
+			multiline = MultilinePrompt
+			prompt += line + "\n"
+			continue
+		case scanner.Pasting:
+			prompt += line + "\n"
+			continue
+		case strings.HasPrefix(line, "/list"):
+			args := strings.Fields(line)
+			if err := ListHandler(cmd, args[1:]); err != nil {
+				return err
+			}
+		case strings.HasPrefix(line, "/set"):
+			args := strings.Fields(line)
+			if len(args) > 1 {
+				switch args[1] {
+				case "history":
+					scanner.HistoryEnable()
+				case "nohistory":
+					scanner.HistoryDisable()
+				case "wordwrap":
+					opts.WordWrap = true
+					fmt.Println("Set 'wordwrap' mode.")
+				case "nowordwrap":
+					opts.WordWrap = false
+					fmt.Println("Set 'nowordwrap' mode.")
+				case "verbose":
+					cmd.Flags().Set("verbose", "true")
+					fmt.Println("Set 'verbose' mode.")
+				case "quiet":
+					cmd.Flags().Set("verbose", "false")
+					fmt.Println("Set 'quiet' mode.")
+				case "format":
+					if len(args) < 3 || args[2] != "json" {
+						fmt.Println("Invalid or missing format. For 'json' mode use '/set format json'")
+					} else {
+						opts.Format = args[2]
+						fmt.Printf("Set format to '%s' mode.\n", args[2])
+					}
+				case "noformat":
+					opts.Format = ""
+					fmt.Println("Disabled format.")
+				case "parameter":
+					if len(args) < 4 {
+						usageParameters()
+						continue
+					}
+					var params []string
+					for _, p := range args[3:] {
+						params = append(params, p)
+					}
+					fp, err := api.FormatParams(map[string][]string{args[2]: params})
+					if err != nil {
+						fmt.Printf("Couldn't set parameter: %q\n\n", err)
+						continue
+					}
+					fmt.Printf("Set parameter '%s' to '%s'\n\n", args[2], strings.Join(params, ", "))
+					opts.Options[args[2]] = fp[args[2]]
+				case "system", "template":
+					if len(args) < 3 {
+						usageSet()
+						continue
+					}
+					line := strings.Join(args[2:], " ")
+					line = strings.TrimPrefix(line, `"""`)
+					if strings.HasPrefix(args[2], `"""`) {
+						cut, found := strings.CutSuffix(line, `"""`)
+						prompt += cut
+						if found {
+							if args[1] == "system" {
+								opts.System = prompt
+								fmt.Println("Set system message.")
+							} else {
+								opts.Template = prompt
+								fmt.Println("Set prompt template.")
+							}
+							prompt = ""
+						} else {
+							prompt = `"""` + prompt + "\n"
+							if args[1] == "system" {
+								multiline = MultilineSystem
+							} else {
+								multiline = MultilineTemplate
+							}
+							scanner.Prompt.UseAlt = true
+						}
+					} else {
+						opts.System = line
+						fmt.Println("Set system message.")
+					}
+				default:
+					fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
+				}
+			} else {
+				usageSet()
+			}
+		case strings.HasPrefix(line, "/show"):
+			args := strings.Fields(line)
+			if len(args) > 1 {
+				client, err := api.ClientFromEnvironment()
+				if err != nil {
+					fmt.Println("error: couldn't connect to ollama server")
+					return err
+				}
+				resp, err := client.Show(cmd.Context(), &api.ShowRequest{Name: opts.Model})
+				if err != nil {
+					fmt.Println("error: couldn't get model")
+					return err
+				}
+
+				switch args[1] {
+				case "license":
+					if resp.License == "" {
+						fmt.Print("No license was specified for this model.\n\n")
+					} else {
+						fmt.Println(resp.License)
+					}
+				case "modelfile":
+					fmt.Println(resp.Modelfile)
+				case "parameters":
+					if resp.Parameters == "" {
+						fmt.Print("No parameters were specified for this model.\n\n")
+					} else {
+						if len(opts.Options) > 0 {
+							fmt.Println("User defined parameters:")
+							for k, v := range opts.Options {
+								fmt.Printf("%-*s %v\n", 30, k, v)
+							}
+							fmt.Println()
+						}
+						fmt.Println("Model defined parameters:")
+						fmt.Println(resp.Parameters)
+					}
+				case "system":
+					switch {
+					case opts.System != "":
+						fmt.Println(opts.System + "\n")
+					case resp.System != "":
+						fmt.Println(resp.System + "\n")
+					default:
+						fmt.Print("No system message was specified for this model.\n\n")
+					}
+				case "template":
+					switch {
+					case opts.Template != "":
+						fmt.Println(opts.Template + "\n")
+					case resp.Template != "":
+						fmt.Println(resp.Template)
+					default:
+						fmt.Print("No prompt template was specified for this model.\n\n")
+					}
+				default:
+					fmt.Printf("Unknown command '/show %s'. Type /? for help\n", args[1])
+				}
+			} else {
+				usageShow()
+			}
+		case strings.HasPrefix(line, "/help"), strings.HasPrefix(line, "/?"):
+			args := strings.Fields(line)
+			if len(args) > 1 {
+				switch args[1] {
+				case "set", "/set":
+					usageSet()
+				case "show", "/show":
+					usageShow()
+				}
+			} else {
+				usage()
+			}
+		case line == "/exit", line == "/bye":
+			return nil
+		case strings.HasPrefix(line, "/"):
+			args := strings.Fields(line)
+			isFile := false
+
+			if multiModal {
+				for _, f := range extractFileNames(line) {
+					if strings.HasPrefix(f, args[0]) {
+						isFile = true
+						break
+					}
+				}
+			}
+
+			if isFile {
+				prompt += line
+			} else {
+				fmt.Printf("Unknown command '%s'. Type /? for help\n", args[0])
+				continue
+			}
+		default:
+			prompt += line
+		}
+
+		if len(prompt) > 0 && multiline == MultilineNone {
+			opts.Prompt = prompt
+			if multiModal {
+				newPrompt, images, err := extractFileData(prompt)
+				if err != nil {
+					return err
+				}
+				opts.Prompt = newPrompt
+
+				// reset the context if we find another image
+				if len(images) > 0 {
+					opts.Images = images
+					ctx := cmd.Context()
+					ctx = context.WithValue(ctx, generateContextKey("context"), []int{})
+					cmd.SetContext(ctx)
+				}
+				if len(opts.Images) == 0 {
+					fmt.Println("This model requires you to add a jpeg, png, or svg image.")
+					fmt.Println()
+					prompt = ""
+					continue
+				}
+			}
+			if err := generate(cmd, opts); err != nil {
+				return err
+			}
+
+			prompt = ""
+		}
+	}
+}
+
+func normalizeFilePath(fp string) string {
+	// Define a map of escaped characters and their replacements
+	replacements := map[string]string{
+		"\\ ":  " ",  // Escaped space
+		"\\(":  "(",  // Escaped left parenthesis
+		"\\)":  ")",  // Escaped right parenthesis
+		"\\[":  "[",  // Escaped left square bracket
+		"\\]":  "]",  // Escaped right square bracket
+		"\\{":  "{",  // Escaped left curly brace
+		"\\}":  "}",  // Escaped right curly brace
+		"\\$":  "$",  // Escaped dollar sign
+		"\\&":  "&",  // Escaped ampersand
+		"\\;":  ";",  // Escaped semicolon
+		"\\'":  "'",  // Escaped single quote
+		"\\\\": "\\", // Escaped backslash
+		"\\*":  "*",  // Escaped asterisk
+		"\\?":  "?",  // Escaped question mark
+	}
+
+	for escaped, actual := range replacements {
+		fp = strings.ReplaceAll(fp, escaped, actual)
+	}
+	return fp
+}
+
+func extractFileNames(input string) []string {
+	// Regex to match file paths starting with / or ./ and include escaped spaces (\ or %20)
+	// and followed by more characters and a file extension
+	regexPattern := `(?:\./|/)[\S\\ ]+?\.(?i:jpg|jpeg|png|svg)\b`
+	re := regexp.MustCompile(regexPattern)
+
+	return re.FindAllString(input, -1)
+}
+
+func extractFileData(input string) (string, []ImageData, error) {
+	filePaths := extractFileNames(input)
+	var imgs []ImageData
+
+	for _, fp := range filePaths {
+		nfp := normalizeFilePath(fp)
+		data, err := getImageData(nfp)
+		if err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			fmt.Printf("Couldn't process image: %q\n", err)
+			return "", imgs, err
+		}
+		fmt.Printf("Added image '%s'\n", nfp)
+		input = strings.ReplaceAll(input, fp, "")
+		imgs = append(imgs, data)
+	}
+	return input, imgs, nil
+}
+
 func RunServer(cmd *cobra.Command, _ []string) error {
-	host, port, err := net.SplitHostPort(strings.Trim(os.Getenv("OLLAMA_HOST"), "\"'"))
+	host, port, err := net.SplitHostPort(os.Getenv("OLLAMA_HOST"))
 	if err != nil {
 		host, port = "127.0.0.1", "11434"
 		if ip := net.ParseIP(strings.Trim(os.Getenv("OLLAMA_HOST"), "[]")); ip != nil {
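extractFileNames is what lets a multimodal prompt carry image paths inline: anything shaped like /path/file.ext or ./path/file.ext with a jpg/jpeg/png/svg extension (case-insensitive, escaped spaces allowed) is pulled out of the prompt text. A quick, hedged illustration of what the regex should match (hypothetical input, not from the diff):

// Hypothetical example of extractFileNames on a prompt string.
func exampleExtract() {
	prompt := `describe ./photos/cat\ 1.png and /tmp/dog.JPG please`
	fmt.Println(extractFileNames(prompt))
	// expected output: [./photos/cat\ 1.png /tmp/dog.JPG]
}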
@@ -706,6 +1077,50 @@ func RunServer(cmd *cobra.Command, _ []string) error {
 	return server.Serve(ln)
 }

+func getImageData(filePath string) ([]byte, error) {
+	file, err := os.Open(filePath)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	buf := make([]byte, 512)
+	_, err = file.Read(buf)
+	if err != nil {
+		return nil, err
+	}
+
+	contentType := http.DetectContentType(buf)
+	allowedTypes := []string{"image/jpeg", "image/jpg", "image/svg+xml", "image/png"}
+	if !slices.Contains(allowedTypes, contentType) {
+		return nil, fmt.Errorf("invalid image type: %s", contentType)
+	}
+
+	info, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	// Check if the file size exceeds 100MB
+	var maxSize int64 = 100 * 1024 * 1024 // 100MB in bytes
+	if info.Size() > maxSize {
+		return nil, fmt.Errorf("file size exceeds maximum limit (100MB)")
+	}
+
+	buf = make([]byte, info.Size())
+	_, err = file.Seek(0, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = io.ReadFull(file, buf)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf, nil
+}
+
 func initializeKeypair() error {
 	home, err := os.UserHomeDir()
 	if err != nil {
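getImageData trusts content, not filenames: http.DetectContentType sniffs the first 512 bytes, so a .png extension on a text file is rejected, and anything over 100MB is refused before the full read. A hypothetical call site:

// Hypothetical call site for getImageData.
func exampleLoadImage() {
	data, err := getImageData("/tmp/cat.png") // fails on non-images and files > 100MB
	if err != nil {
		fmt.Println("not usable as an image:", err)
		return
	}
	fmt.Printf("loaded %d image bytes\n", len(data))
}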
@@ -755,8 +1170,22 @@ func initializeKeypair() error {
 	return nil
 }

-//nolint:unused
-func waitForServer(ctx context.Context, client *api.Client) error {
+func startMacApp(ctx context.Context, client *api.Client) error {
+	exe, err := os.Executable()
+	if err != nil {
+		return err
+	}
+	link, err := os.Readlink(exe)
+	if err != nil {
+		return err
+	}
+	if !strings.Contains(link, "Ollama.app") {
+		return fmt.Errorf("could not find ollama app")
+	}
+	path := strings.Split(link, "Ollama.app")
+	if err := exec.Command("/usr/bin/open", "-a", path[0]+"Ollama.app").Run(); err != nil {
+		return err
+	}
 	// wait for the server to start
 	timeout := time.After(5 * time.Second)
 	tick := time.Tick(500 * time.Millisecond)
@@ -770,7 +1199,6 @@ func waitForServer(ctx context.Context, client *api.Client) error {
 		}
 	}
 }
-
 }

 func checkServerHeartbeat(cmd *cobra.Command, _ []string) error {
@@ -779,11 +1207,15 @@ func checkServerHeartbeat(cmd *cobra.Command, _ []string) error {
 		return err
 	}
 	if err := client.Heartbeat(cmd.Context()); err != nil {
-		if !strings.Contains(err.Error(), " refused") {
+		if !strings.Contains(err.Error(), "connection refused") {
 			return err
 		}
-		if err := startApp(cmd.Context(), client); err != nil {
-			return fmt.Errorf("could not connect to ollama app, is it running?")
+		if runtime.GOOS == "darwin" {
+			if err := startMacApp(cmd.Context(), client); err != nil {
+				return fmt.Errorf("could not connect to ollama app, is it running?")
+			}
+		} else {
+			return fmt.Errorf("could not connect to ollama server, run 'ollama serve' to start it")
 		}
 	}
 	return nil
@@ -813,11 +1245,6 @@ func NewCLI() *cobra.Command {
|
|||||||
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
log.SetFlags(log.LstdFlags | log.Lshortfile)
|
||||||
cobra.EnableCommandSorting = false
|
cobra.EnableCommandSorting = false
|
||||||
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
// Enable colorful ANSI escape code in Windows terminal (disabled by default)
|
|
||||||
console.ConsoleFromFile(os.Stdout) //nolint:errcheck
|
|
||||||
}
|
|
||||||
|
|
||||||
rootCmd := &cobra.Command{
|
rootCmd := &cobra.Command{
|
||||||
Use: "ollama",
|
Use: "ollama",
|
||||||
Short: "Large language model runner",
|
Short: "Large language model runner",
|
||||||
|
|||||||
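For reference, the `getImageData` helper introduced in the `@@ -706,6 +1077,50 @@` hunk above trusts `http.DetectContentType` rather than file extensions, and that function only ever inspects the first 512 bytes of input. A minimal, standalone sketch of just the sniffing step (the file name is hypothetical):

```go
package main

import (
    "fmt"
    "net/http"
    "os"
)

// sniff returns the detected MIME type of a file, reading at most the
// 512 bytes that http.DetectContentType actually considers.
func sniff(path string) (string, error) {
    f, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer f.Close()

    buf := make([]byte, 512)
    n, err := f.Read(buf)
    if err != nil {
        return "", err
    }
    return http.DetectContentType(buf[:n]), nil
}

func main() {
    t, err := sniff("photo.png") // hypothetical file name
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fmt.Println(t) // e.g. "image/png"
}
```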
@@ -1,663 +0,0 @@
package cmd

import (
    "errors"
    "fmt"
    "io"
    "net/http"
    "os"
    "path/filepath"
    "regexp"
    "sort"
    "strings"

    "github.com/spf13/cobra"
    "golang.org/x/exp/slices"

    "github.com/jmorganca/ollama/api"
    "github.com/jmorganca/ollama/progress"
    "github.com/jmorganca/ollama/readline"
)

type MultilineState int

const (
    MultilineNone MultilineState = iota
    MultilinePrompt
    MultilineSystem
    MultilineTemplate
)

func loadModel(cmd *cobra.Command, opts *runOptions) error {
    client, err := api.ClientFromEnvironment()
    if err != nil {
        return err
    }

    p := progress.NewProgress(os.Stderr)
    defer p.StopAndClear()

    spinner := progress.NewSpinner("")
    p.Add("", spinner)

    showReq := api.ShowRequest{Name: opts.Model}
    showResp, err := client.Show(cmd.Context(), &showReq)
    if err != nil {
        return err
    }
    opts.MultiModal = slices.Contains(showResp.Details.Families, "clip")
    opts.ParentModel = showResp.Details.ParentModel

    if len(showResp.Messages) > 0 {
        opts.Messages = append(opts.Messages, showResp.Messages...)
    }

    chatReq := &api.ChatRequest{
        Model:    opts.Model,
        Messages: []api.Message{},
    }
    err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error {
        p.StopAndClear()
        if len(opts.Messages) > 0 {
            for _, msg := range opts.Messages {
                switch msg.Role {
                case "user":
                    fmt.Printf(">>> %s\n", msg.Content)
                case "assistant":
                    state := &displayResponseState{}
                    displayResponse(msg.Content, opts.WordWrap, state)
                    fmt.Println()
                    fmt.Println()
                }
            }
        }
        return nil
    })
    if err != nil {
        return err
    }

    return nil
}

func generateInteractive(cmd *cobra.Command, opts runOptions) error {
    opts.Messages = make([]api.Message, 0)

    err := loadModel(cmd, &opts)
    if err != nil {
        return err
    }

    usage := func() {
        fmt.Fprintln(os.Stderr, "Available Commands:")
        fmt.Fprintln(os.Stderr, "  /set            Set session variables")
        fmt.Fprintln(os.Stderr, "  /show           Show model information")
        fmt.Fprintln(os.Stderr, "  /load <model>   Load a session or model")
        fmt.Fprintln(os.Stderr, "  /save <model>   Save your current session")
        fmt.Fprintln(os.Stderr, "  /bye            Exit")
        fmt.Fprintln(os.Stderr, "  /?, /help       Help for a command")
        fmt.Fprintln(os.Stderr, "  /? shortcuts    Help for keyboard shortcuts")
        fmt.Fprintln(os.Stderr, "")
        fmt.Fprintln(os.Stderr, "Use \"\"\" to begin a multi-line message.")

        if opts.MultiModal {
            fmt.Fprintf(os.Stderr, "Use %s to include .jpg or .png images.\n", filepath.FromSlash("/path/to/file"))
        }

        fmt.Fprintln(os.Stderr, "")
    }

    usageSet := func() {
        fmt.Fprintln(os.Stderr, "Available Commands:")
        fmt.Fprintln(os.Stderr, "  /set parameter ...     Set a parameter")
        fmt.Fprintln(os.Stderr, "  /set system <string>   Set system message")
        fmt.Fprintln(os.Stderr, "  /set template <string> Set prompt template")
        fmt.Fprintln(os.Stderr, "  /set history           Enable history")
        fmt.Fprintln(os.Stderr, "  /set nohistory         Disable history")
        fmt.Fprintln(os.Stderr, "  /set wordwrap          Enable wordwrap")
        fmt.Fprintln(os.Stderr, "  /set nowordwrap        Disable wordwrap")
        fmt.Fprintln(os.Stderr, "  /set format json       Enable JSON mode")
        fmt.Fprintln(os.Stderr, "  /set noformat          Disable formatting")
        fmt.Fprintln(os.Stderr, "  /set verbose           Show LLM stats")
        fmt.Fprintln(os.Stderr, "  /set quiet             Disable LLM stats")
        fmt.Fprintln(os.Stderr, "")
    }

    usageShortcuts := func() {
        fmt.Fprintln(os.Stderr, "Available keyboard shortcuts:")
        fmt.Fprintln(os.Stderr, "  Ctrl + a            Move to the beginning of the line (Home)")
        fmt.Fprintln(os.Stderr, "  Ctrl + e            Move to the end of the line (End)")
        fmt.Fprintln(os.Stderr, "  Alt + b             Move back (left) one word")
        fmt.Fprintln(os.Stderr, "  Alt + f             Move forward (right) one word")
        fmt.Fprintln(os.Stderr, "  Ctrl + k            Delete the sentence after the cursor")
        fmt.Fprintln(os.Stderr, "  Ctrl + u            Delete the sentence before the cursor")
        fmt.Fprintln(os.Stderr, "")
        fmt.Fprintln(os.Stderr, "  Ctrl + l            Clear the screen")
        fmt.Fprintln(os.Stderr, "  Ctrl + c            Stop the model from responding")
        fmt.Fprintln(os.Stderr, "  Ctrl + d            Exit ollama (/bye)")
        fmt.Fprintln(os.Stderr, "")
    }

    usageShow := func() {
        fmt.Fprintln(os.Stderr, "Available Commands:")
        fmt.Fprintln(os.Stderr, "  /show info         Show details for this model")
        fmt.Fprintln(os.Stderr, "  /show license      Show model license")
        fmt.Fprintln(os.Stderr, "  /show modelfile    Show Modelfile for this model")
        fmt.Fprintln(os.Stderr, "  /show parameters   Show parameters for this model")
        fmt.Fprintln(os.Stderr, "  /show system       Show system message")
        fmt.Fprintln(os.Stderr, "  /show template     Show prompt template")
        fmt.Fprintln(os.Stderr, "")
    }

    // only list out the most common parameters
    usageParameters := func() {
        fmt.Fprintln(os.Stderr, "Available Parameters:")
        fmt.Fprintln(os.Stderr, "  /set parameter seed <int>             Random number seed")
        fmt.Fprintln(os.Stderr, "  /set parameter num_predict <int>      Max number of tokens to predict")
        fmt.Fprintln(os.Stderr, "  /set parameter top_k <int>            Pick from top k num of tokens")
        fmt.Fprintln(os.Stderr, "  /set parameter top_p <float>          Pick token based on sum of probabilities")
        fmt.Fprintln(os.Stderr, "  /set parameter num_ctx <int>          Set the context size")
        fmt.Fprintln(os.Stderr, "  /set parameter temperature <float>    Set creativity level")
        fmt.Fprintln(os.Stderr, "  /set parameter repeat_penalty <float> How strongly to penalize repetitions")
        fmt.Fprintln(os.Stderr, "  /set parameter repeat_last_n <int>    Set how far back to look for repetitions")
        fmt.Fprintln(os.Stderr, "  /set parameter num_gpu <int>          The number of layers to send to the GPU")
        fmt.Fprintln(os.Stderr, "  /set parameter stop \"<string>\", ...   Set the stop parameters")
        fmt.Fprintln(os.Stderr, "")
    }

    scanner, err := readline.New(readline.Prompt{
        Prompt:         ">>> ",
        AltPrompt:      "... ",
        Placeholder:    "Send a message (/? for help)",
        AltPlaceholder: `Use """ to end multi-line input`,
    })
    if err != nil {
        return err
    }

    fmt.Print(readline.StartBracketedPaste)
    defer fmt.Printf(readline.EndBracketedPaste)

    var sb strings.Builder
    var multiline MultilineState

    for {
        line, err := scanner.Readline()
        switch {
        case errors.Is(err, io.EOF):
            fmt.Println()
            return nil
        case errors.Is(err, readline.ErrInterrupt):
            if line == "" {
                fmt.Println("\nUse Ctrl + d or /bye to exit.")
            }

            scanner.Prompt.UseAlt = false
            sb.Reset()

            continue
        case err != nil:
            return err
        }

        switch {
        case multiline != MultilineNone:
            // check if there's a multiline terminating string
            before, ok := strings.CutSuffix(line, `"""`)
            sb.WriteString(before)
            if !ok {
                fmt.Fprintln(&sb)
                continue
            }

            switch multiline {
            case MultilineSystem:
                opts.System = sb.String()
                opts.Messages = append(opts.Messages, api.Message{Role: "system", Content: opts.System})
                fmt.Println("Set system message.")
                sb.Reset()
            case MultilineTemplate:
                opts.Template = sb.String()
                fmt.Println("Set prompt template.")
                sb.Reset()
            }

            multiline = MultilineNone
            scanner.Prompt.UseAlt = false
        case strings.HasPrefix(line, `"""`):
            line := strings.TrimPrefix(line, `"""`)
            line, ok := strings.CutSuffix(line, `"""`)
            sb.WriteString(line)
            if !ok {
                // no multiline terminating string; need more input
                fmt.Fprintln(&sb)
                multiline = MultilinePrompt
                scanner.Prompt.UseAlt = true
            }
        case scanner.Pasting:
            fmt.Fprintln(&sb, line)
            continue
        case strings.HasPrefix(line, "/list"):
            args := strings.Fields(line)
            if err := ListHandler(cmd, args[1:]); err != nil {
                return err
            }
        case strings.HasPrefix(line, "/load"):
            args := strings.Fields(line)
            if len(args) != 2 {
                fmt.Println("Usage:\n  /load <modelname>")
                continue
            }
            opts.Model = args[1]
            opts.Messages = []api.Message{}
            fmt.Printf("Loading model '%s'\n", opts.Model)
            if err := loadModel(cmd, &opts); err != nil {
                return err
            }
            continue
        case strings.HasPrefix(line, "/save"):
            args := strings.Fields(line)
            if len(args) != 2 {
                fmt.Println("Usage:\n  /save <modelname>")
                continue
            }

            client, err := api.ClientFromEnvironment()
            if err != nil {
                fmt.Println("error: couldn't connect to ollama server")
                return err
            }

            req := &api.CreateRequest{
                Name:      args[1],
                Modelfile: buildModelfile(opts),
            }
            fn := func(resp api.ProgressResponse) error { return nil }
            err = client.Create(cmd.Context(), req, fn)
            if err != nil {
                fmt.Println("error: couldn't save model")
                return err
            }
            fmt.Printf("Created new model '%s'\n", args[1])
            continue
        case strings.HasPrefix(line, "/set"):
            args := strings.Fields(line)
            if len(args) > 1 {
                switch args[1] {
                case "history":
                    scanner.HistoryEnable()
                case "nohistory":
                    scanner.HistoryDisable()
                case "wordwrap":
                    opts.WordWrap = true
                    fmt.Println("Set 'wordwrap' mode.")
                case "nowordwrap":
                    opts.WordWrap = false
                    fmt.Println("Set 'nowordwrap' mode.")
                case "verbose":
                    cmd.Flags().Set("verbose", "true")
                    fmt.Println("Set 'verbose' mode.")
                case "quiet":
                    cmd.Flags().Set("verbose", "false")
                    fmt.Println("Set 'quiet' mode.")
                case "format":
                    if len(args) < 3 || args[2] != "json" {
                        fmt.Println("Invalid or missing format. For 'json' mode use '/set format json'")
                    } else {
                        opts.Format = args[2]
                        fmt.Printf("Set format to '%s' mode.\n", args[2])
                    }
                case "noformat":
                    opts.Format = ""
                    fmt.Println("Disabled format.")
                case "parameter":
                    if len(args) < 4 {
                        usageParameters()
                        continue
                    }
                    params := args[3:]
                    fp, err := api.FormatParams(map[string][]string{args[2]: params})
                    if err != nil {
                        fmt.Printf("Couldn't set parameter: %q\n", err)
                        continue
                    }
                    fmt.Printf("Set parameter '%s' to '%s'\n", args[2], strings.Join(params, ", "))
                    opts.Options[args[2]] = fp[args[2]]
                case "system", "template":
                    if len(args) < 3 {
                        usageSet()
                        continue
                    }

                    if args[1] == "system" {
                        multiline = MultilineSystem
                    } else if args[1] == "template" {
                        multiline = MultilineTemplate
                    }

                    line := strings.Join(args[2:], " ")
                    line, ok := strings.CutPrefix(line, `"""`)
                    if !ok {
                        multiline = MultilineNone
                    } else {
                        // only cut suffix if the line is multiline
                        line, ok = strings.CutSuffix(line, `"""`)
                        if ok {
                            multiline = MultilineNone
                        }
                    }

                    sb.WriteString(line)
                    if multiline != MultilineNone {
                        scanner.Prompt.UseAlt = true
                        continue
                    }

                    if args[1] == "system" {
                        opts.System = sb.String() // for display in modelfile
                        newMessage := api.Message{Role: "system", Content: sb.String()}
                        // Check if the slice is not empty and the last message is from 'system'
                        if len(opts.Messages) > 0 && opts.Messages[len(opts.Messages)-1].Role == "system" {
                            // Replace the last message
                            opts.Messages[len(opts.Messages)-1] = newMessage
                        } else {
                            opts.Messages = append(opts.Messages, newMessage)
                        }
                        fmt.Println("Set system message.")
                        sb.Reset()
                    } else if args[1] == "template" {
                        opts.Template = sb.String()
                        fmt.Println("Set prompt template.")
                        sb.Reset()
                    }

                    sb.Reset()
                    continue
                default:
                    fmt.Printf("Unknown command '/set %s'. Type /? for help\n", args[1])
                }
            } else {
                usageSet()
            }
        case strings.HasPrefix(line, "/show"):
            args := strings.Fields(line)
            if len(args) > 1 {
                client, err := api.ClientFromEnvironment()
                if err != nil {
                    fmt.Println("error: couldn't connect to ollama server")
                    return err
                }
                req := &api.ShowRequest{
                    Name:     opts.Model,
                    System:   opts.System,
                    Template: opts.Template,
                    Options:  opts.Options,
                }
                resp, err := client.Show(cmd.Context(), req)
                if err != nil {
                    fmt.Println("error: couldn't get model")
                    return err
                }

                switch args[1] {
                case "info":
                    fmt.Println("Model details:")
                    if len(resp.Details.Families) > 0 {
                        fmt.Printf("Family              %s\n", strings.Join(resp.Details.Families, ", "))
                    } else if resp.Details.Family != "" {
                        fmt.Printf("Family              %s\n", resp.Details.Family)
                    }
                    fmt.Printf("Parameter Size      %s\n", resp.Details.ParameterSize)
                    fmt.Printf("Quantization Level  %s\n", resp.Details.QuantizationLevel)
                    fmt.Println("")
                case "license":
                    if resp.License == "" {
                        fmt.Println("No license was specified for this model.")
                    } else {
                        fmt.Println(resp.License)
                    }
                case "modelfile":
                    fmt.Println(resp.Modelfile)
                case "parameters":
                    if resp.Parameters == "" {
                        fmt.Println("No parameters were specified for this model.")
                    } else {
                        if len(opts.Options) > 0 {
                            fmt.Println("User defined parameters:")
                            for k, v := range opts.Options {
                                fmt.Printf("%-*s %v\n", 30, k, v)
                            }
                            fmt.Println()
                        }
                        fmt.Println("Model defined parameters:")
                        fmt.Println(resp.Parameters)
                    }
                case "system":
                    switch {
                    case opts.System != "":
                        fmt.Println(opts.System + "\n")
                    case resp.System != "":
                        fmt.Println(resp.System + "\n")
                    default:
                        fmt.Println("No system message was specified for this model.")
                    }
                case "template":
                    switch {
                    case opts.Template != "":
                        fmt.Println(opts.Template + "\n")
                    case resp.Template != "":
                        fmt.Println(resp.Template)
                    default:
                        fmt.Println("No prompt template was specified for this model.")
                    }
                default:
                    fmt.Printf("Unknown command '/show %s'. Type /? for help\n", args[1])
                }
            } else {
                usageShow()
            }
        case strings.HasPrefix(line, "/help"), strings.HasPrefix(line, "/?"):
            args := strings.Fields(line)
            if len(args) > 1 {
                switch args[1] {
                case "set", "/set":
                    usageSet()
                case "show", "/show":
                    usageShow()
                case "shortcut", "shortcuts":
                    usageShortcuts()
                }
            } else {
                usage()
            }
        case strings.HasPrefix(line, "/exit"), strings.HasPrefix(line, "/bye"):
            return nil
        case strings.HasPrefix(line, "/"):
            args := strings.Fields(line)
            isFile := false

            if opts.MultiModal {
                for _, f := range extractFileNames(line) {
                    if strings.HasPrefix(f, args[0]) {
                        isFile = true
                        break
                    }
                }
            }

            if !isFile {
                fmt.Printf("Unknown command '%s'. Type /? for help\n", args[0])
                continue
            }

            sb.WriteString(line)
        default:
            sb.WriteString(line)
        }

        if sb.Len() > 0 && multiline == MultilineNone {
            newMessage := api.Message{Role: "user", Content: sb.String()}

            if opts.MultiModal {
                msg, images, err := extractFileData(sb.String())
                if err != nil {
                    return err
                }

                // clear all previous images for better responses
                if len(images) > 0 {
                    for i := range opts.Messages {
                        opts.Messages[i].Images = nil
                    }
                }

                newMessage.Content = msg
                newMessage.Images = images
            }

            opts.Messages = append(opts.Messages, newMessage)

            assistant, err := chat(cmd, opts)
            if err != nil {
                return err
            }
            if assistant != nil {
                opts.Messages = append(opts.Messages, *assistant)
            }

            sb.Reset()
        }
    }
}

func buildModelfile(opts runOptions) string {
    var mf strings.Builder
    model := opts.ParentModel
    if model == "" {
        model = opts.Model
    }
    fmt.Fprintf(&mf, "FROM %s\n", model)
    if opts.System != "" {
        fmt.Fprintf(&mf, "SYSTEM \"\"\"%s\"\"\"\n", opts.System)
    }

    if opts.Template != "" {
        fmt.Fprintf(&mf, "TEMPLATE \"\"\"%s\"\"\"\n", opts.Template)
    }

    keys := make([]string, 0)
    for k := range opts.Options {
        keys = append(keys, k)
    }
    sort.Strings(keys)
    for _, k := range keys {
        fmt.Fprintf(&mf, "PARAMETER %s %v\n", k, opts.Options[k])
    }
    fmt.Fprintln(&mf)

    for _, msg := range opts.Messages {
        fmt.Fprintf(&mf, "MESSAGE %s \"\"\"%s\"\"\"\n", msg.Role, msg.Content)
    }

    return mf.String()
}

func normalizeFilePath(fp string) string {
    // Define a map of escaped characters and their replacements
    replacements := map[string]string{
        "\\ ":  " ",  // Escaped space
        "\\(":  "(",  // Escaped left parenthesis
        "\\)":  ")",  // Escaped right parenthesis
        "\\[":  "[",  // Escaped left square bracket
        "\\]":  "]",  // Escaped right square bracket
        "\\{":  "{",  // Escaped left curly brace
        "\\}":  "}",  // Escaped right curly brace
        "\\$":  "$",  // Escaped dollar sign
        "\\&":  "&",  // Escaped ampersand
        "\\;":  ";",  // Escaped semicolon
        "\\'":  "'",  // Escaped single quote
        "\\\\": "\\", // Escaped backslash
        "\\*":  "*",  // Escaped asterisk
        "\\?":  "?",  // Escaped question mark
    }

    for escaped, actual := range replacements {
        fp = strings.ReplaceAll(fp, escaped, actual)
    }
    return fp
}

func extractFileNames(input string) []string {
    // Regex to match file paths starting with optional drive letter, / ./ \ or .\ and include escaped or unescaped spaces (\ or %20)
    // and followed by more characters and a file extension
    // This will capture non filename strings, but we'll check for file existence to remove mismatches
    regexPattern := `(?:[a-zA-Z]:)?(?:\./|/|\\)[\S\\ ]+?\.(?i:jpg|jpeg|png|svg)\b`
    re := regexp.MustCompile(regexPattern)

    return re.FindAllString(input, -1)
}

func extractFileData(input string) (string, []api.ImageData, error) {
    filePaths := extractFileNames(input)
    var imgs []api.ImageData

    for _, fp := range filePaths {
        nfp := normalizeFilePath(fp)
        data, err := getImageData(nfp)
        if err != nil {
            if os.IsNotExist(err) {
                continue
            }
            fmt.Fprintf(os.Stderr, "Couldn't process image: %q\n", err)
            return "", imgs, err
        }
        fmt.Fprintf(os.Stderr, "Added image '%s'\n", nfp)
        input = strings.ReplaceAll(input, fp, "")
        imgs = append(imgs, data)
    }
    return input, imgs, nil
}

func getImageData(filePath string) ([]byte, error) {
    file, err := os.Open(filePath)
    if err != nil {
        return nil, err
    }
    defer file.Close()

    buf := make([]byte, 512)
    _, err = file.Read(buf)
    if err != nil {
        return nil, err
    }

    contentType := http.DetectContentType(buf)
    allowedTypes := []string{"image/jpeg", "image/jpg", "image/png"}
    if !slices.Contains(allowedTypes, contentType) {
        return nil, fmt.Errorf("invalid image type: %s", contentType)
    }

    info, err := file.Stat()
    if err != nil {
        return nil, err
    }

    // Check if the file size exceeds 100MB
    var maxSize int64 = 100 * 1024 * 1024 // 100MB in bytes
    if info.Size() > maxSize {
        return nil, fmt.Errorf("file size exceeds maximum limit (100MB)")
    }

    buf = make([]byte, info.Size())
    _, err = file.Seek(0, 0)
    if err != nil {
        return nil, err
    }

    _, err = io.ReadFull(file, buf)
    if err != nil {
        return nil, err
    }

    return buf, nil
}
@@ -1,116 +0,0 @@
package cmd

import (
    "bytes"
    "testing"
    "text/template"

    "github.com/stretchr/testify/assert"

    "github.com/jmorganca/ollama/api"
)

func TestExtractFilenames(t *testing.T) {
    // Unix style paths
    input := ` some preamble
 ./relative\ path/one.png inbetween1 ./not a valid two.jpg inbetween2
/unescaped space /three.jpeg inbetween3 /valid\ path/dir/four.png "./quoted with spaces/five.svg`
    res := extractFileNames(input)
    assert.Len(t, res, 5)
    assert.Contains(t, res[0], "one.png")
    assert.Contains(t, res[1], "two.jpg")
    assert.Contains(t, res[2], "three.jpeg")
    assert.Contains(t, res[3], "four.png")
    assert.Contains(t, res[4], "five.svg")
    assert.NotContains(t, res[4], '"')
    assert.NotContains(t, res, "inbtween")

    // Windows style paths
    input = ` some preamble
 c:/users/jdoe/one.png inbetween1 c:/program files/someplace/two.jpg inbetween2
/absolute/nospace/three.jpeg inbetween3 /absolute/with space/four.png inbetween4
./relative\ path/five.svg inbetween5 "./relative with/spaces/six.png inbetween6
d:\path with\spaces\seven.svg inbetween7 c:\users\jdoe\eight.png inbetween8
 d:\program files\someplace\nine.png inbetween9 "E:\program files\someplace\ten.svg some ending
`
    res = extractFileNames(input)
    assert.Len(t, res, 10)
    assert.NotContains(t, res, "inbtween")
    assert.Contains(t, res[0], "one.png")
    assert.Contains(t, res[0], "c:")
    assert.Contains(t, res[1], "two.jpg")
    assert.Contains(t, res[1], "c:")
    assert.Contains(t, res[2], "three.jpeg")
    assert.Contains(t, res[3], "four.png")
    assert.Contains(t, res[4], "five.svg")
    assert.Contains(t, res[5], "six.png")
    assert.Contains(t, res[6], "seven.svg")
    assert.Contains(t, res[6], "d:")
    assert.Contains(t, res[7], "eight.png")
    assert.Contains(t, res[7], "c:")
    assert.Contains(t, res[8], "nine.png")
    assert.Contains(t, res[8], "d:")
    assert.Contains(t, res[9], "ten.svg")
    assert.Contains(t, res[9], "E:")
}

func TestModelfileBuilder(t *testing.T) {
    opts := runOptions{
        Model:    "hork",
        System:   "You are part horse and part shark, but all hork. Do horklike things",
        Template: "This is a template.",
        Messages: []api.Message{
            {Role: "user", Content: "Hey there hork!"},
            {Role: "assistant", Content: "Yes it is true, I am half horse, half shark."},
        },
        Options: map[string]interface{}{},
    }

    opts.Options["temperature"] = 0.9
    opts.Options["seed"] = 42
    opts.Options["penalize_newline"] = false
    opts.Options["stop"] = []string{"hi", "there"}

    mf := buildModelfile(opts)
    expectedModelfile := `FROM {{.Model}}
SYSTEM """{{.System}}"""
TEMPLATE """{{.Template}}"""
PARAMETER penalize_newline false
PARAMETER seed 42
PARAMETER stop [hi there]
PARAMETER temperature 0.9

MESSAGE user """Hey there hork!"""
MESSAGE assistant """Yes it is true, I am half horse, half shark."""
`

    tmpl, err := template.New("").Parse(expectedModelfile)
    assert.Nil(t, err)

    var buf bytes.Buffer
    err = tmpl.Execute(&buf, opts)
    assert.Nil(t, err)
    assert.Equal(t, buf.String(), mf)

    opts.ParentModel = "horseshark"
    mf = buildModelfile(opts)
    expectedModelfile = `FROM {{.ParentModel}}
SYSTEM """{{.System}}"""
TEMPLATE """{{.Template}}"""
PARAMETER penalize_newline false
PARAMETER seed 42
PARAMETER stop [hi there]
PARAMETER temperature 0.9

MESSAGE user """Hey there hork!"""
MESSAGE assistant """Yes it is true, I am half horse, half shark."""
`

    tmpl, err = template.New("").Parse(expectedModelfile)
    assert.Nil(t, err)

    var parentBuf bytes.Buffer
    err = tmpl.Execute(&parentBuf, opts)
    assert.Nil(t, err)
    assert.Equal(t, parentBuf.String(), mf)
}
@@ -1,30 +0,0 @@
package cmd

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "strings"

    "github.com/jmorganca/ollama/api"
)

func startApp(ctx context.Context, client *api.Client) error {
    exe, err := os.Executable()
    if err != nil {
        return err
    }
    link, err := os.Readlink(exe)
    if err != nil {
        return err
    }
    if !strings.Contains(link, "Ollama.app") {
        return fmt.Errorf("could not find ollama app")
    }
    path := strings.Split(link, "Ollama.app")
    if err := exec.Command("/usr/bin/open", "-a", path[0]+"Ollama.app").Run(); err != nil {
        return err
    }
    return waitForServer(ctx, client)
}
@@ -1,14 +0,0 @@
//go:build !windows && !darwin

package cmd

import (
    "context"
    "fmt"

    "github.com/jmorganca/ollama/api"
)

func startApp(ctx context.Context, client *api.Client) error {
    return fmt.Errorf("could not connect to ollama server, run 'ollama serve' to start it")
}
@@ -1,58 +0,0 @@
package cmd

import (
    "context"
    "errors"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "syscall"

    "github.com/jmorganca/ollama/api"
)

func startApp(ctx context.Context, client *api.Client) error {
    // log.Printf("XXX Attempting to find and start ollama app")
    AppName := "ollama app.exe"
    exe, err := os.Executable()
    if err != nil {
        return err
    }
    appExe := filepath.Join(filepath.Dir(exe), AppName)
    _, err = os.Stat(appExe)
    if errors.Is(err, os.ErrNotExist) {
        // Try the standard install location
        localAppData := os.Getenv("LOCALAPPDATA")
        appExe = filepath.Join(localAppData, "Ollama", AppName)
        _, err := os.Stat(appExe)
        if errors.Is(err, os.ErrNotExist) {
            // Finally look in the path
            appExe, err = exec.LookPath(AppName)
            if err != nil {
                return fmt.Errorf("could not locate ollama app")
            }
        }
    }
    // log.Printf("XXX attempting to start app %s", appExe)

    cmd_path := "c:\\Windows\\system32\\cmd.exe"
    cmd := exec.Command(cmd_path, "/c", appExe)
    // TODO - these hide flags aren't working - still pops up a command window for some reason
    cmd.SysProcAttr = &syscall.SysProcAttr{CreationFlags: 0x08000000, HideWindow: true}

    // TODO this didn't help either...
    cmd.Stdin = strings.NewReader("")
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr

    if err := cmd.Start(); err != nil {
        return fmt.Errorf("unable to start ollama app %w", err)
    }

    if cmd.Process != nil {
        defer cmd.Process.Release() //nolint:errcheck
    }
    return waitForServer(ctx, client)
}
@@ -1,6 +1,6 @@
# Documentation

-To get started, see the project's **[quickstart](../README.md#quickstart)**.
+To get started, see the project's **[quicktart](../README.md#quickstart)**.

Ollama is a tool for running AI models on your hardware. Many users will choose to use the Command Line Interface (CLI) to work with Ollama. Learn more about all the commands in the CLI in the **[Main Readme](../README.md)**.

@@ -10,9 +10,9 @@ Create new models or modify models already in the library using the Modelfile. L

Import models using source model weights found on Hugging Face and similar sites by referring to the **[Import Documentation](./import.md)**.

-Installing on Linux in most cases is easy using the script on [ollama.com/download](ollama.com/download). To get more detail about the install, including CUDA drivers, see the **[Linux Documentation](./linux.md)**.
+Installing on Linux in most cases is easy using the script on Ollama.ai. To get more detail about the install, including CUDA drivers, see the **[Linux Documentation](./linux.md)**.

-Many of our users like the flexibility of using our official Docker Image. Learn more about using Docker with Ollama using the **[Docker Documentation](https://hub.docker.com/r/ollama/ollama)**.
+Many of our users like the flexibility of using our official Docker Image. Learn more about using Docker with Ollama using the **[Docker Documentation](./docker.md)**.

It is easy to install on Linux and Mac, but many users will choose to build Ollama on their own. To do this, refer to the **[Development Documentation](./development.md)**.

@@ -22,4 +22,4 @@ Finally for all the questions that don't fit anywhere else, there is the **[FAQ]

[Tutorials](./tutorials.md) apply the documentation to tasks.

For working code examples of using Ollama, see [Examples](../examples).
docs/api.md
@@ -27,6 +27,7 @@ All durations are returned in nanoseconds.

Certain endpoints stream responses as JSON objects and can optional return non-streamed responses.

## Generate a completion

```shell
@@ -46,11 +47,10 @@ Advanced parameters (optional):
- `format`: the format to return a response in. Currently the only accepted value is `json`
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
- `system`: system message to (overrides what is defined in the `Modelfile`)
-- `template`: the prompt template to use (overrides what is defined in the `Modelfile`)
+- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
- `context`: the context parameter returned from a previous request to `/generate`, this can be used to keep a short conversational memory
- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
-- `raw`: if `true` no formatting will be applied to the prompt. You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API
+- `raw`: if `true` no formatting will be applied to the prompt. You may choose to use the `raw` parameter if you are specifying a full templated prompt in your request to the API.
-- `keep_alive`: controls how long the model will stay loaded into memory following the request (default: `5m`)

#### JSON mode

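The `context` parameter above is the whole short-term-memory mechanism for `/api/generate`: each response returns a `context` array and the caller threads it into the next request. A hedged sketch of that round trip (non-streaming, default local port; the model name and prompts are placeholders):

```go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
)

type generateRequest struct {
    Model   string `json:"model"`
    Prompt  string `json:"prompt"`
    Stream  bool   `json:"stream"`
    Context []int  `json:"context,omitempty"`
}

type generateResponse struct {
    Response string `json:"response"`
    Context  []int  `json:"context"`
}

// generate makes one non-streaming /api/generate call, passing along the
// previous response's context to keep a short conversational memory.
func generate(prompt string, ctx []int) (*generateResponse, error) {
    body, err := json.Marshal(generateRequest{
        Model:   "llama2", // placeholder model name
        Prompt:  prompt,
        Stream:  false,
        Context: ctx,
    })
    if err != nil {
        return nil, err
    }
    resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    var out generateResponse
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
        return nil, err
    }
    return &out, nil
}

func main() {
    first, err := generate("why is the sky blue?", nil)
    if err != nil {
        panic(err)
    }
    // The second turn reuses the returned context.
    second, err := generate("how about at sunset?", first.Context)
    if err != nil {
        panic(err)
    }
    fmt.Println(second.Response)
}
```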
@@ -104,12 +104,12 @@ To calculate how fast the response is generated in tokens per second (token/s),
  "response": "",
  "done": true,
  "context": [1, 2, 3],
-  "total_duration": 10706818083,
-  "load_duration": 6338219291,
-  "prompt_eval_count": 26,
-  "prompt_eval_duration": 130079000,
-  "eval_count": 259,
-  "eval_duration": 4232710000
+  "total_duration":10706818083,
+  "load_duration":6338219291,
+  "prompt_eval_count":26,
+  "prompt_eval_duration":130079000,
+  "eval_count":259,
+  "eval_duration":4232710000
}
```

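As the hunk header notes, tokens per second is just `eval_count / eval_duration` with a nanosecond-to-second conversion. A tiny sketch using the sample numbers above:

```go
package main

import "fmt"

func main() {
    // Values taken from the sample response above.
    evalCount := 259.0          // tokens generated
    evalDuration := 4232710000.0 // nanoseconds
    fmt.Printf("%.1f tokens/s\n", evalCount/evalDuration*1e9) // ≈ 61.2 tokens/s
}
```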
@@ -170,7 +170,7 @@ curl http://localhost:11434/api/generate -d '{
  "created_at": "2023-11-09T21:07:55.186497Z",
  "response": "{\n\"morning\": {\n\"color\": \"blue\"\n},\n\"noon\": {\n\"color\": \"blue-gray\"\n},\n\"afternoon\": {\n\"color\": \"warm gray\"\n},\n\"evening\": {\n\"color\": \"orange\"\n}\n}\n",
  "done": true,
  "context": [1, 2, 3],
  "total_duration": 4648158584,
  "load_duration": 4071084,
  "prompt_eval_count": 36,
@@ -235,7 +235,6 @@ curl http://localhost:11434/api/generate -d '{
#### Request (Raw Mode)

In some cases, you may wish to bypass the templating system and provide a full prompt. In this case, you can use the `raw` parameter to disable templating. Also note that raw mode will not return a context.

##### Request

```shell
@@ -247,23 +246,6 @@ curl http://localhost:11434/api/generate -d '{
}'
```

-#### Request (Reproducible outputs)
-
-For reproducible outputs, set `temperature` to 0 and `seed` to a number:
-
-##### Request
-
-```shell
-curl http://localhost:11434/api/generate -d '{
-  "model": "mistral",
-  "prompt": "[INST] why is the sky blue? [/INST]",
-  "options": {
-    "seed": 101,
-    "temperature": 0
-  }
-}'
-```
-
##### Response

```json
@@ -337,7 +319,7 @@ curl http://localhost:11434/api/generate -d '{
  "created_at": "2023-08-04T19:22:45.499127Z",
  "response": "The sky is blue because it is the color of the sky.",
  "done": true,
  "context": [1, 2, 3],
  "total_duration": 4935886791,
  "load_duration": 534986708,
  "prompt_eval_count": 26,
@@ -365,10 +347,10 @@ A single JSON object is returned:

```json
{
-  "model": "llama2",
-  "created_at": "2023-12-18T19:52:07.071755Z",
-  "response": "",
-  "done": true
+  "model":"llama2",
+  "created_at":"2023-12-18T19:52:07.071755Z",
+  "response":"",
+  "done":true
}
```

@@ -395,9 +377,8 @@ Advanced parameters (optional):

- `format`: the format to return a response in. Currently the only accepted value is `json`
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
-- `template`: the prompt template to use (overrides what is defined in the `Modelfile`)
+- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
- `stream`: if `false` the response will be returned as a single response object, rather than a stream of objects
-- `keep_alive`: controls how long the model will stay loaded into memory following the request (default: `5m`)

### Examples

@@ -428,8 +409,8 @@ A stream of JSON objects is returned:
  "model": "llama2",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "message": {
-    "role": "assistant",
+    "role": "assisant",
    "content": "The",
    "images": null
  },
  "done": false
@@ -443,12 +424,12 @@ Final response:
  "model": "llama2",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "done": true,
-  "total_duration": 4883583458,
-  "load_duration": 1334875,
-  "prompt_eval_count": 26,
-  "prompt_eval_duration": 342546000,
-  "eval_count": 282,
-  "eval_duration": 4535599000
+  "total_duration":4883583458,
+  "load_duration":1334875,
+  "prompt_eval_count":26,
+  "prompt_eval_duration":342546000,
+  "eval_count":282,
+  "eval_duration":4535599000
}
```

@@ -464,7 +445,7 @@ curl http://localhost:11434/api/chat -d '{
      "role": "user",
      "content": "why is the sky blue?"
    }
  ],
  "stream": false
}'
```
@@ -524,7 +505,7 @@ A stream of JSON objects is returned:
  "model": "llama2",
  "created_at": "2023-08-04T08:52:19.385406455-07:00",
  "message": {
-    "role": "assistant",
+    "role": "assisant",
    "content": "The"
  },
  "done": false
@@ -538,12 +519,12 @@ Final response:
  "model": "llama2",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "done": true,
-  "total_duration": 8113331500,
-  "load_duration": 6396458,
-  "prompt_eval_count": 61,
-  "prompt_eval_duration": 398801000,
-  "eval_count": 468,
-  "eval_duration": 7701267000
+  "total_duration":8113331500,
+  "load_duration":6396458,
+  "prompt_eval_count":61,
+  "prompt_eval_duration":398801000,
+  "eval_count":468,
+  "eval_duration":7701267000
}
```

@@ -561,7 +542,7 @@ curl http://localhost:11434/api/chat -d '{
      "role": "user",
      "content": "what is in this image?",
"images": ["iVBORw0KGgoAAAANSUhEUgAAAG0AAABmCAYAAADBPx+VAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAA3VSURBVHgB7Z27r0zdG8fX743i1bi1ikMoFMQloXRpKFFIqI7LH4BEQ+NWIkjQuSWCRIEoULk0gsK1kCBI0IhrQVT7tz/7zZo888yz1r7MnDl7z5xvsjkzs2fP3uu71nNfa7lkAsm7d++Sffv2JbNmzUqcc8m0adOSzZs3Z+/XES4ZckAWJEGWPiCxjsQNLWmQsWjRIpMseaxcuTKpG/7HP27I8P79e7dq1ars/yL4/v27S0ejqwv+cUOGEGGpKHR37tzJCEpHV9tnT58+dXXCJDdECBE2Ojrqjh071hpNECjx4cMHVycM1Uhbv359B2F79+51586daxN/+pyRkRFXKyRDAqxEp4yMlDDzXG1NPnnyJKkThoK0VFd1ELZu3TrzXKxKfW7dMBQ6bcuWLW2v0VlHjx41z717927ba22U9APcw7Nnz1oGEPeL3m3p2mTAYYnFmMOMXybPPXv2bNIPpFZr1NHn4HMw0KRBjg9NuRw95s8PEcz/6DZELQd/09C9QGq5RsmSRybqkwHGjh07OsJSsYYm3ijPpyHzoiacg35MLdDSIS/O1yM778jOTwYUkKNHWUzUWaOsylE00MyI0fcnOwIdjvtNdW/HZwNLGg+sR1kMepSNJXmIwxBZiG8tDTpEZzKg0GItNsosY8USkxDhD0Rinuiko2gfL/RbiD2LZAjU9zKQJj8RDR0vJBR1/Phx9+PHj9Z7REF4nTZkxzX4LCXHrV271qXkBAPGfP/atWvu/PnzHe4C97F48eIsRLZ9+3a3f/9+87dwP1JxaF7/3r17ba+5l4EcaVo0lj3SBq5kGTJSQmLWMjgYNei2GPT1MuMqGTDEFHzeQSP2wi/jGnkmPJ/nhccs44jvDAxpVcxnq0F6eT8h4ni/iIWpR5lPyA6ETkNXoSukvpJAD3AsXLiwpZs49+fPn5ke4j10TqYvegSfn0OnafC+Tv9ooA/JPkgQysqQNBzagXY55nO/oa1F7qvIPWkRL12WRpMWUvpVDYmxAPehxWSe8ZEXL20sadYIozfmNch4QJPAfeJgW3rNsnzphBKNJM2KKODo1rVOMRYik5ETy3ix4qWNI81qAAirizgMIc+yhTytx0JWZuNI03qsrgWlGtwjoS9XwgUhWGyhUaRZZQNNIEwCiXD16tXcAHUs79co0vSD8rrJCIW98pzvxpAWyyo3HYwqS0+H0BjStClcZJT5coMm6D2LOF8TolGJtK9fvyZpyiC5ePFi9nc/oJU4eiEP0jVoAnHa9wyJycITMP78+eMeP37sXrx44d6+fdt6f82aNdkx1pg9e3Zb5W+RSRE+n+VjksQWifvVaTKFhn5O8my63K8Qabdv33b379/PiAP//vuvW7BggZszZ072/+TJk91YgkafPn166zXB1rQHFvouAWHq9z3SEevSUerqCn2/dDCeta2jxYbr69evk4MHDyY7d+7MjhMnTiTPnz9Pfv/+nfQT2ggpO2dMF8cghuoM7Ygj5iWCqRlGFml0QC/ftGmTmzt3rmsaKDsgBSPh0/8yPeLLBihLkOKJc0jp8H8vUzcxIA1k6QJ/c78tWEyj5P3o4u9+jywNPdJi5rAH9x0KHcl4Hg570eQp3+vHXGyrmEeigzQsQsjavXt38ujRo44LQuDDhw+TW7duRS1HGgMxhNXHgflaNTOsHyKvHK5Ijo2jbFjJBQK9YwFd6RVMzfgRBmEfP37suBBm/p49e1qjEP2mwTViNRo0VJWH1deMXcNK08uUjVUu7s/zRaL+oLNxz1bpANco4npUgX4G2eFbpDFyQoQxojBCpEGSytmOH8qrH5Q9vuzD6ofQylkCUmh8DBAr+q8JCyVNtWQIidKQE9wNtLSQnS4jDSsxNHogzFuQBw4cyM61UKVsjfr3ooBkPSqqQHesUPWVtzi9/vQi1T+rJj7WiTz4Pt/l3LxUkr5P2VYZaZ4URpsE+st/dujQoaBBYokbrz/8TJNQYLSonrPS9kUaSkPeZyj1AWSj+d+VBoy1pIWVNed8P0Ll/ee5HdGRhrHhR5GGN0r4LGZBaj8oFDJitBTJzIZgFcmU0Y8ytWMZMzJOaXUSrUs5RxKnrxmbb5YXO9VGUhtpXldhEUogFr3IzIsvlpmdosVcGVGXFWp2oU9kLFL3dEkSz6NHEY1sjSRdIuDFWEhd8KxFqsRi1uM/nz9/zpxnwlESONdg6dKlbsaMGS4EHFHtjFIDHwKOo46l4TxSuxgDzi+rE2jg+BaFruOX4HXa0Nnf1lwAPufZeF8/r6zD97WK2qFnGjBxTw5qNGPxT+5T/r7/7RawFC3j4vTp09koCxkeHjqbHJqArmH5UrFKKksnxrK7FuRIs8STfBZv+luugXZ2pR/pP9Ois4z+TiMzUUkUjD0iEi1fzX8GmXyuxUBRcaUfykV0YZnlJGKQpOiGB76x5GeWkWWJc3mOrK6S7xdND+W5N6XyaRgtWJFe13GkaZnKOsYqGdOVVVbGupsyA/l7emTLHi7vwTdirNEt0qxnzAvBFcnQF16xh/TMpUuXHDowhlA9vQVraQhkudRdzOnK+04ZSP3DUhVSP61YsaLtd/ks7ZgtPcXqPqEafHkdqa84X6aCeL7YWlv6edGFHb+ZFICPlljHhg0bKuk0CSvVznWsotRu433alNdFrqG45ejoaPCaUkWERpLXjzFL2Rpllp7PJU2a/v7Ab8N05/9t27Z16KUqoFGsxnI9EosS2niSYg9SpU6B4JgTrvVW1flt1sT+0ADIJU2maXzcUTraGCRaL1Wp9rUMk16PMom8QhruxzvZIegJjFU7LLCePfS8uaQdPny4jTTL0dbee5mYokQsXTIWNY46kuMbnt8Kmec+LGWtOVIl9cT1rCB0V8WqkjAsRwta93TbwNYoGKsUSChN44lgBNCoHLHzquYKrU6qZ8lolCIN0Rh6cP0Q3U6I6IXILYOQI513hJaSKAorFpuHXJNfVlpRtmYBk1Su1obZr5dnKAO+L10Hrj3WZW+E3qh6IszE37F6EB+68mGpvKm4eb9bFrlzrok7fvr0Kfv727dvWRmdVTJHw0qiiCUSZ6wCK+7XL/AcsgNyL74DQQ730sv78Su7+t/A36MdY0sW5o40ahslXr58aZ5HtZB8GH64m9EmMZ7FpYw4T6QnrZfgenrhFxaSiSGXtPnz57e9TkNZLvTjeqhr734CNtrK41L40sUQckmj1lGKQ0rC37x544r8eNXRpnVE3ZZY7zXo8NomiO0ZUCj2uHz58rbXoZ6gc0uA+F6ZeKS/jhRDUq8MKrTho9fEkihMmhxtBI1DxKFY9XLpVcSkfoi8JGnToZO5sU5aiDQIW716ddt7ZLYtMQlhECdBGXZZMWldY5BHm5xgAroWj4C0hbYkSc/jBmggIrXJWlZM6pSETsEPGqZOndr2uuuR5rF
169a2HoHPdurUKZM4CO1WTPqaDaAd+GFGKdIQkxAn9RuEWcTRyN2KSUgiSgF5aWzPTeA/lN5rZubMmR2bE4SIC4nJoltgAV/dVefZm72AtctUCJU2CMJ327hxY9t7EHbkyJFseq+EJSY16RPo3Dkq1kkr7+q0bNmyDuLQcZBEPYmHVdOBiJyIlrRDq41YPWfXOxUysi5fvtyaj+2BpcnsUV/oSoEMOk2CQGlr4ckhBwaetBhjCwH0ZHtJROPJkyc7UjcYLDjmrH7ADTEBXFfOYmB0k9oYBOjJ8b4aOYSe7QkKcYhFlq3QYLQhSidNmtS2RATwy8YOM3EQJsUjKiaWZ+vZToUQgzhkHXudb/PW5YMHD9yZM2faPsMwoc7RciYJXbGuBqJ1UIGKKLv915jsvgtJxCZDubdXr165mzdvtr1Hz5LONA8jrUwKPqsmVesKa49S3Q4WxmRPUEYdTjgiUcfUwLx589ySJUva3oMkP6IYddq6HMS4o55xBJBUeRjzfa4Zdeg56QZ43LhxoyPo7Lf1kNt7oO8wWAbNwaYjIv5lhyS7kRf96dvm5Jah8vfvX3flyhX35cuX6HfzFHOToS1H4BenCaHvO8pr8iDuwoUL7tevX+b5ZdbBair0xkFIlFDlW4ZknEClsp/TzXyAKVOmmHWFVSbDNw1l1+4f90U6IY/q4V27dpnE9bJ+v87QEydjqx/UamVVPRG+mwkNTYN+9tjkwzEx+atCm/X9WvWtDtAb68Wy9LXa1UmvCDDIpPkyOQ5ZwSzJ4jMrvFcr0rSjOUh+GcT4LSg5ugkW1Io0/SCDQBojh0hPlaJdah+tkVYrnTZowP8iq1F1TgMBBauufyB33x1v+NWFYmT5KmppgHC+NkAgbmRkpD3yn9QIseXymoTQFGQmIOKTxiZIWpvAatenVqRVXf2nTrAWMsPnKrMZHz6bJq5jvce6QK8J1cQNgKxlJapMPdZSR64/UivS9NztpkVEdKcrs5alhhWP9NeqlfWopzhZScI6QxseegZRGeg5a8C3Re1Mfl1ScP36ddcUaMuv24iOJtz7sbUjTS4qBvKmstYJoUauiuD3k5qhyr7QdUHMeCgLa1Ear9NquemdXgmum4fvJ6w1lqsuDhNrg1qSpleJK7K3TF0Q2jSd94uSZ60kK1e3qyVpQK6PVWXp2/FC3mp6jBhKKOiY2h3gtUV64TWM6wDETRPLDfSakXmH3w8g9Jlug8ZtTt4kVF0kLUYYmCCtD/DrQ5YhMGbA9L3ucdjh0y8kOHW5gU/VEEmJTcL4Pz/f7mgoAbYkAAAAAElFTkSuQmCC"]
-    }
+    },
  ]
}'
```
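The multimodal request above inlines the image as base64 inside the `images` array. A hedged sketch of building the same payload from a local file (the model name and file path are placeholders):

```go
package main

import (
    "bytes"
    "encoding/base64"
    "encoding/json"
    "io"
    "net/http"
    "os"
)

func main() {
    // Read the image and base64-encode it into the "images" array,
    // exactly as the inline example above does by hand.
    raw, err := os.ReadFile("image.png") // placeholder path
    if err != nil {
        panic(err)
    }

    body, _ := json.Marshal(map[string]any{
        "model": "llava", // placeholder multimodal model
        "messages": []map[string]any{{
            "role":    "user",
            "content": "what is in this image?",
            "images":  []string{base64.StdEncoding.EncodeToString(raw)},
        }},
    })
    resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    io.Copy(os.Stdout, resp.Body) // prints the stream of JSON objects
}
```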
@@ -578,52 +559,12 @@ curl http://localhost:11434/api/chat -d '{
    "images": null
  },
  "done": true,
-  "total_duration": 1668506709,
-  "load_duration": 1986209,
-  "prompt_eval_count": 26,
-  "prompt_eval_duration": 359682000,
-  "eval_count": 83,
-  "eval_duration": 1303285000
+  "total_duration":1668506709,
+  "load_duration":1986209,
+  "prompt_eval_count":26,
+  "prompt_eval_duration":359682000,
+  "eval_count":83,
+  "eval_duration":1303285000
-}
-```
-
-#### Chat request (Reproducible outputs)
-
-##### Request
-
-```shell
-curl http://localhost:11434/api/chat -d '{
-  "model": "llama2",
-  "messages": [
-    {
-      "role": "user",
-      "content": "Hello!"
-    }
-  ],
-  "options": {
-    "seed": 101,
-    "temperature": 0
-  }
-}'
-```
-
-##### Response
-
-```json
-{
-  "model": "registry.ollama.ai/library/llama2:latest",
-  "created_at": "2023-12-12T14:13:43.416799Z",
-  "message": {
-    "role": "assistant",
-    "content": "Hello! How are you today?"
-  },
-  "done": true,
-  "total_duration": 5191566416,
-  "load_duration": 2154458,
-  "prompt_eval_count": 26,
-  "prompt_eval_duration": 383809000,
-  "eval_count": 298,
-  "eval_duration": 4799921000
}
```

@@ -633,7 +574,7 @@ curl http://localhost:11434/api/chat -d '{
POST /api/create
```

Create a model from a [`Modelfile`](./modelfile.md). It is recommended to set `modelfile` to the content of the Modelfile rather than just set `path`. This is a requirement for remote create. Remote model creation must also explicitly create any file blobs referenced by fields such as `FROM` and `ADAPTER` with the server, using [Create a Blob](#create-a-blob), and set the field value to the path indicated in the response.

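For orientation (this request is a sketch, not part of the diff; the model name and Modelfile content are placeholders), a remote create call might look like:

```shell
# Hypothetical name and Modelfile; any blob referenced by FROM must already
# exist on the server (see Create a Blob below).
curl http://localhost:11434/api/create -d '{
  "name": "mario",
  "modelfile": "FROM llama2\nSYSTEM You are mario from Super Mario Bros."
}'
```
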
### Parameters

@@ -683,6 +624,7 @@ HEAD /api/blobs/:digest

Ensures that the file blob used for a FROM or ADAPTER field exists on the server. This is checking your Ollama server and not Ollama.ai.

#### Query Parameters

- `digest`: the SHA256 digest of the blob
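
As a sketch (the digest shown is a placeholder, not a real blob), the check is a plain HEAD request:

```shell
# 200 OK means the blob exists on your Ollama server; 404 means it does not.
curl -I http://localhost:11434/api/blobs/sha256:0000000000000000000000000000000000000000000000000000000000000000
```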
@@ -1017,7 +959,6 @@ Generate embeddings from a model
Advanced parameters:

- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
-- `keep_alive`: controls how long the model will stay loaded into memory following the request (default: `5m`)

### Examples

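The examples themselves fall outside this hunk; for orientation, a request exercising both advanced parameters might look like this sketch (model name illustrative):

```shell
curl http://localhost:11434/api/embeddings -d '{
  "model": "llama2",
  "prompt": "Why is the sky blue?",
  "options": { "temperature": 0 },
  "keep_alive": "5m"
}'
```
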
docs/development.md
@@ -1,9 +1,13 @@
# Development

+- Install cmake and, optionally, the required tools for GPUs
+- run `go generate ./...`
+- run `go build .`

Install required tools:

- cmake version 3.24 or higher
-- go version 1.21 or higher
+- go version 1.20 or higher
- gcc version 11.4.0 or higher

```bash
@@ -13,11 +17,7 @@ brew install go cmake gcc
Optionally enable debugging and more verbose logging:

```bash
-# At build time
export CGO_CFLAGS="-g"
-
-# At runtime
-export OLLAMA_DEBUG=1
```

Get the required libraries and build the native LLM code:
@@ -38,101 +38,37 @@ Now you can run `ollama`:
./ollama
```

-### Linux
+## Building on Linux with GPU support

-#### Linux CUDA (NVIDIA)
+### Linux/Windows CUDA (NVIDIA)

*Your operating system distribution may already have packages for NVIDIA CUDA. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!*

-Install `cmake` and `golang` as well as [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads)
-development and runtime packages.
+Install `cmake` and `golang` as well as [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) development and runtime packages.

-Typically the build scripts will auto-detect CUDA; however, if your Linux distro
-or installation approach uses unusual paths, you can specify the location by
-setting the environment variable `CUDA_LIB_DIR` to the location of the shared
-libraries and `CUDACXX` to the location of the nvcc compiler. You can customize
-the set of target CUDA architectures by setting `CMAKE_CUDA_ARCHITECTURES` (e.g. "50;60;70").

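A build against a CUDA install in a non-standard location might then look like the following sketch (the paths are illustrative assumptions, not defaults):

```shell
# Hypothetical paths; point these at your actual CUDA install.
CUDA_LIB_DIR=/usr/local/cuda/lib64 \
CUDACXX=/usr/local/cuda/bin/nvcc \
CMAKE_CUDA_ARCHITECTURES="50;60;70" \
go generate ./...
```
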
Then generate dependencies:

```
go generate ./...
```

Then build the binary:

```
go build .
```

-#### Linux ROCm (AMD)
+### Linux ROCm (AMD)

*Your operating system distribution may already have packages for AMD ROCm and CLBlast. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!*

Install [CLBlast](https://github.com/CNugteren/CLBlast/blob/master/doc/installation.md) and [ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html) development packages first, as well as `cmake` and `golang`.

-Typically the build scripts will auto-detect ROCm; however, if your Linux distro
-or installation approach uses unusual paths, you can specify the location by
-setting the environment variable `ROCM_PATH` to the location of the ROCm
-install (typically `/opt/rocm`), and `CLBlast_DIR` to the location of the
-CLBlast install (typically `/usr/lib/cmake/CLBlast`). You can also customize
-the AMD GPU targets by setting `AMDGPU_TARGETS` (e.g. `AMDGPU_TARGETS="gfx1101;gfx1102"`)
+Adjust the paths below (correct for Arch) as appropriate for your distribution's install locations and generate dependencies:

```
-go generate ./...
+CLBlast_DIR=/usr/lib/cmake/CLBlast ROCM_PATH=/opt/rocm go generate ./...
```

Then build the binary:

```
go build .
```

ROCm requires elevated privileges to access the GPU at runtime. On most distros you can add your user account to the `render` group, or run as root.

-#### Advanced CPU Settings
+## Containerized Build

-By default, running `go generate ./...` will compile a few different variations
-of the LLM library based on common CPU families and vector math capabilities,
-including a lowest-common-denominator build which should run on almost any 64 bit CPU,
-somewhat slowly. At runtime, Ollama will auto-detect the optimal variation to
-load. If you would like a CPU-based build customized for your
-processor, you can set `OLLAMA_CUSTOM_CPU_DEFS` to the llama.cpp flags you would
-like to use. For example, to compile an optimized binary for an Intel i9-9880H,
-you might use:
+If you have Docker available, you can build linux binaries with `./scripts/build_linux.sh` which has the CUDA and ROCm dependencies included.

-```
-OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./...
-go build .
-```

-#### Containerized Linux Build

-If you have Docker available, you can build linux binaries with `./scripts/build_linux.sh` which has the CUDA and ROCm dependencies included. The resulting binary is placed in `./dist`

-### Windows

-Note: The windows build for Ollama is still under development.

-Install required tools:

-- MSVC toolchain - C/C++ and cmake as minimal requirements
-- go version 1.21 or higher
-- MinGW (pick one variant) with GCC.
-  - <https://www.mingw-w64.org/>
-  - <https://www.msys2.org/>

-```powershell
-$env:CGO_ENABLED="1"
-go generate ./...
-go build .
-```

-#### Windows CUDA (NVIDIA)

-In addition to the common Windows development tools described above, install:

-- [NVIDIA CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html)
141
docs/faq.md
@@ -2,116 +2,71 @@

## How can I upgrade Ollama?

-Ollama on macOS and Windows will automatically download updates. Click on the taskbar or menubar item and then click "Restart to update" to apply the update. Updates can also be installed by downloading the latest version [manually](https://ollama.com/download/).
+To upgrade Ollama, run the installation process again. On the Mac, click the Ollama icon in the menubar and choose the restart option if an update is available.

-On Linux, re-run the install script:

-```
-curl -fsSL https://ollama.com/install.sh | sh
-```

## How can I view the logs?

Review the [Troubleshooting](./troubleshooting.md) docs for more about using logs.

-## How can I specify the context window size?
+## How do I use Ollama server environment variables on Mac

-By default, Ollama uses a context window size of 2048 tokens.
+On macOS, Ollama runs in the background and is managed by the menubar app. To add environment variables, Ollama needs to be run manually.

-To change this when using `ollama run`, use `/set parameter`:
+1. Click the menubar icon for Ollama and choose **Quit Ollama**.
+2. Open a new terminal window and run the following command (this example uses `OLLAMA_HOST` with an IP address of `123.1.1.1`):

-```
-/set parameter num_ctx 4096
-```
+```bash
+OLLAMA_HOST=123.1.1.1 ollama serve
+```

-When using the API, specify the `num_ctx` parameter:
+## How do I use Ollama server environment variables on Linux?

-```
-curl http://localhost:11434/api/generate -d '{
-  "model": "llama2",
-  "prompt": "Why is the sky blue?",
-  "options": {
-    "num_ctx": 4096
-  }
-}'
-```
+If Ollama is installed with the install script, a systemd service was created, running as the Ollama user. To add an environment variable, such as OLLAMA_HOST, follow these steps:

-## How do I configure Ollama server?
+1. Create a `systemd` drop-in directory and add a config file. This is only needed once.

-Ollama server can be configured with environment variables.
+```bash
+mkdir -p /etc/systemd/system/ollama.service.d
+echo '[Service]' >>/etc/systemd/system/ollama.service.d/environment.conf
+```

-### Setting environment variables on Mac
+2. For each environment variable, add it to the config file:

-If Ollama is run as a macOS application, environment variables should be set using `launchctl`:
+```bash
+echo 'Environment="OLLAMA_HOST=0.0.0.0:11434"' >>/etc/systemd/system/ollama.service.d/environment.conf
+```

-1. For each environment variable, call `launchctl setenv`.
+3. Reload `systemd` and restart Ollama:

-    ```bash
-    launchctl setenv OLLAMA_HOST "0.0.0.0"
-    ```

-2. Restart Ollama application.

-### Setting environment variables on Linux

-If Ollama is run as a systemd service, environment variables should be set using `systemctl`:

-1. Edit the systemd service by calling `systemctl edit ollama.service`. This will open an editor.

-2. For each environment variable, add a line `Environment` under section `[Service]`:

-    ```ini
-    [Service]
-    Environment="OLLAMA_HOST=0.0.0.0"
-    ```

-3. Save and exit.

-4. Reload `systemd` and restart Ollama:

```bash
systemctl daemon-reload
systemctl restart ollama
```

-### Setting environment variables on Windows

-On Windows, Ollama inherits your user and system environment variables.

-1. First quit Ollama by clicking on it in the task bar.

-2. Edit system environment variables from the control panel.

-3. Edit or create new variable(s) for your user account for `OLLAMA_HOST`, `OLLAMA_MODELS`, etc.

-4. Click OK/Apply to save.

-5. Run `ollama` from a new terminal window.

## How can I expose Ollama on my network?

-Ollama binds 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable.
+Ollama binds to 127.0.0.1 port 11434 by default. Change the bind address with the `OLLAMA_HOST` environment variable. Refer to the section above for how to use environment variables on your platform.

-Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
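
For a quick one-off test (a sketch; the bind address is illustrative), the variable can also be set inline for a single run:

```shell
OLLAMA_HOST=0.0.0.0:11434 ollama serve
```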

## How can I allow additional web origins to access Ollama?

-Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Additional origins can be configured with `OLLAMA_ORIGINS`.
+Ollama allows cross-origin requests from `127.0.0.1` and `0.0.0.0` by default. Add additional origins with the `OLLAMA_ORIGINS` environment variable. For example, to add all ports on 192.168.1.1 and https://example.com, use:

-Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
+```shell
+OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com
+```

+Refer to the section above for how to use environment variables on your platform.

## Where are models stored?

-- macOS: `~/.ollama/models`
+- macOS: `~/.ollama/models`.
- Linux: `/usr/share/ollama/.ollama/models`
-- Windows: `C:\Users\<username>\.ollama\models`

-### How do I set them to a different location?
+See [the CLI Documentation](./cli.md) for more on this.

+## How do I set them to a different location?

-If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory.
-
-Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.
+If a different directory needs to be used, set the environment variable `OLLAMA_MODELS` to the chosen directory. Refer to the section above for how to use environment variables on your platform.

## Does Ollama send my prompts and answers back to Ollama.ai to use in any way?

@@ -160,36 +115,6 @@ Open `Control Panel > Networking and Internet > View network status and tasks` a
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these
properties.

-## How can I pre-load a model to get faster response times?
+## What is context, can I increase it, and why doesn't every model support a huge context?

-If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.
+Context refers to the size of the input that can be sent to a model and get sensible output back. Many models have a context size of 2048 tokens. It's sometimes possible to give it more, but the answers start to degrade. Newer models have been able to increase that context size using different methods. This increase in context size results in a corresponding increase in memory required, sometimes by orders of magnitude.

-To preload the mistral model using the generate endpoint, use:

-```shell
-curl http://localhost:11434/api/generate -d '{"model": "mistral"}'
-```

-To use the chat completions endpoint, use:

-```shell
-curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
-```

-## How do I keep a model loaded in memory or make it unload immediately?

-By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed, or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` or `/api/chat` API endpoint to control how long the model is left in memory.

-The `keep_alive` parameter can be set to:
-* a duration string (such as "10m" or "24h")
-* a number in seconds (such as 3600)
-* any negative number, which will keep the model loaded in memory (e.g. -1 or "-1m")
-* '0', which will unload the model immediately after generating a response

-For example, to preload a model and leave it in memory use:

-```shell
-curl http://localhost:11434/api/generate -d '{"model": "llama2", "keep_alive": -1}'
-```

-To unload the model and free up memory use:

-```shell
-curl http://localhost:11434/api/generate -d '{"model": "llama2", "keep_alive": 0}'
-```
121
docs/import.md
@@ -15,7 +15,7 @@ FROM ./mistral-7b-v0.1.Q4_0.gguf
(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:

```
-FROM ./mistral-7b-v0.1.Q4_0.gguf
+FROM ./q4_0.bin
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```

@@ -37,69 +37,55 @@ ollama run example "What is your favourite condiment?"

## Importing (PyTorch & Safetensors)

-> Importing from PyTorch and Safetensors is a longer process than importing from GGUF. Improvements that make it easier are a work in progress.
-
-### Setup
-
-First, clone the `ollama/ollama` repo:
-
-```
-git clone git@github.com:ollama/ollama.git ollama
-cd ollama
-```
+### Supported models
+
+Ollama supports a set of model architectures, with support for more coming soon:
+
+- Llama & Mistral
+- Falcon & RW
+- BigCode
+
+To view a model's architecture, check the `config.json` file in its HuggingFace repo. You should see an entry under `architectures` (e.g. `LlamaForCausalLM`).
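
As a sketch (assuming the model repository is already cloned locally), the entry can be checked with a one-liner:

```shell
# Prints e.g. ['LlamaForCausalLM'] from a local clone's config.json.
python3 -c "import json; print(json.load(open('config.json'))['architectures'])"
```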

-and then fetch its `llama.cpp` submodule:
+### Step 1: Clone the HuggingFace repository (optional)

-```shell
-git submodule init
-git submodule update llm/llama.cpp
-```

-Next, install the Python dependencies:

-```
-python3 -m venv llm/llama.cpp/.venv
-source llm/llama.cpp/.venv/bin/activate
-pip install -r llm/llama.cpp/requirements.txt
-```

-Then build the `quantize` tool:

-```
-make -C llm/llama.cpp quantize
-```

-### Clone the HuggingFace repository (optional)

If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.

-Install [Git LFS](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage), verify it's installed, and then clone the model's repository:

```
git lfs install
-git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1 model
+git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
+cd Mistral-7B-Instruct-v0.1
```

-### Convert the model
+### Step 2: Convert and quantize to a `.bin` file (optional, for PyTorch and Safetensors)

-> Note: some model architectures require using specific convert scripts. For example, Qwen models require running `convert-hf-to-gguf.py` instead of `convert.py`.
+If the model is in PyTorch or Safetensors format, a [Docker image](https://hub.docker.com/r/ollama/quantize) with the tooling required to convert and quantize models is available.

+First, install [Docker](https://www.docker.com/get-started/).

+Next, to convert and quantize your model, run:

```
-python llm/llama.cpp/convert.py ./model --outtype f16 --outfile converted.bin
+docker run --rm -v .:/model ollama/quantize -q q4_0 /model
```

-### Quantize the model
+This will output two files into the directory:

-```
-llm/llama.cpp/quantize converted.bin quantized.bin q4_0
-```
+- `f16.bin`: the model converted to GGUF
+- `q4_0.bin`: the model quantized to a 4-bit quantization (Ollama will use this file to create the Ollama model)

### Step 3: Write a `Modelfile`

Next, create a `Modelfile` for your model:

```
-FROM quantized.bin
+FROM ./q4_0.bin
+```
+
+(Optional) many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:
+
+```
+FROM ./q4_0.bin
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```

@@ -123,9 +109,9 @@ ollama run example "What is your favourite condiment?"

Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:

-1. Create [an account](https://ollama.com/signup)
-2. Run `cat ~/.ollama/id_ed25519.pub` (or `type %USERPROFILE%\.ollama\id_ed25519.pub` on Windows) to view your Ollama public key. Copy this to the clipboard.
-3. Add your public key to your [Ollama account](https://ollama.com/settings/keys)
+1. Create [an account](https://ollama.ai/signup)
+2. Run `cat ~/.ollama/id_ed25519.pub` to view your Ollama public key. Copy this to the clipboard.
+3. Add your public key to your [Ollama account](https://ollama.ai/settings/keys)

Next, copy your model to your username's namespace:

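The copy command itself falls outside this hunk; it presumably takes the form of this sketch (`<your username>` is a placeholder):

```shell
ollama cp example <your username>/example
```
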
@@ -139,7 +125,7 @@ Then push the model:
ollama push <your username>/example
```

-After publishing, your model will be available at `https://ollama.com/<your username>/example`.
+After publishing, your model will be available at `https://ollama.ai/<your username>/example`.

## Quantization reference

@@ -162,4 +148,47 @@ The quantization options are as follows (from highest to lowest levels of
- `q5_K_M`
- `q6_K`
- `q8_0`
-- `f16`

+## Manually converting & quantizing models
+
+### Prerequisites
+
+Start by cloning the `llama.cpp` repo to your machine in another directory:
+
+```
+git clone https://github.com/ggerganov/llama.cpp.git
+cd llama.cpp
+```
+
+Next, install the Python dependencies:
+
+```
+pip install -r requirements.txt
+```
+
+Finally, build the `quantize` tool:
+
+```
+make quantize
+```
+
+### Convert the model
+
+Run the correct conversion script for your model architecture:
+
+```shell
+# LlamaForCausalLM or MistralForCausalLM
+python convert.py <path to model directory>
+
+# FalconForCausalLM
+python convert-falcon-hf-to-gguf.py <path to model directory>
+
+# GPTBigCodeForCausalLM
+python convert-starcoder-hf-to-gguf.py <path to model directory>
+```
+
+### Quantize the model
+
+```
+quantize <path to model dir>/ggml-model-f32.bin <path to model dir>/q4_0.bin q4_0
+```

docs/linux.md
@@ -3,11 +3,9 @@
## Install

Install Ollama by running this one-liner:

>

```bash
-curl -fsSL https://ollama.com/install.sh | sh
+curl https://ollama.ai/install.sh | sh
```

## Manual install
@@ -17,7 +15,7 @@ curl -fsSL https://ollama.com/install.sh | sh
Ollama is distributed as a self-contained binary. Download it to a directory in your PATH:

```bash
-sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
+sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```

@@ -77,13 +75,13 @@ sudo systemctl start ollama
Update ollama by running the install script again:

```bash
-curl -fsSL https://ollama.com/install.sh | sh
+curl https://ollama.ai/install.sh | sh
```

Or by downloading the ollama binary:

```bash
-sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
+sudo curl -L https://ollama.ai/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```

@@ -111,10 +109,8 @@ Remove the ollama binary from your bin directory (either `/usr/local/bin`, `/usr
sudo rm $(which ollama)
```

-Remove the downloaded models and Ollama service user and group:
+Remove the downloaded models and Ollama service user:

```bash
sudo rm -r /usr/share/ollama
sudo userdel ollama
-sudo groupdel ollama
```

docs/modelfile.md
@@ -19,7 +19,6 @@ A model file is the blueprint to create and share models with Ollama.
- [SYSTEM](#system)
- [ADAPTER](#adapter)
- [LICENSE](#license)
-- [MESSAGE](#message)
- [Notes](#notes)

## Format
@@ -39,7 +38,6 @@ INSTRUCTION arguments
| [`SYSTEM`](#system) | Specifies the system message that will be set in the template. |
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
| [`LICENSE`](#license) | Specifies the legal license. |
-| [`MESSAGE`](#message) | Specify message history. |

## Examples

@@ -67,13 +65,13 @@ To use this:

More examples are available in the [examples directory](../examples).

-### `Modelfile`s in [ollama.com/library][1]
+### `Modelfile`s in [ollama.ai/library][1]

-There are two ways to view `Modelfile`s underlying the models in [ollama.com/library][1]:
+There are two ways to view `Modelfile`s underlying the models in [ollama.ai/library][1]:

- Option 1: view a details page from a model's tags page:
-  1. Go to a particular model's tags (e.g. https://ollama.com/library/llama2/tags)
-  2. Click on a tag (e.g. https://ollama.com/library/llama2:13b)
+  1. Go to a particular model's tags (e.g. https://ollama.ai/library/llama2/tags)
+  2. Click on a tag (e.g. https://ollama.ai/library/llama2:13b)
  3. Scroll down to "Layers"
     - Note: if the [`FROM` instruction](#from-required) is not present,
       it means the model was created from a local file
@@ -86,7 +84,7 @@ There are two ways to view `Modelfile`s underlying the models in [ollama.com/lib
# FROM llama2:13b

FROM /root/.ollama/models/blobs/sha256:123abc
-TEMPLATE """[INST] {{ if .System }}<<SYS>>{{ .System }}<</SYS>>
+TEMPLATE """[INST] {{ if and .First .System }}<<SYS>>{{ .System }}<</SYS>>

{{ end }}{{ .Prompt }} [/INST] """
SYSTEM """"""
@@ -154,23 +152,30 @@ PARAMETER <parameter> <parametervalue>

### TEMPLATE

-`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system message, a user's message and the response from the model. Note: syntax may be model specific. Templates use Go [template syntax](https://pkg.go.dev/text/template).
+`TEMPLATE` of the full prompt template to be passed into the model. It may include (optionally) a system message and a user's prompt. This is used to create a full custom prompt, and syntax may be model specific. You can usually find the template for a given model in the readme for that model.

#### Template Variables

| Variable | Description |
-| ----------------- | --------------------------------------------------------------------------------------------- |
-| `{{ .System }}` | The system message used to specify custom behavior. |
-| `{{ .Prompt }}` | The user prompt message. |
-| `{{ .Response }}` | The response from the model. When generating a response, text after this variable is omitted. |
+| --------------- | ------------------------------------------------------------------------------------------------------------- |
+| `{{ .System }}` | The system message used to specify custom behavior; this must also be set in the Modelfile as an instruction. |
+| `{{ .Prompt }}` | The incoming prompt; this is not specified in the model file and will be set based on input. |
+| `{{ .First }}` | A boolean value used to render specific template information for the first generation of a session. |

-```
-TEMPLATE """{{ if .System }}<|im_start|>system
-{{ .System }}<|im_end|>
-{{ end }}{{ if .Prompt }}<|im_start|>user
-{{ .Prompt }}<|im_end|>
-{{ end }}<|im_start|>assistant
-"""
-```
+```modelfile
+TEMPLATE """
+{{- if .First }}
+### System:
+{{ .System }}
+{{- end }}
+
+### User:
+{{ .Prompt }}
+
+### Response:
+"""
+
+SYSTEM """<system message>"""
+```

### SYSTEM
@@ -199,22 +204,9 @@ LICENSE """
"""
```

-### MESSAGE

-The `MESSAGE` instruction allows you to specify a message history for the model to use when responding:

-```modelfile
-MESSAGE user Is Toronto in Canada?
-MESSAGE assistant yes
-MESSAGE user Is Sacramento in Canada?
-MESSAGE assistant no
-MESSAGE user Is Ontario in Canada?
-MESSAGE assistant yes
-```

## Notes

- the **`Modelfile` is not case sensitive**. In the examples, uppercase instructions are used to make them easier to distinguish from arguments.
- Instructions can be in any order. In the examples, the `FROM` instruction is first to keep it easily readable.

-[1]: https://ollama.com/library
+[1]: https://ollama.ai/library
141
docs/openai.md
@@ -1,141 +0,0 @@
-# OpenAI compatibility
-
-> **Note:** OpenAI compatibility is experimental and is subject to major adjustments including breaking changes. For fully-featured access to the Ollama API, see the Ollama [Python library](https://github.com/ollama/ollama-python), [JavaScript library](https://github.com/ollama/ollama-js) and [REST API](https://github.com/jmorganca/ollama/blob/main/docs/api.md).
-
-Ollama provides experimental compatibility with parts of the [OpenAI API](https://platform.openai.com/docs/api-reference) to help connect existing applications to Ollama.
-
-## Usage
-
-### OpenAI Python library
-
-```python
-from openai import OpenAI
-
-client = OpenAI(
-    base_url='http://localhost:11434/v1/',
-
-    # required but ignored
-    api_key='ollama',
-)
-
-chat_completion = client.chat.completions.create(
-    messages=[
-        {
-            'role': 'user',
-            'content': 'Say this is a test',
-        }
-    ],
-    model='llama2',
-)
-```
-
-### OpenAI JavaScript library
-
-```javascript
-import OpenAI from 'openai'
-
-const openai = new OpenAI({
-  baseURL: 'http://localhost:11434/v1/',
-
-  // required but ignored
-  apiKey: 'ollama',
-})
-
-const chatCompletion = await openai.chat.completions.create({
-  messages: [{ role: 'user', content: 'Say this is a test' }],
-  model: 'llama2',
-})
-```
-
-### `curl`
-
-```
-curl http://localhost:11434/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-        "model": "llama2",
-        "messages": [
-            {
-                "role": "system",
-                "content": "You are a helpful assistant."
-            },
-            {
-                "role": "user",
-                "content": "Hello!"
-            }
-        ]
-    }'
-```
-
-## Endpoints
-
-### `/v1/chat/completions`
-
-#### Supported features
-
-- [x] Chat completions
-- [x] Streaming
-- [x] JSON mode
-- [x] Reproducible outputs
-- [ ] Vision
-- [ ] Function calling
-- [ ] Logprobs
-
-#### Supported request fields
-
-- [x] `model`
-- [x] `messages`
-  - [x] Text `content`
-  - [ ] Array of `content` parts
-- [x] `frequency_penalty`
-- [x] `presence_penalty`
-- [x] `response_format`
-- [x] `seed`
-- [x] `stop`
-- [x] `stream`
-- [x] `temperature`
-- [x] `top_p`
-- [x] `max_tokens`
-- [ ] `logit_bias`
-- [ ] `tools`
-- [ ] `tool_choice`
-- [ ] `user`
-- [ ] `n`
-
-#### Notes
-
-- Setting `seed` will always set `temperature` to `0`
-- `finish_reason` will always be `stop`
-- `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached
-
-## Models
-
-Before using a model, pull it locally with `ollama pull`:
-
-```shell
-ollama pull llama2
-```
-
-### Default model names
-
-For tooling that relies on default OpenAI model names such as `gpt-3.5-turbo`, use `ollama cp` to copy an existing model name to a temporary name:
-
-```
-ollama cp llama2 gpt-3.5-turbo
-```
-
-Afterwards, this new model name can be specified in the `model` field:
-
-```shell
-curl http://localhost:11434/v1/chat/completions \
-    -H "Content-Type: application/json" \
-    -d '{
-        "model": "gpt-3.5-turbo",
-        "messages": [
-            {
-                "role": "user",
-                "content": "Hello!"
-            }
-        ]
-    }'
-```

docs/troubleshooting.md
@@ -1,72 +1,22 @@
# How to troubleshoot issues

-Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command:
+Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on Mac by running the command:

```shell
cat ~/.ollama/logs/server.log
```

-On **Linux** systems with systemd, the logs can be found with this command:
+On Linux systems with systemd, the logs can be found with this command:

```shell
journalctl -u ollama
```

-When you run Ollama in a **container**, the logs go to stdout/stderr in the container:
-
-```shell
-docker logs <container-name>
-```
-(Use `docker ps` to find the container name)
-
-If manually running `ollama serve` in a terminal, the logs will be on that terminal.
-
-When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and typing in:
-- `explorer %LOCALAPPDATA%\Ollama` to view logs
-- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (the installer adds this to your user PATH)
-- `explorer %HOMEPATH%\.ollama` to browse where models and configuration are stored
-- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories
-
-To enable additional debug logging to help troubleshoot problems, first **Quit the running app from the tray menu**, then in a powershell terminal:
-```powershell
-$env:OLLAMA_DEBUG="1"
-& "ollama app.exe"
-```
-
-Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.
-
-## LLM libraries
-
-Ollama includes multiple LLM libraries compiled for different GPUs and CPU
-vector features. Ollama tries to pick the best one based on the capabilities of
-your system. If this autodetection has problems, or you run into other problems
-(e.g. crashes in your GPU) you can work around this by forcing a specific LLM
-library. `cpu_avx2` will perform the best, followed by `cpu_avx`, and the slowest
-but most compatible is `cpu`. Rosetta emulation under MacOS will work with the
-`cpu` library.
-
-In the server log, you will see a message that looks something like this (varies
-from release to release):
-
-```
-Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
-```
-
-**Experimental LLM Library Override**
-
-You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass
-autodetection, so for example, if you have a CUDA card, but want to force the
-CPU LLM library with AVX2 vector support, use:
-
-```
-OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
-```
-
-You can see what features your CPU has with the following:
-```
-cat /proc/cpuinfo | grep flags | head -1
-```
-
-## Known issues
-
-* N/A
+If manually running `ollama serve` in a terminal, the logs will be on that terminal.
+
+Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.
+
+## Known issues
+
+* `signal: illegal instruction (core dumped)`: Ollama requires AVX support from the CPU. This was introduced in 2011 and CPUs started offering it in 2012. CPUs from before that and some lower end CPUs after that may not have AVX support and thus are not supported by Ollama. Some users have had luck with building Ollama on their machines, disabling the need for AVX.

docs/tutorials/nvidia-jetson.md
@@ -17,7 +17,7 @@ Prerequisites:

Here are the steps:

-- Install Ollama via standard Linux command (ignore the 404 error): `curl https://ollama.com/install.sh | sh`
+- Install Ollama via standard Linux command (ignore the 404 error): `curl https://ollama.ai/install.sh | sh`
- Stop the Ollama service: `sudo systemctl stop ollama`
- Start Ollama serve in a tmux session called ollama_jetson and reference the CUDA libraries path: `tmux has-session -t ollama_jetson 2>/dev/null || tmux new-session -d -s ollama_jetson
  'LD_LIBRARY_PATH=/usr/local/cuda/lib64 ollama serve'`

docs/windows.md
@@ -1,46 +0,0 @@
-# Ollama Windows Preview
-
-Welcome to the Ollama Windows preview.
-
-No more WSL required!
-
-Ollama now runs as a native Windows application, including NVIDIA GPU support.
-After installing Ollama Windows Preview, Ollama will run in the background and
-the `ollama` command line is available in `cmd`, `powershell` or your favorite
-terminal application. As usual the Ollama [api](./api.md) will be served on
-`http://localhost:11434`.
-
-As this is a preview release, you should expect a few bugs here and there. If
-you run into a problem you can reach out on
-[Discord](https://discord.gg/ollama), or file an
-[issue](https://github.com/ollama/ollama/issues).
-Logs will often be helpful in diagnosing the problem (see
-[Troubleshooting](#troubleshooting) below)
-
-## System Requirements
-
-* Windows 10 or newer, Home or Pro
-* NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
-
-## API Access
-
-Here's a quick example showing API access from `powershell`:
-
-```powershell
-(Invoke-WebRequest -method POST -Body '{"model":"llama2", "prompt":"Why is the sky blue?", "stream": false}' -uri http://localhost:11434/api/generate ).Content | ConvertFrom-json
-```
-
-## Troubleshooting
-
-While we're in preview, `OLLAMA_DEBUG` is always enabled, which adds
-a "view logs" menu item to the app, and increases logging for the GUI app and
-server.
-
-Ollama on Windows stores files in a few different locations. You can view them in
-the explorer window by hitting `<cmd>+R` and typing in:
-- `explorer %LOCALAPPDATA%\Ollama` contains logs, and downloaded updates
-  - *app.log* contains logs from the GUI application
-  - *server.log* contains the server logs
-  - *upgrade.log* contains log output for upgrades
-- `explorer %LOCALAPPDATA%\Programs\Ollama` contains the binaries (the installer adds this to your user PATH)
-- `explorer %HOMEPATH%\.ollama` contains models and configuration
-- `explorer %TEMP%` contains temporary executable files in one or more `ollama*` directories
@@ -18,8 +18,6 @@ func main() {
		os.Exit(1)
	}

-	defer resp.Body.Close()
-
	responseData, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
@@ -8,7 +8,7 @@
"outputs": [],
"source": [
  "# Download and run the Ollama Linux install script\n",
-  "!curl -fsSL https://ollama.com/install.sh | sh\n",
+  "!curl https://ollama.ai/install.sh | sh\n",
  "!command -v systemctl >/dev/null && sudo systemctl stop ollama"
]
},
@@ -2,28 +2,28 @@

## Prerequisites

-- Ollama: https://ollama.com/download
+- Ollama: https://ollama.ai/download
- Kubernetes cluster. This example will use Google Kubernetes Engine.

## Steps

1. Create the Ollama namespace, daemon set, and service

   ```bash
   kubectl apply -f cpu.yaml
   ```

1. Port forward the Ollama service to connect and use it locally

   ```bash
   kubectl -n ollama port-forward service/ollama 11434:80
   ```

1. Pull and run a model, for example `orca-mini:3b`

   ```bash
   ollama run orca-mini:3b
   ```

## (Optional) Hardware Acceleration