From 784bf88b0d0005b771e1bab5adfd6094a3693494 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 18 Jun 2024 16:22:47 -0700 Subject: [PATCH 001/106] Wire up windows AMD driver reporting This seems to be ROCm version, not actually driver version, but it may be useful for toggling logic for VRAM reporting in the future --- gpu/amd_hip_windows.go | 5 ++--- gpu/amd_windows.go | 17 +++++++---------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/gpu/amd_hip_windows.go b/gpu/amd_hip_windows.go index 8572a24c5..2586278c8 100644 --- a/gpu/amd_hip_windows.go +++ b/gpu/amd_hip_windows.go @@ -84,9 +84,8 @@ func (hl *HipLib) AMDDriverVersion() (driverMajor, driverMinor int, err error) { } slog.Debug("hipDriverGetVersion", "version", version) - // TODO - this isn't actually right, but the docs claim hipDriverGetVersion isn't accurate anyway... - driverMajor = version / 1000 - driverMinor = (version - (driverMajor * 1000)) / 10 + driverMajor = version / 10000000 + driverMinor = (version - (driverMajor * 10000000)) / 100000 return driverMajor, driverMinor, nil } diff --git a/gpu/amd_windows.go b/gpu/amd_windows.go index 21585277a..0c76f6b9d 100644 --- a/gpu/amd_windows.go +++ b/gpu/amd_windows.go @@ -35,12 +35,11 @@ func AMDGetGPUInfo() []RocmGPUInfo { } defer hl.Release() - // TODO - this reports incorrect version information, so omitting for now - // driverMajor, driverMinor, err := hl.AMDDriverVersion() - // if err != nil { - // // For now this is benign, but we may eventually need to fail compatibility checks - // slog.Debug("error looking up amd driver version", "error", err) - // } + driverMajor, driverMinor, err := hl.AMDDriverVersion() + if err != nil { + // For now this is benign, but we may eventually need to fail compatibility checks + slog.Debug("error looking up amd driver version", "error", err) + } // Note: the HIP library automatically handles subsetting to any HIP_VISIBLE_DEVICES the user specified count := hl.HipGetDeviceCount() @@ -131,10 +130,8 @@ func AMDGetGPUInfo() []RocmGPUInfo { MinimumMemory: rocmMinimumMemory, Name: name, Compute: gfx, - - // TODO - this information isn't accurate on windows, so don't report it until we find the right way to retrieve - // DriverMajor: driverMajor, - // DriverMinor: driverMinor, + DriverMajor: driverMajor, + DriverMinor: driverMinor, }, index: i, } From 1a1c99e3346da21bf2062fa266cf39da954c66a8 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 18 Jun 2024 17:13:54 -0700 Subject: [PATCH 002/106] Bump latest fedora cuda repo to 39 --- scripts/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/install.sh b/scripts/install.sh index 0f12d7e09..2a06c350a 100644 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -279,7 +279,7 @@ if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\ case $OS_NAME in centos|rhel) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -d '.' 
-f 1) ;; rocky) install_cuda_driver_yum 'rhel' $(echo $OS_VERSION | cut -c1) ;; - fedora) [ $OS_VERSION -lt '37' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '37';; + fedora) [ $OS_VERSION -lt '39' ] && install_cuda_driver_yum $OS_NAME $OS_VERSION || install_cuda_driver_yum $OS_NAME '39';; amzn) install_cuda_driver_yum 'fedora' '37' ;; debian) install_cuda_driver_apt $OS_NAME $OS_VERSION ;; ubuntu) install_cuda_driver_apt $OS_NAME $(echo $OS_VERSION | sed 's/\.//') ;; From 23e899f32d9f7b3bbe0b902a95c23be5a1254409 Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Thu, 20 Jun 2024 08:51:35 -0700 Subject: [PATCH 003/106] skip os.removeAll() if PID does not exist --- gpu/assets.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gpu/assets.go b/gpu/assets.go index f2adcf3e3..fdb3dd81d 100644 --- a/gpu/assets.go +++ b/gpu/assets.go @@ -87,6 +87,8 @@ func cleanupTmpDirs() { } } else { slog.Debug("failed to open ollama.pid", "path", d, "error", err) + // No pid, ignore this tmpdir + continue } err = os.RemoveAll(d) if err != nil { From 4ebb66c6623d85f4fb69db0406ddd05bdc2d893d Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Thu, 20 Jun 2024 09:23:43 -0700 Subject: [PATCH 004/106] reformat error check --- gpu/assets.go | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/gpu/assets.go b/gpu/assets.go index fdb3dd81d..e2abfd58f 100644 --- a/gpu/assets.go +++ b/gpu/assets.go @@ -77,19 +77,20 @@ func cleanupTmpDirs() { continue } raw, err := os.ReadFile(filepath.Join(d, "ollama.pid")) - if err == nil { - pid, err := strconv.Atoi(string(raw)) - if err == nil { - if proc, err := os.FindProcess(pid); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) { - // Another running ollama, ignore this tmpdir - continue - } - } - } else { - slog.Debug("failed to open ollama.pid", "path", d, "error", err) + if err != nil { + slog.Warn("failed to read ollama.pid", "path", d, "error", err) // No pid, ignore this tmpdir continue } + + pid, err := strconv.Atoi(string(raw)) + if err == nil { + if proc, err := os.FindProcess(pid); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) { + // Another running ollama, ignore this tmpdir + continue + } + } + err = os.RemoveAll(d) if err != nil { slog.Debug("unable to cleanup stale tmpdir", "path", d, "error", err) From 662568d453debcf77d2e077ef98cfb2cfab8575e Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Thu, 20 Jun 2024 09:30:59 -0700 Subject: [PATCH 005/106] err!=nil check --- gpu/assets.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/gpu/assets.go b/gpu/assets.go index e2abfd58f..073d2e813 100644 --- a/gpu/assets.go +++ b/gpu/assets.go @@ -84,16 +84,20 @@ func cleanupTmpDirs() { } pid, err := strconv.Atoi(string(raw)) - if err == nil { - if proc, err := os.FindProcess(pid); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) { - // Another running ollama, ignore this tmpdir - continue - } + if err != nil { + slog.Warn("failed to parse pid", "path", d, "error", err) + continue } - err = os.RemoveAll(d) - if err != nil { - slog.Debug("unable to cleanup stale tmpdir", "path", d, "error", err) + proc, err := os.FindProcess(pid) + if err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) { + slog.Warn("found running ollama", "pid", pid, "path", d) + // Another running ollama, ignore this tmpdir + continue + } + + if err := os.Remove(d); err != nil { + slog.Warn("unable to 
cleanup stale tmpdir", "path", d, "error", err) } } } From 8e0641a9bffd0dde96e94a34bb3a4929da66c772 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 20 Jun 2024 09:40:17 -0700 Subject: [PATCH 006/106] handle asymmetric embedding KVs --- llm/ggml.go | 40 +++++++++++++++++++++++++++++++++------- llm/memory.go | 4 ++-- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/llm/ggml.go b/llm/ggml.go index 4d9ba97a8..f02f0ff60 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -69,6 +69,30 @@ func (kv KV) HeadCountKV() uint64 { return 1 } +func (kv KV) EmbeddingHeadCount() uint64 { + if heads := kv.HeadCount(); heads > 0 { + return kv.EmbeddingLength() / kv.HeadCount() + } + + return 0 +} + +func (kv KV) EmbeddingHeadCountK() uint64 { + if k := kv.u64(fmt.Sprintf("%s.attention.key_length", kv.Architecture())); k > 0 { + return k + } + + return kv.EmbeddingHeadCount() +} + +func (kv KV) EmbeddingHeadCountV() uint64 { + if v := kv.u64(fmt.Sprintf("%s.attention.value_length", kv.Architecture())); v > 0 { + return v + } + + return kv.EmbeddingHeadCount() +} + func (kv KV) GQA() uint64 { return kv.HeadCount() / kv.HeadCountKV() } @@ -299,6 +323,9 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui headsKV := llm.KV().HeadCountKV() vocab := uint64(len(llm.KV()["tokenizer.ggml.tokens"].([]any))) + embeddingHeads := llm.KV().EmbeddingHeadCount() + embeddingHeadsK := llm.KV().EmbeddingHeadCountK() + layers := llm.Tensors().Layers() switch llm.KV().Architecture() { @@ -308,7 +335,7 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui partialOffload = 4 * batch * embedding partialOffload += max( // 4*batch*(4+6*embedding+context*(2*heads)+llm.KV().GQA()), - 4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embedding/heads*headsKV), + 4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embeddingHeads*headsKV), 4*batch*(embedding+vocab)+embedding*vocab*105/128, ) @@ -316,15 +343,15 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui // mixtral 8x22b ff := uint64(llm.KV()["llama.feed_forward_length"].(uint32)) partialOffload = max( - 3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embedding/heads*headsKV), - 4*(context*batch*heads+context*embedding/heads*headsKV+batch*1024+embedding/heads*headsKV*batch), + 3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embeddingHeads*headsKV), + 4*(context*batch*heads+context*embeddingHeads*headsKV+batch*1024+embeddingHeads*headsKV*batch), ) } else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok { // mixtral 8x7b ffnGateWeight1 := ffnGateWeight.Shape[1] fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1) partialOffload = max( - 4*batch*(3+embedding/heads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16, + 4*batch*(3+embeddingHeads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16, 4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16), ) } @@ -368,15 +395,14 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui fullOffload, ) case "deepseek2": - keys := uint64(llm.KV()["deepseek2.attention.key_length"].(uint32)) fullOffload = max( 4*batch*(3*embedding+vocab), - 
4*batch*(3*embedding+2+context*(1+headsKV)+2*keys*headsKV), + 4*batch*(3*embedding+2+context*(1+headsKV)+2*embeddingHeadsK*headsKV), ) partialOffload = max( 4*batch*(3*embedding+vocab)+embedding*vocab*105/128, - 4*batch*(2*embedding+1+2*keys*headsKV+context+context*headsKV)+4*keys*context*headsKV+embedding*keys*headsKV*9/16, + 4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16, ) } diff --git a/llm/memory.go b/llm/memory.go index b8b862bd6..19b12cbfc 100644 --- a/llm/memory.go +++ b/llm/memory.go @@ -115,8 +115,8 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts slog.Warn("model missing blk.0 layer size") } - // fp16 k,v = (1 (k) + 1 (v)) * sizeof(float16) * n_ctx * n_layer * n_embd / n_head * n_head_kv - var kv uint64 = 2 * 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * ggml.KV().EmbeddingLength() / ggml.KV().HeadCount() * ggml.KV().HeadCountKV() + // fp16 k,v = sizeof(float16) * n_ctx * n_layer * (n_embd_head_k + n_embd_head_v) * n_head_kv + var kv uint64 = 2 * uint64(opts.NumCtx) * ggml.KV().BlockCount() * (ggml.KV().EmbeddingHeadCountK() + ggml.KV().EmbeddingHeadCountV()) * ggml.KV().HeadCountKV() // KV is proportional to the number of layers layerSize += kv / ggml.KV().BlockCount() From 5bf5aeec0140a70eeb94b65c61dbb3b75ff33e56 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Thu, 20 Jun 2024 11:07:04 -0700 Subject: [PATCH 007/106] Refine mmap default logic on linux If we try to use mmap when the model is larger than the system free space, loading is slower than the no-mmap approach. --- llm/server.go | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/llm/server.go b/llm/server.go index ed5f288f2..da83416ee 100644 --- a/llm/server.go +++ b/llm/server.go @@ -81,7 +81,17 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr var err error var cpuRunner string var estimate MemoryEstimate - var systemMemory uint64 + var systemTotalMemory uint64 + var systemFreeMemory uint64 + + systemMemInfo, err := gpu.GetCPUMem() + if err != nil { + slog.Error("failed to lookup system memory", "error", err) + } else { + systemTotalMemory = systemMemInfo.TotalMemory + systemFreeMemory = systemMemInfo.FreeMemory + slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", systemFreeMemory) + } // If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info if opts.NumGPU == 0 { @@ -91,19 +101,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr cpuRunner = serverForCpu() estimate = EstimateGPULayers(gpus, ggml, projectors, opts) } else { - if gpus[0].Library == "metal" { - memInfo, err := gpu.GetCPUMem() - if err != nil { - slog.Error("failed to lookup system memory", "error", err) - } else { - systemMemory = memInfo.TotalMemory - slog.Debug("system memory", "total", format.HumanBytes2(systemMemory)) - } - } estimate = EstimateGPULayers(gpus, ggml, projectors, opts) switch { - case gpus[0].Library == "metal" && estimate.VRAMSize > systemMemory: + case gpus[0].Library == "metal" && estimate.VRAMSize > systemTotalMemory: // disable partial offloading when model is greater than total system memory as this // can lead to locking up the system opts.NumGPU = 0 @@ -211,7 +212,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr } // Windows CUDA should not use mmap for best performance - if 
(runtime.GOOS == "windows" && gpus[0].Library == "cuda") || opts.UseMMap == api.TriStateFalse { + // Linux with a model larger than free space, mmap leads to thrashing + if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) || + (runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) || + opts.UseMMap == api.TriStateFalse { params = append(params, "--no-mmap") } From 7e7749224c57ea4d7ae98e4d07dcb00e192a5c7c Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 21 Jun 2024 12:27:19 -0700 Subject: [PATCH 008/106] Fix use_mmap parsing for modelfiles Add the new tristate parsing logic for the code path for modelfiles, as well as a unit test. --- api/types.go | 13 ++++++++++ api/types_test.go | 63 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+) diff --git a/api/types.go b/api/types.go index 0a1189e70..95ed5d37e 100644 --- a/api/types.go +++ b/api/types.go @@ -608,6 +608,19 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) { } else { field := valueOpts.FieldByName(opt.Name) if field.IsValid() && field.CanSet() { + if reflect.PointerTo(field.Type()) == reflect.TypeOf((*TriState)(nil)) { + boolVal, err := strconv.ParseBool(vals[0]) + if err != nil { + return nil, fmt.Errorf("invalid bool value %s", vals) + } + if boolVal { + out[key] = TriStateTrue + } else { + out[key] = TriStateFalse + } + continue + } + switch field.Kind() { case reflect.Float32: floatVal, err := strconv.ParseFloat(vals[0], 32) diff --git a/api/types_test.go b/api/types_test.go index 7b4a0f83c..8b6c60c62 100644 --- a/api/types_test.go +++ b/api/types_test.go @@ -2,6 +2,7 @@ package api import ( "encoding/json" + "fmt" "math" "testing" "time" @@ -141,3 +142,65 @@ func TestUseMmapParsingFromJSON(t *testing.T) { }) } } + +func TestUseMmapFormatParams(t *testing.T) { + tests := []struct { + name string + req map[string][]string + exp TriState + err error + }{ + { + name: "True", + req: map[string][]string{ + "use_mmap": []string{"true"}, + }, + exp: TriStateTrue, + err: nil, + }, + { + name: "False", + req: map[string][]string{ + "use_mmap": []string{"false"}, + }, + exp: TriStateFalse, + err: nil, + }, + { + name: "Numeric True", + req: map[string][]string{ + "use_mmap": []string{"1"}, + }, + exp: TriStateTrue, + err: nil, + }, + { + name: "Numeric False", + req: map[string][]string{ + "use_mmap": []string{"0"}, + }, + exp: TriStateFalse, + err: nil, + }, + { + name: "invalid string", + req: map[string][]string{ + "use_mmap": []string{"foo"}, + }, + exp: TriStateUndefined, + err: fmt.Errorf("invalid bool value [foo]"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + resp, err := FormatParams(test.req) + require.Equal(t, err, test.err) + respVal, ok := resp["use_mmap"] + if test.exp != TriStateUndefined { + assert.True(t, ok, "resp: %v", resp) + assert.Equal(t, test.exp, respVal) + } + }) + } +} From e835ef183691db1cc7da30cfc61fb4b96b321e80 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 21 Jun 2024 13:30:43 -0700 Subject: [PATCH 009/106] fix: quantization with template --- server/images.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/server/images.go b/server/images.go index 53a957715..98794149e 100644 --- a/server/images.go +++ b/server/images.go @@ -414,17 +414,22 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio return err } - layers, err := 
parseFromFile(ctx, temp, "", fn) + layer, err := NewLayer(temp, baseLayer.MediaType) if err != nil { return err } - if len(layers) != 1 { - return errors.New("quantization failed") + if _, err := temp.Seek(0, io.SeekStart); err != nil { + return err } - baseLayer.Layer = layers[0].Layer - baseLayer.GGML = layers[0].GGML + ggml, _, err := llm.DecodeGGML(temp) + if err != nil { + return err + } + + baseLayer.Layer = layer + baseLayer.GGML = ggml } } From 17b7186cd759337fa98b626e82de150f3789b040 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 6 May 2024 17:47:52 -0700 Subject: [PATCH 010/106] Enable concurrency by default This adjusts our default settings to enable multiple models and parallel requests to a single model. Users can still override these by the same env var settings as before. Parallel has a direct impact on num_ctx, which in turn can have a significant impact on small VRAM GPUs so this change also refines the algorithm so that when parallel is not explicitly set by the user, we try to find a reasonable default that fits the model on their GPU(s). As before, multiple models will only load concurrently if they fully fit in VRAM. --- envconfig/config.go | 16 ++++---- llm/server.go | 13 ++---- server/sched.go | 98 +++++++++++++++++++++++++++++++++----------- server/sched_test.go | 80 +++++++++++++++++++++++------------- 4 files changed, 135 insertions(+), 72 deletions(-) diff --git a/envconfig/config.go b/envconfig/config.go index e86f72e6a..cb456448c 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -85,13 +85,13 @@ func AsMap() map[string]EnvVar { "OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"}, "OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"}, "OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"}, - "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"}, + "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU (default 4)"}, "OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"}, "OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"}, "OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"}, "OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"}, "OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"}, - "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"}, + "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests"}, "OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"}, "OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"}, "OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"}, @@ -129,8 +129,8 @@ func clean(key string) string { func init() { // default values - NumParallel = 1 - MaxRunners = 1 + NumParallel = 0 + MaxRunners = 4 MaxQueuedRequests = 512 LoadConfig() @@ -205,8 +205,8 @@ func LoadConfig() { if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" { val, err := strconv.Atoi(onp) - if err != nil || val <= 0 { - slog.Error("invalid setting must be greater than zero", "OLLAMA_NUM_PARALLEL", onp, "error", err) + if err != nil { + 
slog.Error("invalid setting, ignoring", "OLLAMA_NUM_PARALLEL", onp, "error", err) } else { NumParallel = val } @@ -251,7 +251,7 @@ func LoadConfig() { if maxRunners != "" { m, err := strconv.Atoi(maxRunners) if err != nil { - slog.Error("invalid setting", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err) + slog.Error("invalid setting, ignoring", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err) } else { MaxRunners = m } @@ -260,7 +260,7 @@ func LoadConfig() { if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" { p, err := strconv.Atoi(onp) if err != nil || p <= 0 { - slog.Error("invalid setting", "OLLAMA_MAX_QUEUE", onp, "error", err) + slog.Error("invalid setting, ignoring", "OLLAMA_MAX_QUEUE", onp, "error", err) } else { MaxQueuedRequests = p } diff --git a/llm/server.go b/llm/server.go index da83416ee..3cb5ac1f0 100644 --- a/llm/server.go +++ b/llm/server.go @@ -77,7 +77,7 @@ func LoadModel(model string) (*GGML, error) { // NewLlamaServer will run a server for the given GPUs // The gpu list must be a single family. -func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) { +func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) { var err error var cpuRunner string var estimate MemoryEstimate @@ -213,8 +213,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr // Windows CUDA should not use mmap for best performance // Linux with a model larger than free space, mmap leads to thrashing + // For CPU loads we want the memory to be allocated, not FS cache if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) || (runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) || + (gpus[0].Library == "cpu" && opts.UseMMap == api.TriStateUndefined) || opts.UseMMap == api.TriStateFalse { params = append(params, "--no-mmap") } @@ -227,15 +229,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--numa") } - numParallel := envconfig.NumParallel - - // TODO (jmorganca): multimodal models don't support parallel yet - // see https://github.com/ollama/ollama/issues/4165 - if len(projectors) > 0 { - numParallel = 1 - slog.Warn("multimodal models don't support parallel requests yet") - } - params = append(params, "--parallel", fmt.Sprintf("%d", numParallel)) if estimate.TensorSplit != "" { diff --git a/server/sched.go b/server/sched.go index 424395544..31ef560f5 100644 --- a/server/sched.go +++ b/server/sched.go @@ -23,6 +23,7 @@ type LlmRequest struct { ctx context.Context //nolint:containedctx model *Model opts api.Options + origNumCTX int // Track the initial ctx request sessionDuration time.Duration successCh chan *runnerRef errCh chan error @@ -38,8 +39,8 @@ type Scheduler struct { loaded map[string]*runnerRef loadedMu sync.Mutex - loadFn func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) - newServerFn func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) + loadFn func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int) + newServerFn func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) getGpuFn func() gpu.GpuInfoList 
getCpuFn func() gpu.GpuInfoList reschedDelay time.Duration @@ -65,13 +66,10 @@ func InitScheduler(ctx context.Context) *Scheduler { // context must be canceled to decrement ref count and release the runner func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration time.Duration) (chan *runnerRef, chan error) { - // allocate a large enough kv cache for all parallel requests if opts.NumCtx < 4 { opts.NumCtx = 4 } - opts.NumCtx *= envconfig.NumParallel - req := &LlmRequest{ ctx: c, model: model, @@ -102,6 +100,7 @@ func (s *Scheduler) Run(ctx context.Context) { } func (s *Scheduler) processPending(ctx context.Context) { + maxRunnerFactor := 1 // number of GPUs or 1 for { select { case <-ctx.Done(): @@ -110,11 +109,25 @@ func (s *Scheduler) processPending(ctx context.Context) { case pending := <-s.pendingReqCh: // Block other requests until we get this pending request running pending.schedAttempts++ + if pending.origNumCTX == 0 { + pending.origNumCTX = pending.opts.NumCtx + } if pending.ctx.Err() != nil { slog.Debug("pending request cancelled or timed out, skipping scheduling") continue } + numParallel := envconfig.NumParallel + // TODO (jmorganca): multimodal models don't support parallel yet + // see https://github.com/ollama/ollama/issues/4165 + if len(pending.model.ProjectorPaths) > 0 && numParallel != 1 { + numParallel = 1 + slog.Warn("multimodal models don't support parallel requests yet") + } + // Keep NumCtx and numParallel in sync + if numParallel > 1 { + pending.opts.NumCtx = pending.origNumCTX * numParallel + } for { var runnerToExpire *runnerRef @@ -130,7 +143,7 @@ func (s *Scheduler) processPending(ctx context.Context) { pending.useLoadedRunner(runner, s.finishedReqCh) break } - } else if envconfig.MaxRunners > 0 && loadedCount >= envconfig.MaxRunners { + } else if envconfig.MaxRunners > 0 && loadedCount >= (maxRunnerFactor*envconfig.MaxRunners) { slog.Debug("max runners achieved, unloading one to make room", "runner_count", loadedCount) runnerToExpire = s.findRunnerToUnload() } else { @@ -142,6 +155,7 @@ func (s *Scheduler) processPending(ctx context.Context) { } else { gpus = s.getGpuFn() } + maxRunnerFactor = max(len(gpus), 1) // Load model for fitting ggml, err := llm.LoadModel(pending.model.ModelPath) @@ -152,26 +166,32 @@ func (s *Scheduler) processPending(ctx context.Context) { // Evaluate if the model will fit in the available system memory, or if we should unload a model first if len(gpus) == 1 && gpus[0].Library == "cpu" { + // simplifying assumption of defaultParallel when in CPU mode + if numParallel <= 0 { + numParallel = defaultParallel + pending.opts.NumCtx = pending.origNumCTX * numParallel + } + if loadedCount == 0 { slog.Debug("cpu mode with first model, loading") - s.loadFn(pending, ggml, gpus) + s.loadFn(pending, ggml, gpus, numParallel) break } runnerToExpire = s.maybeFindCPURunnerToUnload(pending, ggml, gpus) if runnerToExpire == nil { slog.Debug("cpu mode with available system memory or first model, loading") - s.loadFn(pending, ggml, gpus) + s.loadFn(pending, ggml, gpus, numParallel) break } // else we need to expire a runner } else if loadedCount == 0 { // No models loaded. Load the model but prefer the best fit. 
slog.Debug("loading first model", "model", pending.model.ModelPath) - g := pickBestFitGPUs(pending, ggml, gpus) + g := pickBestFitGPUs(pending, ggml, gpus, &numParallel) if g != nil { gpus = g } - s.loadFn(pending, ggml, gpus) + s.loadFn(pending, ggml, gpus, numParallel) break } @@ -186,10 +206,10 @@ func (s *Scheduler) processPending(ctx context.Context) { // Update free memory from currently loaded models s.updateFreeSpace(availGpus) - fitGpus := pickBestFitGPUs(pending, ggml, availGpus) + fitGpus := pickBestFitGPUs(pending, ggml, availGpus, &numParallel) if fitGpus != nil { slog.Debug("new model fits with existing models, loading") - s.loadFn(pending, ggml, fitGpus) + s.loadFn(pending, ggml, fitGpus, numParallel) break } @@ -350,8 +370,11 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *Llm }() } -func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) { - llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts) +func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int) { + if numParallel < 1 { + numParallel = 1 + } + llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts, numParallel) if err != nil { // some older models are not compatible with newer versions of llama.cpp // show a generalized compatibility error until there is a better way to @@ -375,6 +398,7 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) loading: true, refCount: 1, } + runner.numParallel = numParallel runner.refMu.Lock() s.loadedMu.Lock() @@ -483,8 +507,9 @@ type runnerRef struct { expireTimer *time.Timer expiresAt time.Time - model *Model - modelPath string + model *Model + modelPath string + numParallel int *api.Options } @@ -525,6 +550,9 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool optsNew.NumGPU = -1 } + // Normalize the NumCtx for parallelism + optsExisting.NumCtx = optsExisting.NumCtx / runner.numParallel + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() if !reflect.DeepEqual(runner.model.AdapterPaths, req.model.AdapterPaths) || // have the adapters changed? @@ -611,22 +639,38 @@ func (a ByDuration) Less(i, j int) bool { // pickBestFitGPUs will try to find the optimal placement of the model in the available GPUs where the model fully fits // If the model can not be fit fully within the available GPU(s) nil is returned -func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) gpu.GpuInfoList { +// If numParallel is <= 0, this will attempt try to optimize parallism based on available VRAM, and adjust +// opts.NumCtx accordingly +func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel *int) gpu.GpuInfoList { var estimatedVRAM uint64 + + var numParallelToTry []int + if *numParallel <= 0 { + // If no specific parallel setting was provided, try larger then smaller, always end with 1 + numParallelToTry = append(numParallelToTry, 4, 1) + } else { + numParallelToTry = []int{*numParallel} + } + for _, gl := range gpus.ByLibrary() { var ok bool sgl := append(make(gpu.GpuInfoList, 0, len(gl)), gl...) // TODO - potentially sort by performance capability, existing models loaded, etc. 
+ // TODO - Eliminate any GPUs that already have envconfig.MaxRunners loaded on them // Note: at present, this will favor more VRAM over faster GPU speed in mixed setups sort.Sort(sort.Reverse(gpu.ByFreeMemory(sgl))) // First attempt to fit the model into a single GPU - if !envconfig.SchedSpread { - for _, g := range sgl { - if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok { - slog.Debug("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM)) - return []gpu.GpuInfo{g} + for _, p := range numParallelToTry { + req.opts.NumCtx = req.origNumCTX * p + if !envconfig.SchedSpread { + for _, g := range sgl { + if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok { + slog.Info("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "parallel", p, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM)) + *numParallel = p + return []gpu.GpuInfo{g} + } } } } @@ -636,9 +680,13 @@ func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) gpu. // - try subsets of GPUs instead of just falling back to 1 or all in a family // Now try all the GPUs - if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok { - slog.Debug("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "required", format.HumanBytes2(estimatedVRAM)) - return sgl + for _, p := range numParallelToTry { + req.opts.NumCtx = req.origNumCTX * p + if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok { + slog.Info("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "parallel", p, "required", format.HumanBytes2(estimatedVRAM)) + *numParallel = p + return sgl + } } } return nil diff --git a/server/sched_test.go b/server/sched_test.go index 953288347..5e5913a7c 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -47,11 +47,11 @@ func TestLoad(t *testing.T) { sessionDuration: 2, } // Fail to load model first - s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) { + s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { return nil, fmt.Errorf("something failed to load model blah") } gpus := gpu.GpuInfoList{} - s.load(req, ggml, gpus) + s.load(req, ggml, gpus, 0) require.Empty(t, req.successCh) require.Len(t, req.errCh, 1) s.loadedMu.Lock() @@ -61,10 +61,10 @@ func TestLoad(t *testing.T) { require.Contains(t, err.Error(), "this model may be incompatible") server := &mockLlm{estimatedVRAM: 10, estimatedVRAMByGPU: map[string]uint64{}} - s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) { + s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { return server, nil } - s.load(req, ggml, gpus) + s.load(req, 
ggml, gpus, 0) select { case err := <-req.errCh: require.NoError(t, err) @@ -78,12 +78,12 @@ func TestLoad(t *testing.T) { req.model.ModelPath = "dummy_model_path" server.waitResp = fmt.Errorf("wait failure") - s.load(req, ggml, gpus) + s.load(req, ggml, gpus, 0) select { case err := <-req.errCh: require.Contains(t, err.Error(), "wait failure") case resp := <-req.successCh: - t.Errorf("unexpected success %v", resp) + t.Fatalf("unexpected success %v", resp) } s.loadedMu.Lock() runner := s.loaded["dummy_model_path"] @@ -102,7 +102,7 @@ type bundle struct { ggml *llm.GGML } -func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) { +func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { return scenario.srv, nil } @@ -200,7 +200,7 @@ func TestRequests(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, scenario1a.req.errCh) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } // Same runner as first request due to not needing a reload @@ -213,7 +213,7 @@ func TestRequests(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, scenario1b.req.errCh) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } // Trigger a reload @@ -231,7 +231,7 @@ func TestRequests(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, scenario2a.req.errCh) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } envconfig.MaxRunners = 1 @@ -247,7 +247,7 @@ func TestRequests(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3a.req.errCh) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } s.loadedMu.Lock() require.Len(t, s.loaded, 1) @@ -263,7 +263,7 @@ func TestRequests(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3b.req.errCh) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } s.loadedMu.Lock() require.Len(t, s.loaded, 2) @@ -279,7 +279,7 @@ func TestRequests(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3c.req.errCh) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } s.loadedMu.Lock() require.Len(t, s.loaded, 3) @@ -306,7 +306,7 @@ func TestRequests(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3d.req.errCh) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } s.loadedMu.Lock() require.Len(t, s.loaded, 2) @@ -349,7 +349,7 @@ func TestGetRunner(t *testing.T) { require.Empty(t, s.pendingReqCh) require.Empty(t, errCh1a) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } scenario1a.ctxDone() s.loadedMu.Lock() @@ -400,7 +400,7 @@ func TestPrematureExpired(t *testing.T) { slog.Info("sending premature expired event now") s.expiredCh <- resp // Shouldn't happen in real life, but make sure its safe case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } time.Sleep(scenario1a.req.sessionDuration) scenario1a.ctxDone() @@ -427,7 +427,7 @@ func TestUseLoadedRunner(t *testing.T) { } finished := make(chan *LlmRequest) llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}} - r1 := &runnerRef{llama: llm1, sessionDuration: 1} + r1 := &runnerRef{llama: llm1, sessionDuration: 1, numParallel: 1} req.useLoadedRunner(r1, finished) require.Equal(t, uint(1), r1.refCount) require.Equal(t, time.Duration(2), r1.sessionDuration) @@ -435,7 +435,7 @@ func 
TestUseLoadedRunner(t *testing.T) { case success := <-req.successCh: require.Equal(t, r1, success) case <-ctx.Done(): - t.Errorf("timeout") + t.Fatal("timeout") } done() fin := <-finished @@ -461,8 +461,8 @@ func TestUpdateFreeSpace(t *testing.T) { gpus[1].FreeMemory = 1900 llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 50, "2": 50}} llm2 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 125, "2": 75}} - r1 := &runnerRef{llama: llm1, gpus: gpus} - r2 := &runnerRef{llama: llm2, gpus: gpus} + r1 := &runnerRef{llama: llm1, gpus: gpus, numParallel: 1} + r2 := &runnerRef{llama: llm2, gpus: gpus, numParallel: 1} s := InitScheduler(ctx) s.loadedMu.Lock() @@ -513,8 +513,8 @@ func TestFindRunnerToUnload(t *testing.T) { ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond) defer done() - r1 := &runnerRef{refCount: 1, sessionDuration: 1} - r2 := &runnerRef{sessionDuration: 2} + r1 := &runnerRef{refCount: 1, sessionDuration: 1, numParallel: 1} + r2 := &runnerRef{sessionDuration: 2, numParallel: 1} s := InitScheduler(ctx) s.loadedMu.Lock() @@ -536,9 +536,13 @@ func TestNeedsReload(t *testing.T) { llm := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}} do := api.DefaultOptions() runner := &runnerRef{ - model: &Model{AdapterPaths: []string{"adapter1"}, ProjectorPaths: []string{"projector1"}}, - Options: &do, - llama: llm, + model: &Model{ + AdapterPaths: []string{"adapter1"}, + ProjectorPaths: []string{"projector1"}, + }, + Options: &do, + llama: llm, + numParallel: 1, } req := &LlmRequest{ model: &Model{ @@ -581,8 +585,8 @@ func TestUnloadAllRunners(t *testing.T) { s := InitScheduler(ctx) s.unloadAllRunners() - r1 := &runnerRef{llama: llm1} - r2 := &runnerRef{llama: llm2} + r1 := &runnerRef{llama: llm1, numParallel: 1} + r2 := &runnerRef{llama: llm2, numParallel: 1} s.loadedMu.Lock() s.loaded["a"] = r1 @@ -596,14 +600,32 @@ func TestUnloadAllRunners(t *testing.T) { func TestUnload(t *testing.T) { llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}} - r1 := &runnerRef{llama: llm1} - r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}} + r1 := &runnerRef{llama: llm1, numParallel: 1} + r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}, numParallel: 1} r1.unload() require.True(t, llm1.closeCalled) r2.unload() require.Nil(t, r2.model) } +func TestAlreadyCanceled(t *testing.T) { + ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond) + defer done() + dctx, done2 := context.WithCancel(ctx) + done2() + scenario1a := newScenario(t, dctx, "ollama-model-1", 10) + scenario1a.req.sessionDuration = 0 + s := InitScheduler(ctx) + slog.Info("scenario1a") + s.pendingReqCh <- scenario1a.req + require.Len(t, s.pendingReqCh, 1) + s.Run(ctx) + time.Sleep(5 * time.Millisecond) + require.Empty(t, s.pendingReqCh) + require.Empty(t, scenario1a.req.errCh) + require.Empty(t, scenario1a.req.successCh) +} + type mockLlm struct { pingResp error waitResp error From 9929751cc8b415e7b83d5151742dad734e8b5efc Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 19 Jun 2024 13:35:38 -0700 Subject: [PATCH 011/106] Disable concurrency for AMD + Windows Until ROCm v6.2 ships, we wont be able to get accurate free memory reporting on windows, which makes automatic concurrency too risky. Users can still opt-in but will need to pay attention to model sizes otherwise they may thrash/page VRAM or cause OOM crashes. All other platforms and GPUs have accurate VRAM reporting wired up now, so we can turn on concurrency by default. 
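Roughly, the automatic default now behaves like the sketch below. This is a simplified illustration of the server/sched.go change in this patch, not the code itself: gpuInfo and defaultMaxRunners are illustrative stand-ins for gpu.GpuInfo and the inline logic in processPending, while defaultModelsPerGPU and the per-GPU fallback match the hunk below.

    package main

    import "fmt"

    // gpuInfo stands in for gpu.GpuInfo; only the field that drives the
    // concurrency default is shown.
    type gpuInfo struct {
        ID                   string
        UnreliableFreeMemory bool // true for AMD on Windows until ROCm v6.2
    }

    // defaultModelsPerGPU mirrors the scheduler default introduced here.
    const defaultModelsPerGPU = 3

    // defaultMaxRunners picks the automatic OLLAMA_MAX_LOADED_MODELS value:
    // if every GPU reports free VRAM reliably, allow defaultModelsPerGPU
    // models per GPU; otherwise fall back to one model per GPU so we never
    // overcommit VRAM we cannot measure.
    func defaultMaxRunners(gpus []gpuInfo) int {
        for _, g := range gpus {
            if g.UnreliableFreeMemory {
                return len(gpus)
            }
        }
        return defaultModelsPerGPU * len(gpus)
    }

    func main() {
        gpus := []gpuInfo{{ID: "0"}, {ID: "1", UnreliableFreeMemory: true}}
        fmt.Println(defaultMaxRunners(gpus)) // 2: one unreliable GPU disables the 3x factor
    }

Setting OLLAMA_MAX_LOADED_MODELS or OLLAMA_NUM_PARALLEL explicitly skips this auto-selection entirely, which is how Windows + AMD users can opt back in.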
--- envconfig/config.go | 8 ++++---- gpu/amd_windows.go | 5 +++-- gpu/types.go | 5 +++++ server/sched.go | 36 ++++++++++++++++++++++++++++++++---- 4 files changed, 44 insertions(+), 10 deletions(-) diff --git a/envconfig/config.go b/envconfig/config.go index cb456448c..0f0f7f058 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -85,13 +85,13 @@ func AsMap() map[string]EnvVar { "OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"}, "OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"}, "OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"}, - "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU (default 4)"}, + "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU (default auto)"}, "OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"}, "OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"}, "OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"}, "OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"}, "OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"}, - "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests"}, + "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default auto)"}, "OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"}, "OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"}, "OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"}, @@ -129,8 +129,8 @@ func clean(key string) string { func init() { // default values - NumParallel = 0 - MaxRunners = 4 + NumParallel = 0 // Autoselect + MaxRunners = 0 // Autoselect MaxQueuedRequests = 512 LoadConfig() diff --git a/gpu/amd_windows.go b/gpu/amd_windows.go index 21585277a..8b6fabebb 100644 --- a/gpu/amd_windows.go +++ b/gpu/amd_windows.go @@ -115,8 +115,6 @@ func AMDGetGPUInfo() []RocmGPUInfo { continue } - // TODO revisit this once ROCm v6 is available on windows. - // v5.7 only reports VRAM used by this process, so it's completely wrong and unusable slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory)) slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory)) gpuInfo := RocmGPUInfo{ @@ -126,6 +124,9 @@ func AMDGetGPUInfo() []RocmGPUInfo { TotalMemory: totalMemory, FreeMemory: freeMemory, }, + // Free memory reporting on Windows is not reliable until we bump to ROCm v6.2 + UnreliableFreeMemory: true, + ID: strconv.Itoa(i), // TODO this is probably wrong if we specify visible devices DependencyPath: libDir, MinimumMemory: rocmMinimumMemory, diff --git a/gpu/types.go b/gpu/types.go index 9920db5ff..2eaa9bae9 100644 --- a/gpu/types.go +++ b/gpu/types.go @@ -29,6 +29,11 @@ type GpuInfo struct { // Extra environment variables specific to the GPU as list of [key,value] EnvWorkarounds [][2]string `json:"envs,omitempty"` + // Set to true if we can NOT reliably discover FreeMemory. 
A value of true indicates + // the FreeMemory is best effort, and may over or under report actual memory usage + // False indicates FreeMemory can generally be trusted on this GPU + UnreliableFreeMemory bool + // GPU information ID string `json:"gpu_id"` // string to use for selection of this specific GPU Name string `json:"name"` // user friendly name if available diff --git a/server/sched.go b/server/sched.go index 31ef560f5..de8c9d281 100644 --- a/server/sched.go +++ b/server/sched.go @@ -46,6 +46,16 @@ type Scheduler struct { reschedDelay time.Duration } +// Default automatic value for number of models we allow per GPU +// Model will still need to fit in VRAM, but loading many small models +// on a large GPU can cause stalling +var defaultModelsPerGPU = 3 + +// Default automatic value for parallel setting +// Model will still need to fit in VRAM. If this setting wont fit +// we'll back off down to 1 to try to get it to fit +var defaultParallel = 4 + var ErrMaxQueue = fmt.Errorf("server busy, please try again. maximum pending requests exceeded") func InitScheduler(ctx context.Context) *Scheduler { @@ -100,7 +110,6 @@ func (s *Scheduler) Run(ctx context.Context) { } func (s *Scheduler) processPending(ctx context.Context) { - maxRunnerFactor := 1 // number of GPUs or 1 for { select { case <-ctx.Done(): @@ -143,7 +152,7 @@ func (s *Scheduler) processPending(ctx context.Context) { pending.useLoadedRunner(runner, s.finishedReqCh) break } - } else if envconfig.MaxRunners > 0 && loadedCount >= (maxRunnerFactor*envconfig.MaxRunners) { + } else if envconfig.MaxRunners > 0 && loadedCount >= envconfig.MaxRunners { slog.Debug("max runners achieved, unloading one to make room", "runner_count", loadedCount) runnerToExpire = s.findRunnerToUnload() } else { @@ -155,7 +164,26 @@ func (s *Scheduler) processPending(ctx context.Context) { } else { gpus = s.getGpuFn() } - maxRunnerFactor = max(len(gpus), 1) + + if envconfig.MaxRunners <= 0 { + // No user specified MaxRunners, so figure out what automatic setting to use + // If all GPUs have reliable free memory reporting, defaultModelsPerGPU * the number of GPUs + // if any GPU has unreliable free memory reporting, 1x the number of GPUs + allReliable := true + for _, gpu := range gpus { + if gpu.UnreliableFreeMemory { + allReliable = false + break + } + } + if allReliable { + envconfig.MaxRunners = defaultModelsPerGPU * len(gpus) + slog.Debug("updating default concurrency", "OLLAMA_MAX_LOADED_MODELS", envconfig.MaxRunners, "gpu_count", len(gpus)) + } else { + slog.Info("one or more GPUs detected that are unable to accurately report free memory - disabling default concurrency") + envconfig.MaxRunners = len(gpus) + } + } // Load model for fitting ggml, err := llm.LoadModel(pending.model.ModelPath) @@ -647,7 +675,7 @@ func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numP var numParallelToTry []int if *numParallel <= 0 { // If no specific parallel setting was provided, try larger then smaller, always end with 1 - numParallelToTry = append(numParallelToTry, 4, 1) + numParallelToTry = append(numParallelToTry, defaultParallel, 1) } else { numParallelToTry = []int{*numParallel} } From 9a9e7d83c416374782a984d7036f3f2ae5ddb78d Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Fri, 21 Jun 2024 15:52:09 -0700 Subject: [PATCH 012/106] Docs (#5149) --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index 2fdc63cb3..978625731 100644 --- a/README.md +++ b/README.md @@ 
-182,6 +182,12 @@ $ ollama run llama3 "Summarize this file: $(cat README.md)" Ollama is a lightweight, extensible framework for building and running language models on the local machine. It provides a simple API for creating, running, and managing models, as well as a library of pre-built models that can be easily used in a variety of applications. ``` +### Show model information + +``` +ollama show llama3 +``` + ### List models on your computer ``` From 642cee13426c994f90d5a95376025fe9a223891a Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 21 Jun 2024 15:59:41 -0700 Subject: [PATCH 013/106] Sort the ps output Provide consistent ordering for the ps command - longest duration listed first --- server/routes.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/routes.go b/server/routes.go index 3d112e9f1..a7f72edc2 100644 --- a/server/routes.go +++ b/server/routes.go @@ -1224,6 +1224,11 @@ func (s *Server) ProcessHandler(c *gin.Context) { models = append(models, mr) } + slices.SortStableFunc(models, func(i, j api.ProcessModelResponse) int { + // longest duration remaining listed first + return cmp.Compare(j.ExpiresAt.Unix(), i.ExpiresAt.Unix()) + }) + c.JSON(http.StatusOK, api.ProcessResponse{Models: models}) } From 2aa91a937ba199ae5832c71ecc10221cc6420fa8 Mon Sep 17 00:00:00 2001 From: Blake Mizerany Date: Mon, 24 Jun 2024 20:14:03 -0700 Subject: [PATCH 014/106] cmd: defer stating model info until necessary (#5248) This commit changes the 'ollama run' command to defer fetching model information until it really needs it. That is, when in interactive mode. It also removes one such case where the model information is fetch in duplicate, just before calling generateInteractive and then again, first thing, in generateInteractive. This positively impacts the performance of the command: ; time ./before run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat? ./before run llama3 'hi' 0.02s user 0.01s system 2% cpu 1.168 total ; time ./before run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat? ./before run llama3 'hi' 0.02s user 0.01s system 2% cpu 1.220 total ; time ./before run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat? ./before run llama3 'hi' 0.02s user 0.01s system 2% cpu 1.217 total ; time ./after run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat? ./after run llama3 'hi' 0.02s user 0.01s system 4% cpu 0.652 total ; time ./after run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat? ./after run llama3 'hi' 0.01s user 0.01s system 5% cpu 0.498 total ; time ./after run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with or would you like to chat? ./after run llama3 'hi' 0.01s user 0.01s system 3% cpu 0.479 total ; time ./after run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat? ./after run llama3 'hi' 0.02s user 0.01s system 5% cpu 0.507 total ; time ./after run llama3 'hi' Hi! It's nice to meet you. Is there something I can help you with, or would you like to chat? 
./after run llama3 'hi' 0.02s user 0.01s system 5% cpu 0.507 total --- cmd/cmd.go | 65 +++++++++++++++++++++++----------------------- cmd/interactive.go | 51 ++++++++++-------------------------- 2 files changed, 46 insertions(+), 70 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index 68197f72d..89b551f40 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -287,38 +287,12 @@ func createBlob(cmd *cobra.Command, client *api.Client, path string) (string, er } func RunHandler(cmd *cobra.Command, args []string) error { - client, err := api.ClientFromEnvironment() - if err != nil { - return err - } - - name := args[0] - - // check if the model exists on the server - show, err := client.Show(cmd.Context(), &api.ShowRequest{Name: name}) - var statusError api.StatusError - switch { - case errors.As(err, &statusError) && statusError.StatusCode == http.StatusNotFound: - if err := PullHandler(cmd, []string{name}); err != nil { - return err - } - - show, err = client.Show(cmd.Context(), &api.ShowRequest{Name: name}) - if err != nil { - return err - } - case err != nil: - return err - } - interactive := true opts := runOptions{ - Model: args[0], - WordWrap: os.Getenv("TERM") == "xterm-256color", - Options: map[string]interface{}{}, - MultiModal: slices.Contains(show.Details.Families, "clip"), - ParentModel: show.Details.ParentModel, + Model: args[0], + WordWrap: os.Getenv("TERM") == "xterm-256color", + Options: map[string]interface{}{}, } format, err := cmd.Flags().GetString("format") @@ -362,11 +336,38 @@ func RunHandler(cmd *cobra.Command, args []string) error { } opts.WordWrap = !nowrap - if !interactive { - return generate(cmd, opts) + // Fill out the rest of the options based on information about the + // model. + client, err := api.ClientFromEnvironment() + if err != nil { + return err } - return generateInteractive(cmd, opts) + name := args[0] + info, err := func() (*api.ShowResponse, error) { + showReq := &api.ShowRequest{Name: name} + info, err := client.Show(cmd.Context(), showReq) + var se api.StatusError + if errors.As(err, &se) && se.StatusCode == http.StatusNotFound { + if err := PullHandler(cmd, []string{name}); err != nil { + return nil, err + } + return client.Show(cmd.Context(), &api.ShowRequest{Name: name}) + } + return info, err + }() + if err != nil { + return err + } + + opts.MultiModal = slices.Contains(info.Details.Families, "clip") + opts.ParentModel = info.Details.ParentModel + opts.Messages = append(opts.Messages, info.Messages...) + + if interactive { + return generateInteractive(cmd, opts) + } + return generate(cmd, opts) } func errFromUnknownKey(unknownKeyErr error) error { diff --git a/cmd/interactive.go b/cmd/interactive.go index 80a915474..0a2f429b6 100644 --- a/cmd/interactive.go +++ b/cmd/interactive.go @@ -31,65 +31,40 @@ const ( ) func loadModel(cmd *cobra.Command, opts *runOptions) error { - client, err := api.ClientFromEnvironment() - if err != nil { - return err - } - p := progress.NewProgress(os.Stderr) defer p.StopAndClear() spinner := progress.NewSpinner("") p.Add("", spinner) - showReq := api.ShowRequest{Name: opts.Model} - showResp, err := client.Show(cmd.Context(), &showReq) + client, err := api.ClientFromEnvironment() if err != nil { return err } - opts.MultiModal = slices.Contains(showResp.Details.Families, "clip") - opts.ParentModel = showResp.Details.ParentModel - - if len(showResp.Messages) > 0 { - opts.Messages = append(opts.Messages, showResp.Messages...) 
- } chatReq := &api.ChatRequest{ - Model: opts.Model, - Messages: []api.Message{}, + Model: opts.Model, + KeepAlive: opts.KeepAlive, } - if opts.KeepAlive != nil { - chatReq.KeepAlive = opts.KeepAlive - } - - err = client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error { + return client.Chat(cmd.Context(), chatReq, func(resp api.ChatResponse) error { p.StopAndClear() - if len(opts.Messages) > 0 { - for _, msg := range opts.Messages { - switch msg.Role { - case "user": - fmt.Printf(">>> %s\n", msg.Content) - case "assistant": - state := &displayResponseState{} - displayResponse(msg.Content, opts.WordWrap, state) - fmt.Println() - fmt.Println() - } + for _, msg := range opts.Messages { + switch msg.Role { + case "user": + fmt.Printf(">>> %s\n", msg.Content) + case "assistant": + state := &displayResponseState{} + displayResponse(msg.Content, opts.WordWrap, state) + fmt.Println() + fmt.Println() } } return nil }) - if err != nil { - return err - } - - return nil } func generateInteractive(cmd *cobra.Command, opts runOptions) error { - opts.Messages = make([]api.Message, 0) - err := loadModel(cmd, &opts) if err != nil { return err From cb42e607c5cf4d439ad4d5a93ed13c7d6a09fc34 Mon Sep 17 00:00:00 2001 From: Blake Mizerany Date: Mon, 24 Jun 2024 21:47:52 -0700 Subject: [PATCH 015/106] llm: speed up gguf decoding by a lot (#5246) Previously, some costly things were causing the loading of GGUF files and their metadata and tensor information to be VERY slow: * Too many allocations when decoding strings * Hitting disk for each read of each key and value, resulting in a not-okay amount of syscalls/disk I/O. The show API is now down to 33ms from 800ms+ for llama3 on a macbook pro m3. This commit also prevents collecting large arrays of values when decoding GGUFs (if desired). When such keys are encountered, their values are null, and are encoded as such in JSON. Also, this fixes a broken test that was not encoding valid GGUF. 
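The syscall half of the fix comes from wrapping the io.ReadSeeker in a buffered reader (the new util/bufioutil package). A minimal sketch of the idea follows; the constructor name and 32KiB buffer size match the call added in llm/ggml.go, but the struct internals here are an assumption since the new file's body is outside this excerpt. The allocation half (skipping oversized metadata arrays via maxArraySize) is visible directly in the llm/gguf.go hunks.

    package bufioutil

    import (
        "bufio"
        "io"
    )

    // BufferedSeeker batches the decoder's many small metadata reads into a
    // few large reads instead of paying one syscall per key and value.
    type BufferedSeeker struct {
        rs io.ReadSeeker
        br *bufio.Reader
    }

    func NewBufferedSeeker(rs io.ReadSeeker, size int) *BufferedSeeker {
        return &BufferedSeeker{rs: rs, br: bufio.NewReaderSize(rs, size)}
    }

    func (b *BufferedSeeker) Read(p []byte) (int, error) {
        return b.br.Read(p)
    }

    // Seek drops the buffered bytes and repositions the underlying reader;
    // the decoder only seeks to skip padding and tensor data, so the
    // occasional buffer reset stays cheap next to unbuffered reads.
    func (b *BufferedSeeker) Seek(offset int64, whence int) (int64, error) {
        if whence == io.SeekCurrent {
            // the underlying reader sits ahead of the logical position by
            // whatever is still buffered
            offset -= int64(b.br.Buffered())
        }
        n, err := b.rs.Seek(offset, whence)
        b.br.Reset(b.rs)
        return n, err
    }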
--- llm/ggla.go | 13 ++- llm/ggml.go | 25 ++++-- llm/ggml_test.go | 1 + llm/gguf.go | 130 +++++++++++++++++++-------- llm/memory_test.go | 19 ++-- llm/server.go | 11 ++- server/images.go | 2 +- server/model.go | 6 +- server/routes.go | 19 +++- server/sched.go | 2 +- server/sched_test.go | 6 +- util/bufioutil/buffer_seeker.go | 34 +++++++ util/bufioutil/buffer_seeker_test.go | 64 +++++++++++++ 13 files changed, 263 insertions(+), 69 deletions(-) create mode 100644 llm/ggml_test.go create mode 100644 util/bufioutil/buffer_seeker.go create mode 100644 util/bufioutil/buffer_seeker_test.go diff --git a/llm/ggla.go b/llm/ggla.go index a5d90b6cb..34c4f6ca3 100644 --- a/llm/ggla.go +++ b/llm/ggla.go @@ -53,7 +53,7 @@ func (llm *ggla) Tensors() Tensors { return llm.tensors } -func (llm *ggla) decode(rs io.ReadSeeker) error { +func (llm *ggla) decode(rs io.ReadSeeker) (retErr error) { var r uint32 if err := binary.Read(rs, binary.LittleEndian, &r); err != nil { return err @@ -69,9 +69,18 @@ func (llm *ggla) decode(rs io.ReadSeeker) error { for { var dims uint32 if err := binary.Read(rs, binary.LittleEndian, &dims); err != nil { + if errors.Is(err, io.EOF) { + return nil + } return err } + defer func() { + if errors.Is(retErr, io.EOF) { + retErr = io.ErrUnexpectedEOF + } + }() + var namesize uint32 if err := binary.Read(rs, binary.LittleEndian, &namesize); err != nil { return err @@ -108,7 +117,7 @@ func (llm *ggla) decode(rs io.ReadSeeker) error { return err } - if _, err := rs.Seek((offset+31)&-32, io.SeekStart); err != nil { + if _, err := rs.Seek((offset+31)&-32-offset, io.SeekCurrent); err != nil { return err } diff --git a/llm/ggml.go b/llm/ggml.go index f02f0ff60..d0d0b6ddc 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -6,6 +6,8 @@ import ( "fmt" "io" "strings" + + "github.com/ollama/ollama/util/bufioutil" ) type GGML struct { @@ -278,7 +280,18 @@ func DetectGGMLType(b []byte) string { } } -func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) { +// DecodeGGML decodes a GGML model from the given reader. +// +// It collects array values for arrays with a size less than or equal to +// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If +// the maxArraySize is negative, all arrays are collected. 
+func DecodeGGML(rs io.ReadSeeker, maxArraySize int) (*GGML, int64, error) { + if maxArraySize == 0 { + maxArraySize = 1024 + } + + rs = bufioutil.NewBufferedSeeker(rs, 32<<10) + var magic uint32 if err := binary.Read(rs, binary.LittleEndian, &magic); err != nil { return nil, 0, err @@ -291,17 +304,15 @@ func DecodeGGML(rs io.ReadSeeker) (*GGML, int64, error) { case FILE_MAGIC_GGLA: c = &containerGGLA{} case FILE_MAGIC_GGUF_LE: - c = &containerGGUF{ByteOrder: binary.LittleEndian} + c = &containerGGUF{ByteOrder: binary.LittleEndian, maxArraySize: maxArraySize} case FILE_MAGIC_GGUF_BE: - c = &containerGGUF{ByteOrder: binary.BigEndian} + c = &containerGGUF{ByteOrder: binary.BigEndian, maxArraySize: maxArraySize} default: return nil, 0, errors.New("invalid file magic") } model, err := c.Decode(rs) - if errors.Is(err, io.EOF) { - // noop - } else if err != nil { + if err != nil { return nil, 0, err } @@ -321,7 +332,7 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui embedding := llm.KV().EmbeddingLength() heads := llm.KV().HeadCount() headsKV := llm.KV().HeadCountKV() - vocab := uint64(len(llm.KV()["tokenizer.ggml.tokens"].([]any))) + vocab := uint64(llm.KV()["tokenizer.ggml.tokens"].(*array).size) embeddingHeads := llm.KV().EmbeddingHeadCount() embeddingHeadsK := llm.KV().EmbeddingHeadCountK() diff --git a/llm/ggml_test.go b/llm/ggml_test.go new file mode 100644 index 000000000..006c3ded8 --- /dev/null +++ b/llm/ggml_test.go @@ -0,0 +1 @@ +package llm diff --git a/llm/gguf.go b/llm/gguf.go index 234efe574..4d343a1bd 100644 --- a/llm/gguf.go +++ b/llm/gguf.go @@ -3,11 +3,10 @@ package llm import ( "bytes" "encoding/binary" + "encoding/json" "fmt" "io" "strings" - - "log/slog" ) type containerGGUF struct { @@ -29,6 +28,12 @@ type containerGGUF struct { NumTensor uint64 NumKV uint64 } + + maxArraySize int +} + +func (c *containerGGUF) canCollectArray(size int) bool { + return c.maxArraySize < 0 || size <= c.maxArraySize } func (c *containerGGUF) Name() string { @@ -54,7 +59,6 @@ func (c *containerGGUF) Decode(rs io.ReadSeeker) (model, error) { } model := newGGUF(c) - slog.Debug(fmt.Sprintf("model = %#v", model)) if err := model.Decode(rs); err != nil { return nil, err } @@ -85,6 +89,8 @@ type gguf struct { tensors []*Tensor parameters uint64 + + scratch [16 << 10]byte } func newGGUF(container *containerGGUF) *gguf { @@ -181,34 +187,34 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error { } // decode tensors - for i := 0; uint64(i) < llm.numTensor(); i++ { + for range llm.numTensor() { name, err := readGGUFString(llm, rs) if err != nil { - return err + return fmt.Errorf("failed to read tensor name: %w", err) } // dims is the number of dimensions in the tensor dims, err := readGGUF[uint32](llm, rs) if err != nil { - return err + return fmt.Errorf("failed to read tensor dimensions: %w", err) } shape := [4]uint64{1, 1, 1, 1} for i := 0; uint32(i) < dims; i++ { shape[i], err = readGGUF[uint64](llm, rs) if err != nil { - return err + return fmt.Errorf("failed to read tensor shape: %w", err) } } kind, err := readGGUF[uint32](llm, rs) if err != nil { - return err + return fmt.Errorf("failed to read tensor kind: %w", err) } offset, err := readGGUF[uint64](llm, rs) if err != nil { - return err + return fmt.Errorf("failed to read tensor offset: %w", err) } tensor := Tensor{ @@ -230,24 +236,19 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error { alignment = 32 } - offset, err := rs.Seek(0, io.SeekCurrent) - if err != nil { - return err - } - - padding := llm.padding(offset, 
int64(alignment)) - if _, err := rs.Seek(padding, io.SeekCurrent); err != nil { - return err - } - for _, tensor := range llm.tensors { - if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil { - return err + offset, err := rs.Seek(0, io.SeekCurrent) + if err != nil { + return fmt.Errorf("failed to get current offset: %w", err) } - padding := llm.padding(int64(tensor.Size()), int64(alignment)) + padding := llm.padding(offset, int64(alignment)) if _, err := rs.Seek(padding, io.SeekCurrent); err != nil { - return err + return fmt.Errorf("failed to seek to init padding: %w", err) + } + + if _, err := rs.Seek(int64(tensor.Size()), io.SeekCurrent); err != nil { + return fmt.Errorf("failed to seek to tensor: %w", err) } } @@ -285,22 +286,48 @@ func readGGUFV1String(llm *gguf, r io.Reader) (string, error) { return b.String(), nil } +func discardGGUFString(llm *gguf, r io.Reader) error { + buf := llm.scratch[:8] + _, err := io.ReadFull(r, buf) + if err != nil { + return err + } + + size := int(llm.ByteOrder.Uint64(buf)) + for size > 0 { + n, err := r.Read(llm.scratch[:min(size, cap(llm.scratch))]) + if err != nil { + return err + } + size -= n + } + return nil +} + func readGGUFString(llm *gguf, r io.Reader) (string, error) { if llm.Version == 1 { return readGGUFV1String(llm, r) } - var length uint64 - if err := binary.Read(r, llm.ByteOrder, &length); err != nil { + buf := llm.scratch[:8] + _, err := io.ReadFull(r, buf) + if err != nil { return "", err } - var b bytes.Buffer - if _, err := io.CopyN(&b, r, int64(length)); err != nil { + length := int(llm.ByteOrder.Uint64(buf)) + if length > len(llm.scratch) { + buf = make([]byte, length) + } else { + buf = llm.scratch[:length] + } + clear(buf) + + _, err = io.ReadFull(r, buf) + if err != nil { return "", err } - - return b.String(), nil + return string(buf), nil } func writeGGUFString(llm *gguf, w io.Writer, s string) error { @@ -316,7 +343,16 @@ func writeGGUFString(llm *gguf, w io.Writer, s string) error { return err } -func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) { +type array struct { + size int + values []any +} + +func (a *array) MarshalJSON() ([]byte, error) { + return json.Marshal(a.values) +} + +func readGGUFV1Array(llm *gguf, r io.Reader) (*array, error) { t, err := readGGUF[uint32](llm, r) if err != nil { return nil, err @@ -327,7 +363,12 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) { return nil, err } - for i := 0; uint32(i) < n; i++ { + a := &array{size: int(n)} + if llm.canCollectArray(int(n)) { + a.values = make([]any, 0, int(n)) + } + + for i := range n { var e any switch t { case ggufTypeUint8: @@ -361,13 +402,15 @@ func readGGUFV1Array(llm *gguf, r io.Reader) (a []any, err error) { return nil, err } - a = append(a, e) + if a.values != nil { + a.values[i] = e + } } - return + return a, nil } -func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { +func readGGUFArray(llm *gguf, r io.Reader) (*array, error) { if llm.Version == 1 { return readGGUFV1Array(llm, r) } @@ -382,7 +425,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { return nil, err } - for i := 0; uint64(i) < n; i++ { + a := &array{size: int(n)} + if llm.canCollectArray(int(n)) { + a.values = make([]any, int(n)) + } + + for i := range n { var e any switch t { case ggufTypeUint8: @@ -408,7 +456,11 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { case ggufTypeBool: e, err = readGGUF[bool](llm, r) case ggufTypeString: - e, err = readGGUFString(llm, r) + if 
a.values != nil { + e, err = readGGUFString(llm, r) + } else { + err = discardGGUFString(llm, r) + } default: return nil, fmt.Errorf("invalid array type: %d", t) } @@ -416,10 +468,12 @@ func readGGUFArray(llm *gguf, r io.Reader) (a []any, err error) { return nil, err } - a = append(a, e) + if a.values != nil { + a.values[i] = e + } } - return + return a, nil } func writeGGUFArray[S ~[]E, E any](llm *gguf, w io.Writer, t uint32, s S) error { diff --git a/llm/memory_test.go b/llm/memory_test.go index 8eaa07715..f972f9275 100644 --- a/llm/memory_test.go +++ b/llm/memory_test.go @@ -22,13 +22,14 @@ func TestEstimateGPULayers(t *testing.T) { defer f.Close() gguf := NewGGUFV3(binary.LittleEndian) inputLayerCount := 5 + tensors := []Tensor{ - {Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, - {Name: "blk.1.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, - {Name: "blk.2.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, - {Name: "blk.3.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, - {Name: "blk.4.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, - {Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, + {Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, + {Name: "blk.1.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, + {Name: "blk.2.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, + {Name: "blk.3.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, + {Name: "blk.4.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, + {Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, } assert.Len(t, tensors, inputLayerCount+1) err = gguf.Encode(f, KV{ @@ -45,8 +46,10 @@ func TestEstimateGPULayers(t *testing.T) { }, tensors) require.NoError(t, err) - ggml, err := LoadModel(f.Name()) - require.NoError(t, err) + ggml, err := LoadModel(f.Name(), 0) + if err != nil { + t.Fatal(err) + } // Simple CPU scenario gpus := []gpu.GpuInfo{ diff --git a/llm/server.go b/llm/server.go index da83416ee..ad67138b5 100644 --- a/llm/server.go +++ b/llm/server.go @@ -60,7 +60,12 @@ type llmServer struct { sem *semaphore.Weighted } -func LoadModel(model string) (*GGML, error) { +// LoadModel will load a model from disk. The model must be in the GGML format. +// +// It collects array values for arrays with a size less than or equal to +// maxArraySize. If maxArraySize is 0, the default value of 1024 is used. If +// the maxArraySize is negative, all arrays are collected. 
+func LoadModel(model string, maxArraySize int) (*GGML, error) { if _, err := os.Stat(model); err != nil { return nil, err } @@ -71,7 +76,7 @@ func LoadModel(model string) (*GGML, error) { } defer f.Close() - ggml, _, err := DecodeGGML(f) + ggml, _, err := DecodeGGML(f, maxArraySize) return ggml, err } @@ -412,7 +417,7 @@ func projectorMemoryRequirements(filename string) uint64 { } defer file.Close() - ggml, _, err := DecodeGGML(file) + ggml, _, err := DecodeGGML(file, 0) if err != nil { return 0 } diff --git a/server/images.go b/server/images.go index 98794149e..e949fb18a 100644 --- a/server/images.go +++ b/server/images.go @@ -423,7 +423,7 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio return err } - ggml, _, err := llm.DecodeGGML(temp) + ggml, _, err := llm.DecodeGGML(temp, 0) if err != nil { return err } diff --git a/server/model.go b/server/model.go index b262ea385..055ffd63a 100644 --- a/server/model.go +++ b/server/model.go @@ -63,7 +63,7 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe } defer blob.Close() - ggml, _, err := llm.DecodeGGML(blob) + ggml, _, err := llm.DecodeGGML(blob, 0) if err != nil { return nil, err } @@ -176,7 +176,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a } defer bin.Close() - ggml, _, err := llm.DecodeGGML(bin) + ggml, _, err := llm.DecodeGGML(bin, 0) if err != nil { return nil, err } @@ -210,7 +210,7 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap var offset int64 for offset < stat.Size() { - ggml, n, err := llm.DecodeGGML(file) + ggml, n, err := llm.DecodeGGML(file, 0) if errors.Is(err, io.EOF) { break } else if err != nil { diff --git a/server/routes.go b/server/routes.go index 3d112e9f1..ff66663c0 100644 --- a/server/routes.go +++ b/server/routes.go @@ -754,7 +754,11 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) { } func getKVData(digest string, verbose bool) (llm.KV, error) { - kvData, err := llm.LoadModel(digest) + maxArraySize := 0 + if verbose { + maxArraySize = -1 + } + kvData, err := llm.LoadModel(digest, maxArraySize) if err != nil { return nil, err } @@ -1101,11 +1105,20 @@ func Serve(ln net.Listener) error { schedCtx, schedDone := context.WithCancel(ctx) sched := InitScheduler(schedCtx) s := &Server{addr: ln.Addr(), sched: sched} - r := s.GenerateRoutes() + + http.Handle("/", s.GenerateRoutes()) slog.Info(fmt.Sprintf("Listening on %s (version %s)", ln.Addr(), version.Version)) srvr := &http.Server{ - Handler: r, + // Use http.DefaultServeMux so we get net/http/pprof for + // free. + // + // TODO(bmizerany): Decide if we want to make this + // configurable so it is not exposed by default, or allow + // users to bind it to a different port. This was a quick + // and easy way to get pprof, but it may not be the best + // way. 
+ Handler: nil, } // listen for a ctrl+c and stop any loaded llm diff --git a/server/sched.go b/server/sched.go index 424395544..0084b533b 100644 --- a/server/sched.go +++ b/server/sched.go @@ -144,7 +144,7 @@ func (s *Scheduler) processPending(ctx context.Context) { } // Load model for fitting - ggml, err := llm.LoadModel(pending.model.ModelPath) + ggml, err := llm.LoadModel(pending.model.ModelPath, 0) if err != nil { pending.errCh <- err break diff --git a/server/sched_test.go b/server/sched_test.go index 953288347..4a1cf72a0 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -128,14 +128,14 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV "tokenizer.ggml.scores": []float32{0}, "tokenizer.ggml.token_type": []int32{0}, }, []llm.Tensor{ - {Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, - {Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}}, + {Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, + {Name: "output.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: bytes.NewReader(make([]byte, 32))}, }) require.NoError(t, err) fname := f.Name() model := &Model{Name: modelName, ModelPath: fname} - scenario.ggml, err = llm.LoadModel(model.ModelPath) + scenario.ggml, err = llm.LoadModel(model.ModelPath, 0) require.NoError(t, err) scenario.req = &LlmRequest{ diff --git a/util/bufioutil/buffer_seeker.go b/util/bufioutil/buffer_seeker.go new file mode 100644 index 000000000..8775fdb83 --- /dev/null +++ b/util/bufioutil/buffer_seeker.go @@ -0,0 +1,34 @@ +package bufioutil + +import ( + "bufio" + "io" +) + +type BufferedSeeker struct { + rs io.ReadSeeker + br *bufio.Reader +} + +func NewBufferedSeeker(rs io.ReadSeeker, size int) *BufferedSeeker { + return &BufferedSeeker{ + rs: rs, + br: bufio.NewReaderSize(rs, size), + } +} + +func (b *BufferedSeeker) Read(p []byte) (int, error) { + return b.br.Read(p) +} + +func (b *BufferedSeeker) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + offset -= int64(b.br.Buffered()) + } + n, err := b.rs.Seek(offset, whence) + if err != nil { + return 0, err + } + b.br.Reset(b.rs) + return n, nil +} diff --git a/util/bufioutil/buffer_seeker_test.go b/util/bufioutil/buffer_seeker_test.go new file mode 100644 index 000000000..87145f6b6 --- /dev/null +++ b/util/bufioutil/buffer_seeker_test.go @@ -0,0 +1,64 @@ +package bufioutil + +import ( + "bytes" + "io" + "strings" + "testing" +) + +func TestBufferedSeeker(t *testing.T) { + const alphabet = "abcdefghijklmnopqrstuvwxyz" + + bs := NewBufferedSeeker(strings.NewReader(alphabet), 0) // minReadBufferSize = 16 + + checkRead := func(buf []byte, expected string) { + t.Helper() + _, err := bs.Read(buf) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(buf, []byte(expected)) { + t.Fatalf("expected %s, got %s", expected, buf) + } + } + + // Read the first 5 bytes + buf := make([]byte, 5) + + checkRead(buf, "abcde") + + // Seek back to the beginning + _, err := bs.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + + // read 'a' + checkRead(buf[:1], "a") + + if bs.br.Buffered() == 0 { + t.Fatalf("totally unexpected sanity check failed") + } + + // Seek past 'b' + _, err = bs.Seek(1, io.SeekCurrent) + if err != nil { + t.Fatal(err) + } + checkRead(buf, "cdefg") + + // Seek back to the 
beginning + _, err = bs.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + checkRead(buf, "abcde") + + // Seek to the end + _, err = bs.Seek(-5, io.SeekEnd) + if err != nil { + t.Fatal(err) + } + checkRead(buf, "vwxyz") +} From 4d311eb731bb59512bcd17f1f33d60f3d9022837 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 26 Jun 2024 21:38:12 -0700 Subject: [PATCH 016/106] llm: architecture patch (#5316) --- llm/patches/07-gemma.diff | 305 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 305 insertions(+) create mode 100644 llm/patches/07-gemma.diff diff --git a/llm/patches/07-gemma.diff b/llm/patches/07-gemma.diff new file mode 100644 index 000000000..86eac3d17 --- /dev/null +++ b/llm/patches/07-gemma.diff @@ -0,0 +1,305 @@ +From 5cadb45f39d001ffbad95b690d6cf0abcb4a6d96 Mon Sep 17 00:00:00 2001 +From: Ollama maintainers +Date: Wed, 26 Jun 2024 16:18:09 -0700 +Subject: [PATCH] Architecture support + +--- + llama.cpp | 194 +++++++++++++++++++++++++++++++++++++++++++++++++++++- + 1 file changed, 193 insertions(+), 1 deletion(-) + +diff --git a/llama.cpp b/llama.cpp +index 61948751..3b4196f5 100644 +--- a/llama.cpp ++++ b/llama.cpp +@@ -217,6 +217,7 @@ enum llm_arch { + LLM_ARCH_INTERNLM2, + LLM_ARCH_MINICPM, + LLM_ARCH_GEMMA, ++ LLM_ARCH_GEMMA2, + LLM_ARCH_STARCODER2, + LLM_ARCH_MAMBA, + LLM_ARCH_XVERSE, +@@ -255,6 +256,7 @@ static const std::map LLM_ARCH_NAMES = { + { LLM_ARCH_INTERNLM2, "internlm2" }, + { LLM_ARCH_MINICPM, "minicpm" }, + { LLM_ARCH_GEMMA, "gemma" }, ++ { LLM_ARCH_GEMMA2, "gemma2" }, + { LLM_ARCH_STARCODER2, "starcoder2" }, + { LLM_ARCH_MAMBA, "mamba" }, + { LLM_ARCH_XVERSE, "xverse" }, +@@ -464,10 +466,12 @@ enum llm_tensor { + LLM_TENSOR_ATTN_NORM, + LLM_TENSOR_ATTN_NORM_2, + LLM_TENSOR_ATTN_OUT_NORM, ++ LLM_TENSOR_ATTN_POST_NORM, + LLM_TENSOR_ATTN_ROT_EMBD, + LLM_TENSOR_FFN_GATE_INP, + LLM_TENSOR_FFN_GATE_INP_SHEXP, + LLM_TENSOR_FFN_NORM, ++ LLM_TENSOR_FFN_POST_NORM, + LLM_TENSOR_FFN_GATE, + LLM_TENSOR_FFN_DOWN, + LLM_TENSOR_FFN_UP, +@@ -960,6 +964,24 @@ static const std::map> LLM_TENSOR_NA + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, ++ { ++ LLM_ARCH_GEMMA2, ++ { ++ { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, ++ { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, ++ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, ++ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, ++ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, ++ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, ++ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, ++ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, ++ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, ++ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, ++ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, ++ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, ++ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, ++ }, ++ }, + { + LLM_ARCH_STARCODER2, + { +@@ -1941,6 +1963,8 @@ enum e_model { + MODEL_8x22B, + MODEL_16x12B, + MODEL_10B_128x3_66B, ++ MODEL_9B, ++ MODEL_27B, + }; + + static const size_t kiB = 1024; +@@ -2114,6 +2138,7 @@ struct llama_layer { + struct ggml_tensor * attn_out_norm_b; + struct ggml_tensor * attn_q_a_norm; + struct ggml_tensor * attn_kv_a_norm; ++ struct ggml_tensor * attn_post_norm; + + // attention + struct ggml_tensor * wq; +@@ -2136,6 +2161,7 @@ struct llama_layer { + // normalization + struct ggml_tensor * ffn_norm; + struct ggml_tensor * ffn_norm_b; ++ struct ggml_tensor * ffn_post_norm; + struct ggml_tensor * layer_out_norm; + struct ggml_tensor * layer_out_norm_b; + struct ggml_tensor * ffn_norm_exps; +@@ -4529,6 +4555,16 @@ static void 
llm_load_hparams( + } + } break; + case LLM_ARCH_GEMMA: ++ { ++ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); ++ ++ switch (hparams.n_layer) { ++ case 18: model.type = e_model::MODEL_9B; break; ++ case 28: model.type = e_model::MODEL_27B; break; ++ default: model.type = e_model::MODEL_UNKNOWN; ++ } ++ } break; ++ case LLM_ARCH_GEMMA2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + +@@ -6305,6 +6341,40 @@ static bool llm_load_tensors( + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); + } + } break; ++ case LLM_ARCH_GEMMA2: ++ { ++ model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); ++ ++ // output ++ model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); ++ model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading ++ ++ const int64_t n_ff = hparams.n_ff; ++ const int64_t n_embd_head_k = hparams.n_embd_head_k; ++ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); ++ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); ++ ++ for (uint32_t i = 0; i < n_layer; ++i) { ++ ggml_context * ctx_layer = ctx_for_layer(i); ++ ggml_context * ctx_split = ctx_for_layer_split(i); ++ ++ auto & layer = model.layers[i]; ++ ++ layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); ++ ++ layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * hparams.n_head}); ++ layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}); ++ layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}); ++ layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * hparams.n_head, n_embd}); ++ layer.attn_post_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}); ++ ++ layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); ++ layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); ++ layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); ++ layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); ++ layer.ffn_post_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}); ++ } ++ } break; + case LLM_ARCH_STARCODER2: + { + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); +@@ -10614,6 +10684,123 @@ struct llm_build_context { + return gf; + } + ++ struct ggml_cgraph * build_gemma2() { ++ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); ++ ++ const int64_t n_embd_head_k = hparams.n_embd_head_k; ++ ++ struct ggml_tensor * cur; ++ struct ggml_tensor * inpL; ++ ++ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb); ++ ++ inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); ++ cb(inpL, "inp_scaled", -1); ++ ++ // inp_pos - contains the positions ++ struct ggml_tensor * inp_pos = build_inp_pos(); ++ ++ // KQ_mask (mask for 1 head, it will be broadcasted to all heads) ++ struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); ++ ++ for (int il = 0; il < n_layer; ++il) { ++ // norm ++ cur = 
llm_build_norm(ctx0, inpL, hparams, ++ model.layers[il].attn_norm, NULL, ++ LLM_NORM_RMS, cb, il); ++ cb(cur, "attn_norm", il); ++ ++ // self-attention ++ { ++ // compute Q and K and RoPE them ++ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); ++ cb(Qcur, "Qcur", il); ++ ++ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); ++ cb(Kcur, "Kcur", il); ++ ++ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); ++ cb(Vcur, "Vcur", il); ++ ++ Qcur = ggml_rope_ext( ++ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, ++ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale, ++ ext_factor, attn_factor, beta_fast, beta_slow); ++ cb(Qcur, "Qcur", il); ++ ++ Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); ++ cb(Qcur, "Qcur_scaled", il); ++ ++ Kcur = ggml_rope_ext( ++ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, ++ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale, ++ ext_factor, attn_factor, beta_fast, beta_slow); ++ cb(Kcur, "Kcur", il); ++ ++ cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, ++ model.layers[il].wo, NULL, ++ Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); ++ } ++ ++ if (il == n_layer - 1) { ++ // skip computing output for unused tokens ++ struct ggml_tensor * inp_out_ids = build_inp_out_ids(); ++ cur = ggml_get_rows(ctx0, cur, inp_out_ids); ++ inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); ++ } ++ ++ cur = llm_build_norm(ctx0, cur, hparams, ++ model.layers[il].attn_post_norm, NULL, ++ LLM_NORM_RMS, cb, il); ++ cb(cur, "attn_post_norm", il); ++ ++ struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); ++ cb(sa_out, "sa_out", il); ++ ++ cur = llm_build_norm(ctx0, sa_out, hparams, ++ model.layers[il].ffn_norm, NULL, ++ LLM_NORM_RMS, cb, il); ++ cb(cur, "ffn_norm", il); ++ ++ // feed-forward network ++ { ++ cur = llm_build_ffn(ctx0, cur, ++ model.layers[il].ffn_up, NULL, ++ model.layers[il].ffn_gate, NULL, ++ model.layers[il].ffn_down, NULL, ++ NULL, ++ LLM_FFN_GELU, LLM_FFN_PAR, cb, il); ++ cb(cur, "ffn_out", il); ++ } ++ ++ cur = llm_build_norm(ctx0, cur, hparams, ++ model.layers[il].ffn_post_norm, NULL, ++ LLM_NORM_RMS, cb, -1); ++ cb(cur, "ffn_post_norm", -1); ++ ++ cur = ggml_add(ctx0, cur, sa_out); ++ cb(cur, "l_out", il); ++ ++ // input for next layer ++ inpL = cur; ++ } ++ ++ cur = inpL; ++ ++ cur = llm_build_norm(ctx0, cur, hparams, ++ model.output_norm, NULL, ++ LLM_NORM_RMS, cb, -1); ++ cb(cur, "result_norm", -1); ++ ++ // lm_head ++ cur = ggml_mul_mat(ctx0, model.output, cur); ++ cb(cur, "result_output", -1); ++ ++ ggml_build_forward_expand(gf, cur); ++ ++ return gf; ++ } ++ + struct ggml_cgraph * build_starcoder2() { + struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); + +@@ -11847,6 +12034,10 @@ static struct ggml_cgraph * llama_build_graph( + { + result = llm.build_gemma(); + } break; ++ case LLM_ARCH_GEMMA2: ++ { ++ result = llm.build_gemma2(); ++ } break; + case LLM_ARCH_STARCODER2: + { + result = llm.build_starcoder2(); +@@ -16671,6 +16862,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { + case LLM_ARCH_PHI2: + case LLM_ARCH_PHI3: + case LLM_ARCH_GEMMA: ++ case LLM_ARCH_GEMMA2: + case LLM_ARCH_STARCODER2: + case LLM_ARCH_GPTNEOX: + return LLAMA_ROPE_TYPE_NEOX; +@@ -18551,7 +18743,7 @@ static int32_t llama_chat_apply_template_internal( + if (add_ass) { + ss << "assistant\n"; + } +- } else if (tmpl 
== "gemma" || tmpl.find("") != std::string::npos) { ++ } else if (tmpl == "gemma" || tmpl == "gemma2" || tmpl.find("") != std::string::npos) { + // google/gemma-7b-it + std::string system_prompt = ""; + for (auto message : chat) { +-- +2.45.2 + From 123a722a6f541e300bc8e34297ac378ebe23f527 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 26 Jun 2024 21:38:21 -0700 Subject: [PATCH 017/106] zip: prevent extracting files into parent dirs (#5314) --- cmd/cmd.go | 6 +-- server/model.go | 57 ++++++++++++++++++--------- server/model_test.go | 92 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 22 deletions(-) create mode 100644 server/model_test.go diff --git a/cmd/cmd.go b/cmd/cmd.go index 89b551f40..909e8e4b2 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -162,9 +162,6 @@ func tempZipFiles(path string) (string, error) { } defer tempfile.Close() - zipfile := zip.NewWriter(tempfile) - defer zipfile.Close() - detectContentType := func(path string) (string, error) { f, err := os.Open(path) if err != nil { @@ -233,6 +230,9 @@ func tempZipFiles(path string) (string, error) { files = append(files, tks...) } + zipfile := zip.NewWriter(tempfile) + defer zipfile.Close() + for _, file := range files { f, err := os.Open(file) if err != nil { diff --git a/server/model.go b/server/model.go index 055ffd63a..d56e641ba 100644 --- a/server/model.go +++ b/server/model.go @@ -11,6 +11,7 @@ import ( "net/http" "os" "path/filepath" + "strings" "github.com/ollama/ollama/api" "github.com/ollama/ollama/convert" @@ -77,62 +78,80 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe return layers, nil } -func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) { +func extractFromZipFile(p string, file *os.File, fn func(api.ProgressResponse)) error { stat, err := file.Stat() if err != nil { - return nil, err + return err } r, err := zip.NewReader(file, stat.Size()) if err != nil { - return nil, err + return err } - tempdir, err := os.MkdirTemp(filepath.Dir(file.Name()), "") - if err != nil { - return nil, err - } - defer os.RemoveAll(tempdir) - fn(api.ProgressResponse{Status: "unpacking model metadata"}) for _, f := range r.File { + n := filepath.Join(p, f.Name) + if !strings.HasPrefix(n, p) { + slog.Warn("skipped extracting file outside of context", "name", f.Name) + continue + } + + if err := os.MkdirAll(filepath.Dir(n), 0o750); err != nil { + return err + } + // TODO(mxyng): this should not write out all files to disk - outfile, err := os.Create(filepath.Join(tempdir, f.Name)) + outfile, err := os.Create(n) if err != nil { - return nil, err + return err } defer outfile.Close() infile, err := f.Open() if err != nil { - return nil, err + return err } defer infile.Close() if _, err = io.Copy(outfile, infile); err != nil { - return nil, err + return err } if err := outfile.Close(); err != nil { - return nil, err + return err } if err := infile.Close(); err != nil { - return nil, err + return err } } - mf, err := convert.GetModelFormat(tempdir) + return nil +} + +func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) { + tempDir, err := os.MkdirTemp(filepath.Dir(file.Name()), "") + if err != nil { + return nil, err + } + defer os.RemoveAll(tempDir) + + if err := extractFromZipFile(tempDir, file, fn); err != nil { + return nil, err + } + + mf, err := convert.GetModelFormat(tempDir) if err != nil { return 
nil, err } - params, err := mf.GetParams(tempdir) + params, err := mf.GetParams(tempDir) if err != nil { return nil, err } - mArch, err := mf.GetModelArch("", tempdir, params) + mArch, err := mf.GetModelArch("", tempDir, params) if err != nil { return nil, err } @@ -150,7 +169,7 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a // TODO(mxyng): this should write directly into a layer // e.g. NewLayer(arch.Reader(), "application/vnd.ollama.image.model") - temp, err := os.CreateTemp(tempdir, "fp16") + temp, err := os.CreateTemp(tempDir, "fp16") if err != nil { return nil, err } diff --git a/server/model_test.go b/server/model_test.go new file mode 100644 index 000000000..c3023eb2b --- /dev/null +++ b/server/model_test.go @@ -0,0 +1,92 @@ +package server + +import ( + "archive/zip" + "bytes" + "io" + "os" + "path/filepath" + "slices" + "testing" + + "github.com/ollama/ollama/api" +) + +func createZipFile(t *testing.T, name string) *os.File { + t.Helper() + + f, err := os.CreateTemp(t.TempDir(), "") + if err != nil { + t.Fatal(err) + } + + zf := zip.NewWriter(f) + defer zf.Close() + + zh, err := zf.CreateHeader(&zip.FileHeader{Name: name}) + if err != nil { + t.Fatal(err) + } + + if _, err := io.Copy(zh, bytes.NewReader([]byte(""))); err != nil { + t.Fatal(err) + } + + return f +} + +func TestExtractFromZipFile(t *testing.T) { + cases := []struct { + name string + expect []string + }{ + { + name: "good", + expect: []string{"good"}, + }, + { + name: filepath.Join("..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "bad"), + }, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + f := createZipFile(t, tt.name) + defer f.Close() + + tempDir := t.TempDir() + if err := extractFromZipFile(tempDir, f, func(api.ProgressResponse) {}); err != nil { + t.Fatal(err) + } + + var matches []string + if err := filepath.Walk(tempDir, func(p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if !fi.IsDir() { + matches = append(matches, p) + } + + return nil + }); err != nil { + t.Fatal(err) + } + + var actual []string + for _, match := range matches { + rel, err := filepath.Rel(tempDir, match) + if err != nil { + t.Error(err) + } + + actual = append(actual, rel) + } + + if !slices.Equal(actual, tt.expect) { + t.Fatalf("expected %d files, got %d", len(tt.expect), len(matches)) + } + }) + } +} From 2cc7d050124929ae4745633fddf053585a22f0a2 Mon Sep 17 00:00:00 2001 From: Michael Date: Thu, 27 Jun 2024 12:45:16 -0400 Subject: [PATCH 018/106] update readme for gemma 2 (#5333) * update readme for gemma 2 --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 978625731..72ed8fa5e 100644 --- a/README.md +++ b/README.md @@ -53,8 +53,8 @@ Here are some example models that can be downloaded: | Llama 3 | 70B | 40GB | `ollama run llama3:70b` | | Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` | | Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` | -| Gemma | 2B | 1.4GB | `ollama run gemma:2b` | -| Gemma | 7B | 4.8GB | `ollama run gemma:7b` | +| Gemma 2 | 9B | 5.5GB | `ollama run gemma2` | +| Gemma 2 | 27B | 16GB | `ollama run gemma2:27b` | | Mistral | 7B | 4.1GB | `ollama run mistral` | | Moondream 2 | 1.4B | 829MB | `ollama run moondream` | | Neural Chat | 7B | 4.1GB | `ollama run neural-chat` | From 4e986a823ca47eb16f563d15a6fe4cc393a00715 Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Thu, 27 Jun 2024 10:59:15 -0700 Subject: [PATCH 
019/106] unquote, trimp space --- parser/parser.go | 9 ++++++++- parser/parser_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/parser/parser.go b/parser/parser.go index 686a1e695..fa60ebc0f 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -125,6 +125,7 @@ func ParseFile(r io.Reader) (*File, error) { // pass case stateValue: s, ok := unquote(b.String()) + if !ok || isSpace(r) { if _, err := b.WriteRune(r); err != nil { return nil, err @@ -158,7 +159,13 @@ func ParseFile(r io.Reader) (*File, error) { case stateComment, stateNil: // pass; nothing to flush case stateValue: - s, ok := unquote(b.String()) + var s string + var ok bool + if cmd.Name == "model" { + s, ok = unquote(strings.TrimSpace(b.String())) + } else { + s, ok = unquote(b.String()) + } if !ok { return nil, io.ErrUnexpectedEOF } diff --git a/parser/parser_test.go b/parser/parser_test.go index 7123e53bf..35556515d 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -48,6 +48,26 @@ func TestParseFileFrom(t *testing.T) { expected []Command err error }{ + { + "FROM \"FOO BAR \"", + []Command{{Name: "model", Args: "FOO BAR "}}, + nil, + }, + { + "FROM \"FOO BAR\"\nPARAMETER param1 value1", + []Command{{Name: "model", Args: "FOO BAR"}, {Name: "param1", Args: "value1"}}, + nil, + }, + { + "FROM FOOO BAR ", + []Command{{Name: "model", Args: "FOOO BAR"}}, + nil, + }, + { + "FROM /what/is/the path ", + []Command{{Name: "model", Args: "/what/is/the path"}}, + nil, + }, { "FROM foo", []Command{{Name: "model", Args: "foo"}}, @@ -86,6 +106,11 @@ func TestParseFileFrom(t *testing.T) { []Command{{Name: "param1", Args: "value1"}, {Name: "model", Args: "foo"}}, nil, }, + { + "PARAMETER what the \nFROM lemons make lemonade ", + []Command{{Name: "what", Args: "the "}, {Name: "model", Args: "lemons make lemonade"}}, + nil, + }, } for _, c := range cases { From 9bd00041fa1c82881299f34a5950f9edc2a7e66c Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Thu, 27 Jun 2024 11:18:38 -0700 Subject: [PATCH 020/106] trim all params --- parser/parser.go | 11 ++--------- parser/parser_test.go | 4 ++-- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/parser/parser.go b/parser/parser.go index fa60ebc0f..7f566da4e 100644 --- a/parser/parser.go +++ b/parser/parser.go @@ -124,8 +124,7 @@ func ParseFile(r io.Reader) (*File, error) { case stateComment, stateNil: // pass case stateValue: - s, ok := unquote(b.String()) - + s, ok := unquote(strings.TrimSpace(b.String())) if !ok || isSpace(r) { if _, err := b.WriteRune(r); err != nil { return nil, err @@ -159,13 +158,7 @@ func ParseFile(r io.Reader) (*File, error) { case stateComment, stateNil: // pass; nothing to flush case stateValue: - var s string - var ok bool - if cmd.Name == "model" { - s, ok = unquote(strings.TrimSpace(b.String())) - } else { - s, ok = unquote(b.String()) - } + s, ok := unquote(strings.TrimSpace(b.String())) if !ok { return nil, io.ErrUnexpectedEOF } diff --git a/parser/parser_test.go b/parser/parser_test.go index 35556515d..3dc592239 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -108,7 +108,7 @@ func TestParseFileFrom(t *testing.T) { }, { "PARAMETER what the \nFROM lemons make lemonade ", - []Command{{Name: "what", Args: "the "}, {Name: "model", Args: "lemons make lemonade"}}, + []Command{{Name: "what", Args: "the"}, {Name: "model", Args: "lemons make lemonade"}}, nil, }, } @@ -424,7 +424,7 @@ func TestParseFileParameters(t *testing.T) { "mirostat_eta 1.0": {"mirostat_eta", "1.0"}, 
"penalize_newline true": {"penalize_newline", "true"}, "stop ### User:": {"stop", "### User:"}, - "stop ### User: ": {"stop", "### User: "}, + "stop ### User: ": {"stop", "### User:"}, "stop \"### User:\"": {"stop", "### User:"}, "stop \"### User: \"": {"stop", "### User: "}, "stop \"\"\"### User:\"\"\"": {"stop", "### User:"}, From de2163dafd19b5ba2bed3d459354179662cc524d Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 27 Jun 2024 10:52:25 -0700 Subject: [PATCH 021/106] gemma2 graph --- llm/ggml.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/llm/ggml.go b/llm/ggml.go index d0d0b6ddc..cfead450d 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -366,9 +366,18 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui 4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16), ) } - case "gemma": - fullOffload = 4 * batch * (embedding + vocab) - partialOffload = 4*batch*(2*embedding+vocab+1) + embedding*vocab*105/128 + case "gemma", "gemma2": + fullOffload = max( + 4*batch*(embedding+vocab), + 4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads), + ) + + partialOffload = max( + 4*embedding*batch+embedding*vocab*105/128+4*vocab*batch, + 4*batch*(2*embedding+1+2*embeddingHeadsK*heads+context+context*heads)+ + 4*embeddingHeadsK*context*8+ + embedding*embeddingHeadsK*heads*9/16, + ) case "command-r": fullOffload = max( 4*batch*(embedding+vocab), From 6d4219083c56ec4b031f0fda67e9ef2c09ad9888 Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Fri, 28 Jun 2024 09:58:14 -0700 Subject: [PATCH 022/106] Update docs (#5312) --- docs/openai.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/openai.md b/docs/openai.md index 59e7d6405..81b967eb7 100644 --- a/docs/openai.md +++ b/docs/openai.md @@ -104,7 +104,6 @@ curl http://localhost:11434/v1/chat/completions \ #### Notes -- `finish_reason` will always be `stop` - `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached ## Models From b910fa90101038d09ca9cbbea16701831fafaffb Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Fri, 28 Jun 2024 11:30:16 -0700 Subject: [PATCH 023/106] Ollama Show: Check for Projector Type (#5307) * Check exists projtype * Maintain Ordering --- cmd/cmd.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index 909e8e4b2..debb39218 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -672,11 +672,17 @@ func ShowHandler(cmd *cobra.Command, args []string) error { projectorData := [][]string{ {"arch", "clip"}, {"parameters", format.HumanNumber(uint64(resp.ProjectorInfo["general.parameter_count"].(float64)))}, - {"projector type", resp.ProjectorInfo["clip.projector_type"].(string)}, - {"embedding length", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.embedding_length"].(float64))}, - {"projection dimensionality", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.projection_dim"].(float64))}, } + if projectorType, ok := resp.ProjectorInfo["clip.projector_type"]; ok { + projectorData = append(projectorData, []string{"projector type", projectorType.(string)}) + } + + projectorData = append(projectorData, + []string{"embedding length", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.embedding_length"].(float64))}, + []string{"projection dimensionality", fmt.Sprintf("%v", resp.ProjectorInfo["clip.vision.projection_dim"].(float64))}, + ) + mainTableData = 
append(mainTableData, []string{"Projector"}, []string{renderSubTable(projectorData, false)}, From 5f034f5b63cab3a5eb61104118727b088cceea21 Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Fri, 28 Jun 2024 13:15:52 -0700 Subject: [PATCH 024/106] Include Show Info in Interactive (#5342) --- cmd/cmd.go | 24 +++++++++++------------- cmd/interactive.go | 10 +--------- 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index debb39218..c898c7db6 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -624,13 +624,13 @@ func ShowHandler(cmd *cobra.Command, args []string) error { return errors.New("only one of '--license', '--modelfile', '--parameters', '--system', or '--template' can be specified") } - if flagsSet == 1 { - req := api.ShowRequest{Name: args[0]} - resp, err := client.Show(cmd.Context(), &req) - if err != nil { - return err - } + req := api.ShowRequest{Name: args[0]} + resp, err := client.Show(cmd.Context(), &req) + if err != nil { + return err + } + if flagsSet == 1 { switch showType { case "license": fmt.Println(resp.License) @@ -647,12 +647,12 @@ func ShowHandler(cmd *cobra.Command, args []string) error { return nil } - req := api.ShowRequest{Name: args[0]} - resp, err := client.Show(cmd.Context(), &req) - if err != nil { - return err - } + showInfo(resp) + return nil +} + +func showInfo(resp *api.ShowResponse) { arch := resp.ModelInfo["general.architecture"].(string) modelData := [][]string{ @@ -711,8 +711,6 @@ func ShowHandler(cmd *cobra.Command, args []string) error { } table.Render() - - return nil } func renderSubTable(data [][]string, file bool) string { diff --git a/cmd/interactive.go b/cmd/interactive.go index 0a2f429b6..9214f2db5 100644 --- a/cmd/interactive.go +++ b/cmd/interactive.go @@ -404,15 +404,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error { switch args[1] { case "info": - fmt.Println("Model details:") - if len(resp.Details.Families) > 0 { - fmt.Printf("Family %s\n", strings.Join(resp.Details.Families, ", ")) - } else if resp.Details.Family != "" { - fmt.Printf("Family %s\n", resp.Details.Family) - } - fmt.Printf("Parameter Size %s\n", resp.Details.ParameterSize) - fmt.Printf("Quantization Level %s\n", resp.Details.QuantizationLevel) - fmt.Println("") + showInfo(resp) case "license": if resp.License == "" { fmt.Println("No license was specified for this model.") From aae56abb7cc96b8495a1c761a08b92cfd136d9d2 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 28 Jun 2024 13:15:57 -0700 Subject: [PATCH 025/106] Document concurrent behavior and settings --- docs/faq.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/docs/faq.md b/docs/faq.md index b50a3138c..841f1d13d 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -257,3 +257,17 @@ If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` AP ## How do I manage the maximum number of requests the Ollama server can queue? If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queue by setting `OLLAMA_MAX_QUEUE`. + +## How does Ollama handle concurrent requests? + +Ollama supports two levels of concurrent processing. If your system has sufficient available memory (system memory when using CPU inference, or VRAM for GPU inference) then multiple models can be loaded at the same time. 
For a given model, if there is sufficient available memory when the model is loaded, it is configured to allow parallel request processing. + +If there is insufficient available memory to load a new model request while one or more models are already loaded, all new requests will be queued until the new model can be loaded. As prior models become idle, one or more will be unloaded to make room for the new model. Queued requests will be processed in order. When using GPU inference new models must be able to completely fit in VRAM to allow concurrent model loads. + +Parallel request processing for a given model results in increasing the context size by the number of parallel requests. For example, a 2K context with 4 parallel requests will result in an 8K context and additional memory allocation. + +The following server settings may be used to adjust how Ollama handles concurrent requests: + +- `OLLAMA_MAX_LOADED_MODELS` - The maximum number of models that can be loaded concurrently provided they fit in available memory. The default is 3 * the number of GPUs or 3 for CPU inference. +- `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory. +- `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512 From 717f7229eb4f9220d4070aae617923950643d327 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 28 Jun 2024 19:39:31 -0700 Subject: [PATCH 026/106] Do not shift context for sliding window models (#5368) * Do not shift context for sliding window models * truncate prompt > 2/3 tokens * only target gemma2 --- llm/ext_server/server.cpp | 46 +++++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 9 deletions(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 492126a4f..3bc012521 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1650,26 +1650,41 @@ struct llama_server_context } slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep); + char buf[256]; + llama_model_meta_val_str(model, "general.architecture", buf, 256); + bool gemma2 = strcmp(buf, "gemma2") == 0; + + int32_t truncate_at = slot.n_ctx; + + // truncate at 2/3 of the context length for gemma2 models + // as they do not support context shifts (from the sliding window implementation). 
+ // this way, prompts that almost fit the context length can still generate a full + // response without a sudden stop from hitting the context limit + if (gemma2) { + truncate_at = 2 * slot.n_ctx / 3; + } + // if input prompt is too big, truncate it, if group attention self-extend is disabled - if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx) + if (slot.ga_n == 1 && slot.n_prompt_tokens >= truncate_at) { const int n_left = slot.n_ctx - slot.params.n_keep; - const int n_block_size = n_left / 2; - const int erased_blocks = (slot.n_prompt_tokens - slot.params.n_keep - n_block_size) / n_block_size; + const int n_shift = n_left / 2; + const int n_erase = slot.n_prompt_tokens - slot.params.n_keep - n_shift; std::vector new_tokens( prompt_tokens.begin(), prompt_tokens.begin() + slot.params.n_keep); new_tokens.insert( new_tokens.end(), - prompt_tokens.begin() + slot.params.n_keep + erased_blocks * n_block_size, + prompt_tokens.begin() + slot.params.n_keep + n_erase, prompt_tokens.end()); - LOG_VERBOSE("input truncated", { - {"n_ctx", slot.n_ctx}, - {"n_keep", slot.params.n_keep}, - {"n_left", n_left}, - {"new_tokens", tokens_to_str(ctx, new_tokens.cbegin(), new_tokens.cend())}, + LOG_INFO("input truncated", { + {"n_ctx", slot.n_ctx}, + {"n_keep", slot.params.n_keep}, + {"n_left", n_left}, + {"n_shift", n_shift}, + {"n_erase", n_erase}, }); slot.truncated = true; prompt_tokens = new_tokens; @@ -1678,6 +1693,19 @@ struct llama_server_context GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx); } + // Models with sliding window attention do not work with context shifts, so + // limit their prediction to the context length + if (gemma2) { + int32_t limit = slot.n_ctx - slot.n_prompt_tokens; + slot.n_predict = limit; + slot.params.n_predict = limit; + LOG_INFO("model does not support sliding window, limiting generation", { + {"n_ctx", slot.n_ctx}, + {"n_prompt_tokens", slot.n_prompt_tokens}, + {"n_predict", slot.n_predict} + }); + } + if (!slot.params.cache_prompt) { llama_sampling_reset(slot.ctx_sampling); From c1218199cfe82eda35f5e4a8031eee28f01ebf75 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sat, 29 Jun 2024 16:22:49 -0700 Subject: [PATCH 027/106] Update api.md --- docs/api.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api.md b/docs/api.md index 107b5211f..c577bb1a5 100644 --- a/docs/api.md +++ b/docs/api.md @@ -26,7 +26,7 @@ All durations are returned in nanoseconds. ### Streaming responses -Certain endpoints stream responses as JSON objects and can optional return non-streamed responses. +Certain endpoints stream responses as JSON objects. Streaming can be disabled by providing `{"stream": false}` for these endpoints. 
## Generate a completion From 27402cb7a28555a3efcaa5af054b1ce2d18e5442 Mon Sep 17 00:00:00 2001 From: Eduard Date: Mon, 1 Jul 2024 03:48:51 +0200 Subject: [PATCH 028/106] Update gpu.md (#5382) Runs fine on a NVIDIA GeForce GTX 1050 Ti --- docs/gpu.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/gpu.md b/docs/gpu.md index 55c41c9de..80f276c3b 100644 --- a/docs/gpu.md +++ b/docs/gpu.md @@ -18,7 +18,7 @@ Check your compute compatibility to see if your card is supported: | | Quadro | `RTX 8000` `RTX 6000` `RTX 5000` `RTX 4000` | | 7.0 | NVIDIA | `TITAN V` `V100` `Quadro GV100` | | 6.1 | NVIDIA TITAN | `TITAN Xp` `TITAN X` | -| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050` | +| | GeForce GTX | `GTX 1080 Ti` `GTX 1080` `GTX 1070 Ti` `GTX 1070` `GTX 1060` `GTX 1050 Ti` `GTX 1050` | | | Quadro | `P6000` `P5200` `P4200` `P3200` `P5000` `P4000` `P3000` `P2200` `P2000` `P1000` `P620` `P600` `P500` `P520` | | | Tesla | `P40` `P4` | | 6.0 | NVIDIA | `Tesla P100` `Quadro GP100` | From 1963c00201958da7165a40f9d2f22b28e11be718 Mon Sep 17 00:00:00 2001 From: RAPID ARCHITECT <126218667+rapidarchitect@users.noreply.github.com> Date: Sun, 30 Jun 2024 21:00:57 -0500 Subject: [PATCH 029/106] Update README.md (#5214) * Update README.md Added Mesop example to web & desktop * Update README.md --------- Co-authored-by: Jeffrey Morgan --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 72ed8fa5e..62f5cd65c 100644 --- a/README.md +++ b/README.md @@ -292,6 +292,7 @@ See the [API documentation](./docs/api.md) for all endpoints. - [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama) - [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS) - [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama) +- [Ollama with Google Mesop](https://github.com/rapidarchitect/ollama_mesop/) (Mesop Chat Client implementation with Ollama) ### Terminal From 97c9e11768292c8f2732e2f4c9cde72a604c936b Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 28 Jun 2024 09:57:10 -0700 Subject: [PATCH 030/106] Switch use_mmap to a pointer type This uses nil as undefined for a cleaner implementation. 
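As a quick illustration of the pattern this commit adopts, the sketch below shows how a *bool gives three states without a custom enum: nil means the user never set the option, while pointers to true or false carry an explicit choice. The struct and helper here are simplified stand-ins, not the actual api package code.

```go
package main

import "fmt"

// Runner mirrors the shape of the updated options struct: a *bool
// distinguishes "not set" (nil) from an explicit true or false.
type Runner struct {
	UseMMap *bool `json:"use_mmap,omitempty"`
}

// resolveUseMMap applies the platform default only when the user left the
// option undefined.
func resolveUseMMap(r Runner, platformDefault bool) bool {
	if r.UseMMap == nil {
		return platformDefault
	}
	return *r.UseMMap
}

func main() {
	off := false
	fmt.Println(resolveUseMMap(Runner{}, true))              // true: default applies
	fmt.Println(resolveUseMMap(Runner{UseMMap: &off}, true)) // false: explicit choice wins
}
```

Compared with the tri-state enum it replaces, this also keeps JSON handling simple: with omitempty, a nil pointer is dropped from the encoded request entirely.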
--- api/types.go | 105 ++++++++++++++++------------------------------ api/types_test.go | 40 ++++++++++-------- llm/server.go | 11 ++--- 3 files changed, 63 insertions(+), 93 deletions(-) diff --git a/api/types.go b/api/types.go index 95ed5d37e..3b67d57a3 100644 --- a/api/types.go +++ b/api/types.go @@ -159,49 +159,18 @@ type Options struct { // Runner options which must be set when the model is loaded into memory type Runner struct { - UseNUMA bool `json:"numa,omitempty"` - NumCtx int `json:"num_ctx,omitempty"` - NumBatch int `json:"num_batch,omitempty"` - NumGPU int `json:"num_gpu,omitempty"` - MainGPU int `json:"main_gpu,omitempty"` - LowVRAM bool `json:"low_vram,omitempty"` - F16KV bool `json:"f16_kv,omitempty"` - LogitsAll bool `json:"logits_all,omitempty"` - VocabOnly bool `json:"vocab_only,omitempty"` - UseMMap TriState `json:"use_mmap,omitempty"` - UseMLock bool `json:"use_mlock,omitempty"` - NumThread int `json:"num_thread,omitempty"` -} - -type TriState int - -const ( - TriStateUndefined TriState = -1 - TriStateFalse TriState = 0 - TriStateTrue TriState = 1 -) - -func (b *TriState) UnmarshalJSON(data []byte) error { - var v bool - if err := json.Unmarshal(data, &v); err != nil { - return err - } - if v { - *b = TriStateTrue - } - *b = TriStateFalse - return nil -} - -func (b *TriState) MarshalJSON() ([]byte, error) { - if *b == TriStateUndefined { - return nil, nil - } - var v bool - if *b == TriStateTrue { - v = true - } - return json.Marshal(v) + UseNUMA bool `json:"numa,omitempty"` + NumCtx int `json:"num_ctx,omitempty"` + NumBatch int `json:"num_batch,omitempty"` + NumGPU int `json:"num_gpu,omitempty"` + MainGPU int `json:"main_gpu,omitempty"` + LowVRAM bool `json:"low_vram,omitempty"` + F16KV bool `json:"f16_kv,omitempty"` + LogitsAll bool `json:"logits_all,omitempty"` + VocabOnly bool `json:"vocab_only,omitempty"` + UseMMap *bool `json:"use_mmap,omitempty"` + UseMLock bool `json:"use_mlock,omitempty"` + NumThread int `json:"num_thread,omitempty"` } // EmbeddingRequest is the request passed to [Client.Embeddings]. 
@@ -437,19 +406,6 @@ func (opts *Options) FromMap(m map[string]interface{}) error { continue } - if reflect.PointerTo(field.Type()) == reflect.TypeOf((*TriState)(nil)) { - val, ok := val.(bool) - if !ok { - return fmt.Errorf("option %q must be of type boolean", key) - } - if val { - field.SetInt(int64(TriStateTrue)) - } else { - field.SetInt(int64(TriStateFalse)) - } - continue - } - switch field.Kind() { case reflect.Int: switch t := val.(type) { @@ -496,6 +452,17 @@ func (opts *Options) FromMap(m map[string]interface{}) error { slice[i] = str } field.Set(reflect.ValueOf(slice)) + case reflect.Pointer: + var b bool + if field.Type() == reflect.TypeOf(&b) { + val, ok := val.(bool) + if !ok { + return fmt.Errorf("option %q must be of type boolean", key) + } + field.Set(reflect.ValueOf(&val)) + } else { + return fmt.Errorf("unknown type loading config params: %v %v", field.Kind(), field.Type()) + } default: return fmt.Errorf("unknown type loading config params: %v", field.Kind()) } @@ -538,7 +505,7 @@ func DefaultOptions() Options { LowVRAM: false, F16KV: true, UseMLock: false, - UseMMap: TriStateUndefined, + UseMMap: nil, UseNUMA: false, }, } @@ -608,19 +575,6 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) { } else { field := valueOpts.FieldByName(opt.Name) if field.IsValid() && field.CanSet() { - if reflect.PointerTo(field.Type()) == reflect.TypeOf((*TriState)(nil)) { - boolVal, err := strconv.ParseBool(vals[0]) - if err != nil { - return nil, fmt.Errorf("invalid bool value %s", vals) - } - if boolVal { - out[key] = TriStateTrue - } else { - out[key] = TriStateFalse - } - continue - } - switch field.Kind() { case reflect.Float32: floatVal, err := strconv.ParseFloat(vals[0], 32) @@ -648,6 +602,17 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) { case reflect.Slice: // TODO: only string slices are supported right now out[key] = vals + case reflect.Pointer: + var b bool + if field.Type() == reflect.TypeOf(&b) { + boolVal, err := strconv.ParseBool(vals[0]) + if err != nil { + return nil, fmt.Errorf("invalid bool value %s", vals) + } + out[key] = &boolVal + } else { + return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key) + } default: return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key) } diff --git a/api/types_test.go b/api/types_test.go index 8b6c60c62..c60ed90e0 100644 --- a/api/types_test.go +++ b/api/types_test.go @@ -108,25 +108,27 @@ func TestDurationMarshalUnmarshal(t *testing.T) { } func TestUseMmapParsingFromJSON(t *testing.T) { + tr := true + fa := false tests := []struct { name string req string - exp TriState + exp *bool }{ { name: "Undefined", req: `{ }`, - exp: TriStateUndefined, + exp: nil, }, { name: "True", req: `{ "use_mmap": true }`, - exp: TriStateTrue, + exp: &tr, }, { name: "False", req: `{ "use_mmap": false }`, - exp: TriStateFalse, + exp: &fa, }, } @@ -144,50 +146,52 @@ func TestUseMmapParsingFromJSON(t *testing.T) { } func TestUseMmapFormatParams(t *testing.T) { + tr := true + fa := false tests := []struct { name string req map[string][]string - exp TriState + exp *bool err error }{ { name: "True", req: map[string][]string{ - "use_mmap": []string{"true"}, + "use_mmap": {"true"}, }, - exp: TriStateTrue, + exp: &tr, err: nil, }, { name: "False", req: map[string][]string{ - "use_mmap": []string{"false"}, + "use_mmap": {"false"}, }, - exp: TriStateFalse, + exp: &fa, err: nil, }, { name: "Numeric True", req: map[string][]string{ - "use_mmap": []string{"1"}, + "use_mmap": {"1"}, }, 
- exp: TriStateTrue, + exp: &tr, err: nil, }, { name: "Numeric False", req: map[string][]string{ - "use_mmap": []string{"0"}, + "use_mmap": {"0"}, }, - exp: TriStateFalse, + exp: &fa, err: nil, }, { name: "invalid string", req: map[string][]string{ - "use_mmap": []string{"foo"}, + "use_mmap": {"foo"}, }, - exp: TriStateUndefined, + exp: nil, err: fmt.Errorf("invalid bool value [foo]"), }, } @@ -195,11 +199,11 @@ func TestUseMmapFormatParams(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { resp, err := FormatParams(test.req) - require.Equal(t, err, test.err) + require.Equal(t, test.err, err) respVal, ok := resp["use_mmap"] - if test.exp != TriStateUndefined { + if test.exp != nil { assert.True(t, ok, "resp: %v", resp) - assert.Equal(t, test.exp, respVal) + assert.Equal(t, *test.exp, *respVal.(*bool)) } }) } diff --git a/llm/server.go b/llm/server.go index 61346069e..821f6efdc 100644 --- a/llm/server.go +++ b/llm/server.go @@ -208,7 +208,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr if g.Library == "metal" && uint64(opts.NumGPU) > 0 && uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 { - opts.UseMMap = api.TriStateFalse + opts.UseMMap = new(bool) + *opts.UseMMap = false } } @@ -219,10 +220,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr // Windows CUDA should not use mmap for best performance // Linux with a model larger than free space, mmap leads to thrashing // For CPU loads we want the memory to be allocated, not FS cache - if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) || - (runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) || - (gpus[0].Library == "cpu" && opts.UseMMap == api.TriStateUndefined) || - opts.UseMMap == api.TriStateFalse { + if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) || + (runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) || + (gpus[0].Library == "cpu" && opts.UseMMap == nil) || + (opts.UseMMap != nil && !*opts.UseMMap) { params = append(params, "--no-mmap") } From 26e4e66faff20a94bb8fee9ec2bc3e17a07fb19e Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Mon, 1 Jul 2024 09:43:49 -0700 Subject: [PATCH 031/106] updated parsefile test --- parser/parser_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/parser/parser_test.go b/parser/parser_test.go index 3dc592239..171bd4206 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -22,7 +22,13 @@ ADAPTER adapter1 LICENSE MIT PARAMETER param1 value1 PARAMETER param2 value2 -TEMPLATE template1 +TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|> + +{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> + +{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> + +{{ .Response }}<|eot_id|>""" ` reader := strings.NewReader(input) @@ -36,7 +42,7 @@ TEMPLATE template1 {Name: "license", Args: "MIT"}, {Name: "param1", Args: "value1"}, {Name: "param2", Args: "value2"}, - {Name: "template", Args: "template1"}, + {Name: "template", Args: "{{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>"}, } assert.Equal(t, 
expectedCommands, modelfile.Commands) From cff3f44f4a4097de864d70d9a95f31c62e8ecdfa Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 1 Jul 2024 09:43:59 -0700 Subject: [PATCH 032/106] Fix case for NumCtx --- server/sched.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/server/sched.go b/server/sched.go index 87da1db47..71b535ae2 100644 --- a/server/sched.go +++ b/server/sched.go @@ -23,7 +23,7 @@ type LlmRequest struct { ctx context.Context //nolint:containedctx model *Model opts api.Options - origNumCTX int // Track the initial ctx request + origNumCtx int // Track the initial ctx request sessionDuration time.Duration successCh chan *runnerRef errCh chan error @@ -118,8 +118,8 @@ func (s *Scheduler) processPending(ctx context.Context) { case pending := <-s.pendingReqCh: // Block other requests until we get this pending request running pending.schedAttempts++ - if pending.origNumCTX == 0 { - pending.origNumCTX = pending.opts.NumCtx + if pending.origNumCtx == 0 { + pending.origNumCtx = pending.opts.NumCtx } if pending.ctx.Err() != nil { @@ -135,7 +135,7 @@ func (s *Scheduler) processPending(ctx context.Context) { } // Keep NumCtx and numParallel in sync if numParallel > 1 { - pending.opts.NumCtx = pending.origNumCTX * numParallel + pending.opts.NumCtx = pending.origNumCtx * numParallel } for { @@ -197,7 +197,7 @@ func (s *Scheduler) processPending(ctx context.Context) { // simplifying assumption of defaultParallel when in CPU mode if numParallel <= 0 { numParallel = defaultParallel - pending.opts.NumCtx = pending.origNumCTX * numParallel + pending.opts.NumCtx = pending.origNumCtx * numParallel } if loadedCount == 0 { @@ -691,7 +691,7 @@ func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numP // First attempt to fit the model into a single GPU for _, p := range numParallelToTry { - req.opts.NumCtx = req.origNumCTX * p + req.opts.NumCtx = req.origNumCtx * p if !envconfig.SchedSpread { for _, g := range sgl { if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok { @@ -709,7 +709,7 @@ func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numP // Now try all the GPUs for _, p := range numParallelToTry { - req.opts.NumCtx = req.origNumCTX * p + req.opts.NumCtx = req.origNumCtx * p if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok { slog.Info("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "parallel", p, "required", format.HumanBytes2(estimatedVRAM)) *numParallel = p From 173b5504381a77b042f3957226a23c0569406aca Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 1 Jul 2024 09:48:05 -0700 Subject: [PATCH 033/106] Remove default auto from help message This may confuse users thinking "auto" is an acceptable string - it must be numeric --- envconfig/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/envconfig/config.go b/envconfig/config.go index 0f0f7f058..c02c4878e 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -85,13 +85,13 @@ func AsMap() map[string]EnvVar { "OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"}, "OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"}, "OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass 
autodetection"}, - "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU (default auto)"}, + "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU"}, "OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"}, "OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"}, "OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"}, "OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"}, "OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"}, - "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default auto)"}, + "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests"}, "OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"}, "OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"}, "OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"}, From 3f0b309ad4c49c0d87839e50fe6a46163902aba0 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 10 Jun 2024 08:47:13 -0700 Subject: [PATCH 034/106] remove ManifestV2 --- server/images.go | 17 +++++------------ server/manifest.go | 20 +++++++++++--------- server/manifest_test.go | 2 +- 3 files changed, 17 insertions(+), 22 deletions(-) diff --git a/server/images.go b/server/images.go index e949fb18a..447a63a69 100644 --- a/server/images.go +++ b/server/images.go @@ -135,13 +135,6 @@ type Message struct { Content string `json:"content"` } -type ManifestV2 struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Config *Layer `json:"config"` - Layers []*Layer `json:"layers"` -} - type ConfigV2 struct { ModelFormat string `json:"model_format"` ModelFamily string `json:"model_family"` @@ -160,7 +153,7 @@ type RootFS struct { DiffIDs []string `json:"diff_ids"` } -func GetManifest(mp ModelPath) (*ManifestV2, string, error) { +func GetManifest(mp ModelPath) (*Manifest, string, error) { fp, err := mp.GetManifestPath() if err != nil { return nil, "", err @@ -170,7 +163,7 @@ func GetManifest(mp ModelPath) (*ManifestV2, string, error) { return nil, "", err } - var manifest *ManifestV2 + var manifest *Manifest bts, err := os.ReadFile(fp) if err != nil { @@ -822,7 +815,7 @@ func PushModel(ctx context.Context, name string, regOpts *registryOptions, fn fu func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn func(api.ProgressResponse)) error { mp := ParseModelPath(name) - var manifest *ManifestV2 + var manifest *Manifest var err error var noprune string @@ -929,7 +922,7 @@ func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn fu return nil } -func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptions) (*ManifestV2, error) { +func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptions) (*Manifest, error) { requestURL := mp.BaseURL().JoinPath("v2", mp.GetNamespaceRepository(), "manifests", mp.Tag) headers := make(http.Header) @@ -940,7 +933,7 @@ func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptio } defer resp.Body.Close() - var m *ManifestV2 + var m *Manifest if err := json.NewDecoder(resp.Body).Decode(&m); err != nil { return nil, err } diff --git a/server/manifest.go 
b/server/manifest.go index 61dd1ab4e..726bb48d8 100644 --- a/server/manifest.go +++ b/server/manifest.go @@ -14,7 +14,10 @@ import ( ) type Manifest struct { - ManifestV2 + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config *Layer `json:"config"` + Layers []*Layer `json:"layers"` filepath string fi os.FileInfo @@ -66,7 +69,7 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) { p := filepath.Join(manifests, n.Filepath()) - var m ManifestV2 + var m Manifest f, err := os.Open(p) if err != nil { return nil, err @@ -83,12 +86,11 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) { return nil, err } - return &Manifest{ - ManifestV2: m, - filepath: p, - fi: fi, - digest: fmt.Sprintf("%x", sha256sum.Sum(nil)), - }, nil + m.filepath = p + m.fi = fi + m.digest = fmt.Sprintf("%x", sha256sum.Sum(nil)) + + return &m, nil } func WriteManifest(name model.Name, config *Layer, layers []*Layer) error { @@ -108,7 +110,7 @@ func WriteManifest(name model.Name, config *Layer, layers []*Layer) error { } defer f.Close() - m := ManifestV2{ + m := Manifest{ SchemaVersion: 2, MediaType: "application/vnd.docker.distribution.manifest.v2+json", Config: config, diff --git a/server/manifest_test.go b/server/manifest_test.go index ceee31d88..ca6c3d2e9 100644 --- a/server/manifest_test.go +++ b/server/manifest_test.go @@ -25,7 +25,7 @@ func createManifest(t *testing.T, path, name string) { } defer f.Close() - if err := json.NewEncoder(f).Encode(ManifestV2{}); err != nil { + if err := json.NewEncoder(f).Encode(Manifest{}); err != nil { t.Fatal(err) } } From 58e3fff311f9e7abec20cdfe20fa43958e447aeb Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 10 Jun 2024 14:54:42 -0700 Subject: [PATCH 035/106] rename templates to template --- server/images.go | 26 ++- server/model.go | 4 +- server/prompt.go | 18 +- server/prompt_test.go | 15 +- server/routes.go | 26 ++- {templates => template}/alfred.gotmpl | 0 {templates => template}/alpaca.gotmpl | 0 {templates => template}/chatml.gotmpl | 0 {templates => template}/chatqa.gotmpl | 0 .../codellama-70b-instruct.gotmpl | 0 .../falcon-instruct.gotmpl | 0 {templates => template}/gemma-instruct.gotmpl | 0 .../granite-instruct.gotmpl | 0 {templates => template}/index.json | 0 {templates => template}/llama2-chat.gotmpl | 0 .../llama3-instruct.gotmpl | 0 {templates => template}/magicoder.gotmpl | 0 .../mistral-instruct.gotmpl | 0 {templates => template}/openchat.gotmpl | 0 {templates => template}/phi-3.gotmpl | 0 {templates => template}/solar-instruct.gotmpl | 0 .../starcoder2-instruct.gotmpl | 0 template/template.go | 158 ++++++++++++++++++ template/template_test.go | 89 ++++++++++ .../testdata/templates.jsonl | 0 {templates => template}/vicuna.gotmpl | 0 {templates => template}/zephyr.gotmpl | 0 templates/template.go | 70 -------- templates/template_test.go | 59 ------- 29 files changed, 301 insertions(+), 164 deletions(-) rename {templates => template}/alfred.gotmpl (100%) rename {templates => template}/alpaca.gotmpl (100%) rename {templates => template}/chatml.gotmpl (100%) rename {templates => template}/chatqa.gotmpl (100%) rename {templates => template}/codellama-70b-instruct.gotmpl (100%) rename {templates => template}/falcon-instruct.gotmpl (100%) rename {templates => template}/gemma-instruct.gotmpl (100%) rename {templates => template}/granite-instruct.gotmpl (100%) rename {templates => template}/index.json (100%) rename {templates => template}/llama2-chat.gotmpl (100%) rename {templates => template}/llama3-instruct.gotmpl 
(100%) rename {templates => template}/magicoder.gotmpl (100%) rename {templates => template}/mistral-instruct.gotmpl (100%) rename {templates => template}/openchat.gotmpl (100%) rename {templates => template}/phi-3.gotmpl (100%) rename {templates => template}/solar-instruct.gotmpl (100%) rename {templates => template}/starcoder2-instruct.gotmpl (100%) create mode 100644 template/template.go create mode 100644 template/template_test.go rename {templates => template}/testdata/templates.jsonl (100%) rename {templates => template}/vicuna.gotmpl (100%) rename {templates => template}/zephyr.gotmpl (100%) delete mode 100644 templates/template.go delete mode 100644 templates/template_test.go diff --git a/server/images.go b/server/images.go index 447a63a69..65ed51c76 100644 --- a/server/images.go +++ b/server/images.go @@ -28,6 +28,7 @@ import ( "github.com/ollama/ollama/format" "github.com/ollama/ollama/llm" "github.com/ollama/ollama/parser" + "github.com/ollama/ollama/template" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" "github.com/ollama/ollama/version" @@ -48,12 +49,13 @@ type Model struct { ParentModel string AdapterPaths []string ProjectorPaths []string - Template string System string License []string Digest string Options map[string]interface{} Messages []Message + + Template *template.Template } func (m *Model) IsEmbedding() bool { @@ -82,10 +84,10 @@ func (m *Model) String() string { }) } - if m.Template != "" { + if m.Template != nil { modelfile.Commands = append(modelfile.Commands, parser.Command{ Name: "template", - Args: m.Template, + Args: m.Template.String(), }) } @@ -191,8 +193,7 @@ func GetModel(name string) (*Model, error) { Name: mp.GetFullTagname(), ShortName: mp.GetShortTagname(), Digest: digest, - Template: "{{ .Prompt }}", - License: []string{}, + Template: template.DefaultTemplate, } filename, err := GetBlobsPath(manifest.Config.Digest) @@ -228,13 +229,17 @@ func GetModel(name string) (*Model, error) { model.AdapterPaths = append(model.AdapterPaths, filename) case "application/vnd.ollama.image.projector": model.ProjectorPaths = append(model.ProjectorPaths, filename) - case "application/vnd.ollama.image.template": + case "application/vnd.ollama.image.prompt", + "application/vnd.ollama.image.template": bts, err := os.ReadFile(filename) if err != nil { return nil, err } - model.Template = string(bts) + model.Template, err = template.Parse(string(bts)) + if err != nil { + return nil, err + } case "application/vnd.ollama.image.system": bts, err := os.ReadFile(filename) if err != nil { @@ -242,13 +247,6 @@ func GetModel(name string) (*Model, error) { } model.System = string(bts) - case "application/vnd.ollama.image.prompt": - bts, err := os.ReadFile(filename) - if err != nil { - return nil, err - } - - model.Template = string(bts) case "application/vnd.ollama.image.params": params, err := os.Open(filename) if err != nil { diff --git a/server/model.go b/server/model.go index d56e641ba..6abb5b392 100644 --- a/server/model.go +++ b/server/model.go @@ -16,7 +16,7 @@ import ( "github.com/ollama/ollama/api" "github.com/ollama/ollama/convert" "github.com/ollama/ollama/llm" - "github.com/ollama/ollama/templates" + "github.com/ollama/ollama/template" "github.com/ollama/ollama/types/model" ) @@ -258,7 +258,7 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap func detectChatTemplate(layers []*layerGGML) ([]*layerGGML, error) { for _, layer := range layers { if s := layer.GGML.KV().ChatTemplate(); s != "" { - if t, err := 
templates.NamedTemplate(s); err != nil { + if t, err := template.Named(s); err != nil { slog.Debug("template detection", "error", err) } else { tmpl, err := NewLayer(t.Reader(), "application/vnd.ollama.image.template") diff --git a/server/prompt.go b/server/prompt.go index 604e69717..bfc319a50 100644 --- a/server/prompt.go +++ b/server/prompt.go @@ -4,10 +4,11 @@ import ( "fmt" "log/slog" "strings" - "text/template" + "text/template/parse" "github.com/ollama/ollama/api" + "github.com/ollama/ollama/template" ) // isResponseNode checks if the node contains .Response @@ -53,13 +54,8 @@ func formatTemplateForResponse(tmpl *template.Template, generate bool) { // Prompt renders a prompt from a template. If generate is set to true, // the response and parts of the template following it are not rendered -func Prompt(tmpl, system, prompt, response string, generate bool) (string, error) { - parsed, err := template.New("").Option("missingkey=zero").Parse(tmpl) - if err != nil { - return "", err - } - - formatTemplateForResponse(parsed, generate) +func Prompt(tmpl *template.Template, system, prompt, response string, generate bool) (string, error) { + formatTemplateForResponse(tmpl, generate) vars := map[string]any{ "System": system, @@ -68,14 +64,14 @@ func Prompt(tmpl, system, prompt, response string, generate bool) (string, error } var sb strings.Builder - if err := parsed.Execute(&sb, vars); err != nil { + if err := tmpl.Execute(&sb, vars); err != nil { return "", err } return sb.String(), nil } -func countTokens(tmpl string, system string, prompt string, response string, encode func(string) ([]int, error)) (int, error) { +func countTokens(tmpl *template.Template, system string, prompt string, response string, encode func(string) ([]int, error)) (int, error) { rendered, err := Prompt(tmpl, system, prompt, response, false) if err != nil { return 0, err @@ -91,7 +87,7 @@ func countTokens(tmpl string, system string, prompt string, response string, enc } // ChatPrompt builds up a prompt from a series of messages, truncating based on context window size -func ChatPrompt(tmpl string, messages []api.Message, window int, encode func(string) ([]int, error)) (string, error) { +func ChatPrompt(tmpl *template.Template, messages []api.Message, window int, encode func(string) ([]int, error)) (string, error) { type prompt struct { System string Prompt string diff --git a/server/prompt_test.go b/server/prompt_test.go index a7e18a70f..7df58d0bd 100644 --- a/server/prompt_test.go +++ b/server/prompt_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ollama/ollama/api" + "github.com/ollama/ollama/template" ) func TestPrompt(t *testing.T) { @@ -61,7 +62,12 @@ func TestPrompt(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got, err := Prompt(tc.template, tc.system, tc.prompt, tc.response, tc.generate) + tmpl, err := template.Parse(tc.template) + if err != nil { + t.Fatal(err) + } + + got, err := Prompt(tmpl, tc.system, tc.prompt, tc.response, tc.generate) if err != nil { t.Errorf("error = %v", err) } @@ -192,7 +198,12 @@ func TestChatPrompt(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got, err := ChatPrompt(tc.template, tc.messages, tc.window, encode) + tmpl, err := template.Parse(tc.template) + if err != nil { + t.Fatal(err) + } + + got, err := ChatPrompt(tmpl, tc.messages, tc.window, encode) if err != nil { t.Errorf("error = %v", err) } diff --git a/server/routes.go b/server/routes.go index 76ead072f..d8a4a67e7 100644 --- a/server/routes.go +++ 
b/server/routes.go @@ -31,6 +31,7 @@ import ( "github.com/ollama/ollama/llm" "github.com/ollama/ollama/openai" "github.com/ollama/ollama/parser" + "github.com/ollama/ollama/template" "github.com/ollama/ollama/types/errtypes" "github.com/ollama/ollama/types/model" "github.com/ollama/ollama/version" @@ -161,6 +162,12 @@ func (s *Server) GenerateHandler(c *gin.Context) { return } + tmpl, err := template.Parse(req.Template) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + checkpointLoaded := time.Now() var prompt string @@ -169,7 +176,11 @@ func (s *Server) GenerateHandler(c *gin.Context) { prompt = req.Prompt case req.Prompt != "": if req.Template == "" { - req.Template = model.Template + model.Template, err = template.Parse(req.Template) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } } if req.System == "" { @@ -187,7 +198,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { sb.WriteString(req.Prompt) - p, err := Prompt(req.Template, req.System, sb.String(), "", true) + p, err := Prompt(tmpl, req.System, sb.String(), "", true) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return @@ -242,7 +253,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { resp.LoadDuration = checkpointLoaded.Sub(checkpointStart) if !req.Raw { - p, err := Prompt(req.Template, req.System, req.Prompt, generated.String(), false) + p, err := Prompt(tmpl, req.System, req.Prompt, generated.String(), false) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return @@ -680,7 +691,10 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) { } if req.Template != "" { - m.Template = req.Template + m.Template, err = template.Parse(req.Template) + if err != nil { + return nil, err + } } msgs := make([]api.Message, 0) @@ -701,7 +715,7 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) { resp := &api.ShowResponse{ License: strings.Join(m.License, "\n"), System: m.System, - Template: m.Template, + Template: m.Template.String(), Details: modelDetails, Messages: msgs, ModifiedAt: manifest.fi.ModTime(), @@ -1246,7 +1260,7 @@ func (s *Server) ProcessHandler(c *gin.Context) { } // ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model -func chatPrompt(ctx context.Context, runner *runnerRef, template string, messages []api.Message, numCtx int) (string, error) { +func chatPrompt(ctx context.Context, runner *runnerRef, template *template.Template, messages []api.Message, numCtx int) (string, error) { encode := func(s string) ([]int, error) { return runner.llama.Tokenize(ctx, s) } diff --git a/templates/alfred.gotmpl b/template/alfred.gotmpl similarity index 100% rename from templates/alfred.gotmpl rename to template/alfred.gotmpl diff --git a/templates/alpaca.gotmpl b/template/alpaca.gotmpl similarity index 100% rename from templates/alpaca.gotmpl rename to template/alpaca.gotmpl diff --git a/templates/chatml.gotmpl b/template/chatml.gotmpl similarity index 100% rename from templates/chatml.gotmpl rename to template/chatml.gotmpl diff --git a/templates/chatqa.gotmpl b/template/chatqa.gotmpl similarity index 100% rename from templates/chatqa.gotmpl rename to template/chatqa.gotmpl diff --git a/templates/codellama-70b-instruct.gotmpl b/template/codellama-70b-instruct.gotmpl similarity index 100% rename from templates/codellama-70b-instruct.gotmpl rename to template/codellama-70b-instruct.gotmpl diff 
--git a/templates/falcon-instruct.gotmpl b/template/falcon-instruct.gotmpl similarity index 100% rename from templates/falcon-instruct.gotmpl rename to template/falcon-instruct.gotmpl diff --git a/templates/gemma-instruct.gotmpl b/template/gemma-instruct.gotmpl similarity index 100% rename from templates/gemma-instruct.gotmpl rename to template/gemma-instruct.gotmpl diff --git a/templates/granite-instruct.gotmpl b/template/granite-instruct.gotmpl similarity index 100% rename from templates/granite-instruct.gotmpl rename to template/granite-instruct.gotmpl diff --git a/templates/index.json b/template/index.json similarity index 100% rename from templates/index.json rename to template/index.json diff --git a/templates/llama2-chat.gotmpl b/template/llama2-chat.gotmpl similarity index 100% rename from templates/llama2-chat.gotmpl rename to template/llama2-chat.gotmpl diff --git a/templates/llama3-instruct.gotmpl b/template/llama3-instruct.gotmpl similarity index 100% rename from templates/llama3-instruct.gotmpl rename to template/llama3-instruct.gotmpl diff --git a/templates/magicoder.gotmpl b/template/magicoder.gotmpl similarity index 100% rename from templates/magicoder.gotmpl rename to template/magicoder.gotmpl diff --git a/templates/mistral-instruct.gotmpl b/template/mistral-instruct.gotmpl similarity index 100% rename from templates/mistral-instruct.gotmpl rename to template/mistral-instruct.gotmpl diff --git a/templates/openchat.gotmpl b/template/openchat.gotmpl similarity index 100% rename from templates/openchat.gotmpl rename to template/openchat.gotmpl diff --git a/templates/phi-3.gotmpl b/template/phi-3.gotmpl similarity index 100% rename from templates/phi-3.gotmpl rename to template/phi-3.gotmpl diff --git a/templates/solar-instruct.gotmpl b/template/solar-instruct.gotmpl similarity index 100% rename from templates/solar-instruct.gotmpl rename to template/solar-instruct.gotmpl diff --git a/templates/starcoder2-instruct.gotmpl b/template/starcoder2-instruct.gotmpl similarity index 100% rename from templates/starcoder2-instruct.gotmpl rename to template/starcoder2-instruct.gotmpl diff --git a/template/template.go b/template/template.go new file mode 100644 index 000000000..d15f7156f --- /dev/null +++ b/template/template.go @@ -0,0 +1,158 @@ +package template + +import ( + "bytes" + "embed" + "encoding/json" + "errors" + "io" + "math" + "slices" + "strings" + "sync" + "text/template" + "text/template/parse" + + "github.com/agnivade/levenshtein" + "golang.org/x/exp/maps" +) + +//go:embed index.json +var indexBytes []byte + +//go:embed *.gotmpl +var templatesFS embed.FS + +var templatesOnce = sync.OnceValues(func() ([]*named, error) { + var templates []*named + if err := json.Unmarshal(indexBytes, &templates); err != nil { + return nil, err + } + + for _, t := range templates { + bts, err := templatesFS.ReadFile(t.Name + ".gotmpl") + if err != nil { + return nil, err + } + + // normalize line endings + t.Bytes = bytes.ReplaceAll(bts, []byte("\r\n"), []byte("\n")) + } + + return templates, nil +}) + +type named struct { + Name string `json:"name"` + Template string `json:"template"` + Bytes []byte +} + +func (t named) Reader() io.Reader { + return bytes.NewReader(t.Bytes) +} + +func Named(s string) (*named, error) { + templates, err := templatesOnce() + if err != nil { + return nil, err + } + + var template *named + score := math.MaxInt + for _, t := range templates { + if s := levenshtein.ComputeDistance(s, t.Template); s < score { + score = s + template = t + } + } + + if score < 100 { 
+ return template, nil + } + + return nil, errors.New("no matching template found") +} + +type Template struct { + *template.Template + raw string +} + +func (t *Template) String() string { + return t.raw +} + +var DefaultTemplate, _ = Parse("{{ .Prompt }}") + +func Parse(s string) (*Template, error) { + t, err := template.New("").Option("missingkey=zero").Parse(s) + if err != nil { + return nil, err + } + + return &Template{Template: t, raw: s}, nil +} + +func (t *Template) Vars() []string { + var vars []string + for _, n := range t.Tree.Root.Nodes { + vars = append(vars, parseNode(n)...) + } + + set := make(map[string]struct{}) + for _, n := range vars { + set[strings.ToLower(n)] = struct{}{} + } + + vars = maps.Keys(set) + slices.Sort(vars) + return vars +} + +func parseNode(n parse.Node) []string { + switch n := n.(type) { + case *parse.ActionNode: + return parseNode(n.Pipe) + case *parse.IfNode: + names := parseNode(n.Pipe) + names = append(names, parseNode(n.List)...) + if n.ElseList != nil { + names = append(names, parseNode(n.ElseList)...) + } + return names + case *parse.RangeNode: + names := parseNode(n.Pipe) + names = append(names, parseNode(n.List)...) + if n.ElseList != nil { + names = append(names, parseNode(n.ElseList)...) + } + return names + case *parse.WithNode: + names := parseNode(n.Pipe) + names = append(names, parseNode(n.List)...) + if n.ElseList != nil { + names = append(names, parseNode(n.ElseList)...) + } + return names + case *parse.PipeNode: + var names []string + for _, c := range n.Cmds { + for _, a := range c.Args { + names = append(names, parseNode(a)...) + } + } + return names + case *parse.ListNode: + var names []string + for _, n := range n.Nodes { + names = append(names, parseNode(n)...) + } + + return names + case *parse.FieldNode: + return n.Ident + } + + return nil +} diff --git a/template/template_test.go b/template/template_test.go new file mode 100644 index 000000000..e5405bdb4 --- /dev/null +++ b/template/template_test.go @@ -0,0 +1,89 @@ +package template + +import ( + "bufio" + "bytes" + "encoding/json" + "io" + "os" + "path/filepath" + "slices" + "testing" + "text/template" + + "github.com/ollama/ollama/llm" +) + +func TestNamed(t *testing.T) { + f, err := os.Open(filepath.Join("testdata", "templates.jsonl")) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + var ss map[string]string + if err := json.Unmarshal(scanner.Bytes(), &ss); err != nil { + t.Fatal(err) + } + + for k, v := range ss { + t.Run(k, func(t *testing.T) { + kv := llm.KV{"tokenizer.chat_template": v} + s := kv.ChatTemplate() + r, err := Named(s) + if err != nil { + t.Fatal(err) + } + + if r.Name != k { + t.Errorf("expected %q, got %q", k, r.Name) + } + + var b bytes.Buffer + if _, err := io.Copy(&b, r.Reader()); err != nil { + t.Fatal(err) + } + + tmpl, err := template.New(s).Parse(b.String()) + if err != nil { + t.Fatal(err) + } + + if tmpl.Tree.Root.String() == "" { + t.Errorf("empty %s template", k) + } + }) + } + } +} + +func TestParse(t *testing.T) { + cases := []struct { + template string + capabilities []string + }{ + {"{{ .Prompt }}", []string{"prompt"}}, + {"{{ .System }} {{ .Prompt }}", []string{"prompt", "system"}}, + {"{{ .System }} {{ .Prompt }} {{ .Response }}", []string{"prompt", "response", "system"}}, + {"{{ with .Tools }}{{ . 
}}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "system", "tools"}}, + {"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}}, + {"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}}, + {"{{ .Prompt }} {{ .Suffix }}", []string{"prompt", "suffix"}}, + } + + for _, tt := range cases { + t.Run("", func(t *testing.T) { + tmpl, err := Parse(tt.template) + if err != nil { + t.Fatal(err) + } + + vars := tmpl.Vars() + if !slices.Equal(tt.capabilities, vars) { + t.Errorf("expected %v, got %v", tt.capabilities, vars) + } + }) + } +} diff --git a/templates/testdata/templates.jsonl b/template/testdata/templates.jsonl similarity index 100% rename from templates/testdata/templates.jsonl rename to template/testdata/templates.jsonl diff --git a/templates/vicuna.gotmpl b/template/vicuna.gotmpl similarity index 100% rename from templates/vicuna.gotmpl rename to template/vicuna.gotmpl diff --git a/templates/zephyr.gotmpl b/template/zephyr.gotmpl similarity index 100% rename from templates/zephyr.gotmpl rename to template/zephyr.gotmpl diff --git a/templates/template.go b/templates/template.go deleted file mode 100644 index 72bd69e9d..000000000 --- a/templates/template.go +++ /dev/null @@ -1,70 +0,0 @@ -package templates - -import ( - "bytes" - "embed" - "encoding/json" - "errors" - "io" - "math" - "sync" - - "github.com/agnivade/levenshtein" -) - -//go:embed index.json -var indexBytes []byte - -//go:embed *.gotmpl -var templatesFS embed.FS - -var templatesOnce = sync.OnceValues(func() ([]*Template, error) { - var templates []*Template - if err := json.Unmarshal(indexBytes, &templates); err != nil { - return nil, err - } - - for _, t := range templates { - bts, err := templatesFS.ReadFile(t.Name + ".gotmpl") - if err != nil { - return nil, err - } - - // normalize line endings - t.Bytes = bytes.ReplaceAll(bts, []byte("\r\n"), []byte("\n")) - } - - return templates, nil -}) - -type Template struct { - Name string `json:"name"` - Template string `json:"template"` - Bytes []byte -} - -func (t Template) Reader() io.Reader { - return bytes.NewReader(t.Bytes) -} - -func NamedTemplate(s string) (*Template, error) { - templates, err := templatesOnce() - if err != nil { - return nil, err - } - - var template *Template - score := math.MaxInt - for _, t := range templates { - if s := levenshtein.ComputeDistance(s, t.Template); s < score { - score = s - template = t - } - } - - if score < 100 { - return template, nil - } - - return nil, errors.New("no matching template found") -} diff --git a/templates/template_test.go b/templates/template_test.go deleted file mode 100644 index 61bc78374..000000000 --- a/templates/template_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package templates - -import ( - "bufio" - "bytes" - "encoding/json" - "io" - "os" - "path/filepath" - "testing" - "text/template" - - "github.com/ollama/ollama/llm" -) - -func TestKVChatTemplate(t *testing.T) { - f, err := os.Open(filepath.Join("testdata", "templates.jsonl")) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - var ss map[string]string - if err := json.Unmarshal(scanner.Bytes(), &ss); err != nil { - t.Fatal(err) - } - - for k, v := range ss { - t.Run(k, func(t *testing.T) { - kv := llm.KV{"tokenizer.chat_template": v} - s := 
kv.ChatTemplate() - r, err := NamedTemplate(s) - if err != nil { - t.Fatal(err) - } - - if r.Name != k { - t.Errorf("expected %q, got %q", k, r.Name) - } - - var b bytes.Buffer - if _, err := io.Copy(&b, r.Reader()); err != nil { - t.Fatal(err) - } - - tmpl, err := template.New(s).Parse(b.String()) - if err != nil { - t.Fatal(err) - } - - if tmpl.Tree.Root.String() == "" { - t.Errorf("empty %s template", k) - } - }) - } - } -} From a30915bde166b2f392a0ff72c61c9ac53189a962 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 11 Jun 2024 14:03:42 -0700 Subject: [PATCH 036/106] add capabilities --- server/images.go | 20 ++++++++++++++++++-- server/routes.go | 8 ++++---- template/template_test.go | 8 ++++---- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/server/images.go b/server/images.go index 65ed51c76..5cd0a7a53 100644 --- a/server/images.go +++ b/server/images.go @@ -34,6 +34,10 @@ import ( "github.com/ollama/ollama/version" ) +type Capability string + +const CapabilityCompletion = Capability("completion") + type registryOptions struct { Insecure bool Username string @@ -58,8 +62,20 @@ type Model struct { Template *template.Template } -func (m *Model) IsEmbedding() bool { - return slices.Contains(m.Config.ModelFamilies, "bert") || slices.Contains(m.Config.ModelFamilies, "nomic-bert") +func (m *Model) Has(caps ...Capability) bool { + for _, cap := range caps { + switch cap { + case CapabilityCompletion: + if slices.Contains(m.Config.ModelFamilies, "bert") || slices.Contains(m.Config.ModelFamilies, "nomic-bert") { + return false + } + default: + slog.Error("unknown capability", "capability", cap) + return false + } + } + + return true } func (m *Model) String() string { diff --git a/server/routes.go b/server/routes.go index d8a4a67e7..8ca6dcc89 100644 --- a/server/routes.go +++ b/server/routes.go @@ -122,8 +122,8 @@ func (s *Server) GenerateHandler(c *gin.Context) { return } - if model.IsEmbedding() { - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "embedding models do not support generate"}) + if !model.Has(CapabilityCompletion) { + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support generate", req.Model)}) return } @@ -1308,8 +1308,8 @@ func (s *Server) ChatHandler(c *gin.Context) { return } - if model.IsEmbedding() { - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "embedding models do not support chat"}) + if !model.Has(CapabilityCompletion) { + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support chat", req.Model)}) return } diff --git a/template/template_test.go b/template/template_test.go index e5405bdb4..eda4634f4 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -61,8 +61,8 @@ func TestNamed(t *testing.T) { func TestParse(t *testing.T) { cases := []struct { - template string - capabilities []string + template string + vars []string }{ {"{{ .Prompt }}", []string{"prompt"}}, {"{{ .System }} {{ .Prompt }}", []string{"prompt", "system"}}, @@ -81,8 +81,8 @@ func TestParse(t *testing.T) { } vars := tmpl.Vars() - if !slices.Equal(tt.capabilities, vars) { - t.Errorf("expected %v, got %v", tt.capabilities, vars) + if !slices.Equal(tt.vars, vars) { + t.Errorf("expected %v, got %v", tt.vars, vars) } }) } From da8e2a04479f96ad9c57eaf25ed26b79b239b05c Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 14 Jun 2024 14:57:49 -0700 Subject: [PATCH 037/106] use kvs to detect embedding models --- server/images.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 
deletion(-) diff --git a/server/images.go b/server/images.go index 5cd0a7a53..a62991f16 100644 --- a/server/images.go +++ b/server/images.go @@ -66,7 +66,21 @@ func (m *Model) Has(caps ...Capability) bool { for _, cap := range caps { switch cap { case CapabilityCompletion: - if slices.Contains(m.Config.ModelFamilies, "bert") || slices.Contains(m.Config.ModelFamilies, "nomic-bert") { + f, err := os.Open(m.ModelPath) + if err != nil { + slog.Error("couldn't open model file", "error", err) + continue + } + defer f.Close() + + // TODO(mxyng): decode the GGML into model to avoid doing this multiple times + ggml, _, err := llm.DecodeGGML(f, 0) + if err != nil { + slog.Error("couldn't decode ggml", "error", err) + continue + } + + if _, ok := ggml.KV()[fmt.Sprintf("%s.pooling_type", ggml.KV().Architecture())]; ok { return false } default: From 7e571f95f0306f90e4f754e34df96ebc36f93626 Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Mon, 1 Jul 2024 11:07:48 -0700 Subject: [PATCH 038/106] trimspace test case --- parser/parser_test.go | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/parser/parser_test.go b/parser/parser_test.go index 171bd4206..2b5c4c888 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -48,6 +48,39 @@ TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|> assert.Equal(t, expectedCommands, modelfile.Commands) } +func TestParseFileTrimSpace(t *testing.T) { + input := ` +FROM " model 1" +ADAPTER adapter3 +LICENSE "MIT " +PARAMETER param1 value1 +PARAMETER param2 value2 +TEMPLATE """ {{ if .System }}<|start_header_id|>system<|end_header_id|> + +{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> + +{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> + +{{ .Response }}<|eot_id|> """ +` + + reader := strings.NewReader(input) + + modelfile, err := ParseFile(reader) + require.NoError(t, err) + + expectedCommands := []Command{ + {Name: "model", Args: " model 1"}, + {Name: "adapter", Args: "adapter3"}, + {Name: "license", Args: "MIT "}, + {Name: "param1", Args: "value1"}, + {Name: "param2", Args: "value2"}, + {Name: "template", Args: " {{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|> "}, + } + + assert.Equal(t, expectedCommands, modelfile.Commands) +} + func TestParseFileFrom(t *testing.T) { var cases = []struct { input string From 88bcd79bb9a4b2baa739efe2ccabcbcf3c89bdb5 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Sun, 30 Jun 2024 11:10:40 -0700 Subject: [PATCH 039/106] err on insecure path --- server/model.go | 8 +++----- server/model_test.go | 24 ++++++++++++++++++++++-- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/server/model.go b/server/model.go index d56e641ba..7d5957a18 100644 --- a/server/model.go +++ b/server/model.go @@ -11,7 +11,6 @@ import ( "net/http" "os" "path/filepath" - "strings" "github.com/ollama/ollama/api" "github.com/ollama/ollama/convert" @@ -91,12 +90,11 @@ func extractFromZipFile(p string, file *os.File, fn func(api.ProgressResponse)) fn(api.ProgressResponse{Status: "unpacking model metadata"}) for _, f := range r.File { - n := filepath.Join(p, f.Name) - if !strings.HasPrefix(n, p) { - slog.Warn("skipped extracting file outside of context", "name", f.Name) - continue + if !filepath.IsLocal(f.Name) { + return 
fmt.Errorf("%w: %s", zip.ErrInsecurePath, f.Name) } + n := filepath.Join(p, f.Name) if err := os.MkdirAll(filepath.Dir(n), 0o750); err != nil { return err } diff --git a/server/model_test.go b/server/model_test.go index c3023eb2b..a383b7e72 100644 --- a/server/model_test.go +++ b/server/model_test.go @@ -3,10 +3,12 @@ package server import ( "archive/zip" "bytes" + "errors" "io" "os" "path/filepath" "slices" + "strings" "testing" "github.com/ollama/ollama/api" @@ -39,13 +41,31 @@ func TestExtractFromZipFile(t *testing.T) { cases := []struct { name string expect []string + err error }{ { name: "good", expect: []string{"good"}, }, { - name: filepath.Join("..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "bad"), + name: strings.Join([]string{"path", "..", "to", "good"}, string(os.PathSeparator)), + expect: []string{filepath.Join("to", "good")}, + }, + { + name: strings.Join([]string{"path", "..", "to", "..", "good"}, string(os.PathSeparator)), + expect: []string{"good"}, + }, + { + name: strings.Join([]string{"path", "to", "..", "..", "good"}, string(os.PathSeparator)), + expect: []string{"good"}, + }, + { + name: strings.Join([]string{"..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "bad"}, string(os.PathSeparator)), + err: zip.ErrInsecurePath, + }, + { + name: strings.Join([]string{"path", "..", "..", "to", "bad"}, string(os.PathSeparator)), + err: zip.ErrInsecurePath, }, } @@ -55,7 +75,7 @@ func TestExtractFromZipFile(t *testing.T) { defer f.Close() tempDir := t.TempDir() - if err := extractFromZipFile(tempDir, f, func(api.ProgressResponse) {}); err != nil { + if err := extractFromZipFile(tempDir, f, func(api.ProgressResponse) {}); !errors.Is(err, tt.err) { t.Fatal(err) } From 33a65e3ba3ad5666d6ba8430efbccfa6d642d1de Mon Sep 17 00:00:00 2001 From: Josh Yan Date: Mon, 1 Jul 2024 16:04:13 -0700 Subject: [PATCH 040/106] error --- llm/server.go | 3 +++ llm/status.go | 1 + 2 files changed, 4 insertions(+) diff --git a/llm/server.go b/llm/server.go index 61346069e..8b63cfbd5 100644 --- a/llm/server.go +++ b/llm/server.go @@ -560,6 +560,9 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error { if s.status != nil && s.status.LastErrMsg != "" { msg = s.status.LastErrMsg } + if strings.Contains(msg, "unknown model") { + return fmt.Errorf("this model is not supported by your version of Ollama. You may need to upgrade") + } return fmt.Errorf("llama runner process has terminated: %v %s", err, msg) default: } diff --git a/llm/status.go b/llm/status.go index 8a49bd55a..0f56b7f99 100644 --- a/llm/status.go +++ b/llm/status.go @@ -25,6 +25,7 @@ var errorPrefixes = []string{ "CUDA error", "cudaMalloc failed", "\"ERR\"", + "architecture", } func (w *StatusWriter) Write(b []byte) (int, error) { From 4f67b39d262b1997aa96c47585f1d8e8443d0f90 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 2 Jul 2024 09:22:17 -0700 Subject: [PATCH 041/106] Centos 7 EOL broke mirrors As of July 1st 2024: Could not resolve host: mirrorlist.centos.org This is expected due to EOL dates. 
--- scripts/rh_linux_deps.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/scripts/rh_linux_deps.sh b/scripts/rh_linux_deps.sh index ed60e4304..81648d68e 100644 --- a/scripts/rh_linux_deps.sh +++ b/scripts/rh_linux_deps.sh @@ -6,10 +6,21 @@ set -ex MACHINE=$(uname -m) if grep -i "centos" /etc/system-release >/dev/null; then + # As of 7/1/2024 mirrorlist.centos.org has been taken offline, so adjust accordingly + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo + # Centos 7 derivatives have too old of a git version to run our generate script # uninstall and ignore failures yum remove -y git yum -y install epel-release centos-release-scl + + # The release packages reinstate the mirrors, undo that again + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo + yum -y install dnf if [ "${MACHINE}" = "x86_64" ]; then yum -y install https://repo.ius.io/ius-release-el7.rpm From 020bd60ab2f156661b072515cd2c27d59b956535 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 2 Jul 2024 10:23:05 -0700 Subject: [PATCH 042/106] Switch amd container image base to rocky 8 The centos 7 arm mirrors have disappeared due to the EOL 2 days ago, and the vault sed workaround which works for x86 doesn't work for arm. --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 98a3ddfd2..b2c5c4a2f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -70,12 +70,12 @@ RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx" sh gen_linux.sh FROM --platform=linux/amd64 cpu-builder-amd64 AS cpu_avx2-build-amd64 RUN OLLAMA_SKIP_STATIC_GENERATE=1 OLLAMA_CPU_TARGET="cpu_avx2" sh gen_linux.sh -FROM --platform=linux/arm64 centos:7 AS cpu-builder-arm64 +FROM --platform=linux/arm64 rockylinux:8 AS cpu-builder-arm64 ARG CMAKE_VERSION ARG GOLANG_VERSION COPY ./scripts/rh_linux_deps.sh / RUN CMAKE_VERSION=${CMAKE_VERSION} GOLANG_VERSION=${GOLANG_VERSION} sh /rh_linux_deps.sh -ENV PATH /opt/rh/devtoolset-10/root/usr/bin:$PATH +ENV PATH /opt/rh/gcc-toolset-10/root/usr/bin:$PATH COPY --from=llm-code / /go/src/github.com/ollama/ollama/ ARG OLLAMA_CUSTOM_CPU_DEFS ARG CGO_CFLAGS From 996bb1b85e0c1b3ae64246a50ea412dc2a2e30d8 Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Tue, 2 Jul 2024 11:50:56 -0700 Subject: [PATCH 043/106] OpenAI: /v1/models and /v1/models/{model} compatibility (#5007) * OpenAI v1 models * Refactor Writers * Add Test Co-Authored-By: Attila Kerekes * Credit Co-Author Co-Authored-By: Attila Kerekes <439392+keriati@users.noreply.github.com> * Empty List Testing * Use Namespace for Ownedby * Update Test * Add back envconfig * v1/models docs * Use ModelName Parser * Test Names * Remove Docs * Clean Up * Test name Co-authored-by: Jeffrey Morgan * Add Middleware for Chat and List * Testing Cleanup * Test with Fatal * Add functionality to chat test * OpenAI: /v1/models/{model} compatibility (#5028) * Retrieve Model * OpenAI Delete Model * Retrieve Middleware * Remove Delete from Branch * Update Test * Middleware Test File * Function name * Cleanup * Test Update * Test Update --------- Co-authored-by: Attila Kerekes <439392+keriati@users.noreply.github.com> Co-authored-by: Jeffrey Morgan --- api/types.go | 7 
++ docs/openai.md | 1 + openai/openai.go | 163 ++++++++++++++++++++++++++++++++++++---- openai/openai_test.go | 170 ++++++++++++++++++++++++++++++++++++++++++ server/routes.go | 4 +- server/routes_test.go | 56 ++++++++++++++ 6 files changed, 387 insertions(+), 14 deletions(-) create mode 100644 openai/openai_test.go diff --git a/api/types.go b/api/types.go index 95ed5d37e..428281ba6 100644 --- a/api/types.go +++ b/api/types.go @@ -345,6 +345,13 @@ type ProcessModelResponse struct { SizeVRAM int64 `json:"size_vram"` } +type RetrieveModelResponse struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + OwnedBy string `json:"owned_by"` +} + type TokenResponse struct { Token string `json:"token"` } diff --git a/docs/openai.md b/docs/openai.md index 81b967eb7..9dda05c3a 100644 --- a/docs/openai.md +++ b/docs/openai.md @@ -65,6 +65,7 @@ curl http://localhost:11434/v1/chat/completions \ } ] }' + ``` ## Endpoints diff --git a/openai/openai.go b/openai/openai.go index 706d31aa2..01da44409 100644 --- a/openai/openai.go +++ b/openai/openai.go @@ -12,6 +12,7 @@ import ( "github.com/gin-gonic/gin" "github.com/ollama/ollama/api" + "github.com/ollama/ollama/types/model" ) type Error struct { @@ -85,6 +86,18 @@ type ChatCompletionChunk struct { Choices []ChunkChoice `json:"choices"` } +type Model struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + OwnedBy string `json:"owned_by"` +} + +type ListCompletion struct { + Object string `json:"object"` + Data []Model `json:"data"` +} + func NewError(code int, message string) ErrorResponse { var etype string switch code { @@ -145,7 +158,33 @@ func toChunk(id string, r api.ChatResponse) ChatCompletionChunk { } } -func fromRequest(r ChatCompletionRequest) api.ChatRequest { +func toListCompletion(r api.ListResponse) ListCompletion { + var data []Model + for _, m := range r.Models { + data = append(data, Model{ + Id: m.Name, + Object: "model", + Created: m.ModifiedAt.Unix(), + OwnedBy: model.ParseName(m.Name).Namespace, + }) + } + + return ListCompletion{ + Object: "list", + Data: data, + } +} + +func toModel(r api.ShowResponse, m string) Model { + return Model{ + Id: m, + Object: "model", + Created: r.ModifiedAt.Unix(), + OwnedBy: model.ParseName(m).Namespace, + } +} + +func fromChatRequest(r ChatCompletionRequest) api.ChatRequest { var messages []api.Message for _, msg := range r.Messages { messages = append(messages, api.Message{Role: msg.Role, Content: msg.Content}) @@ -208,13 +247,26 @@ func fromRequest(r ChatCompletionRequest) api.ChatRequest { } } -type writer struct { - stream bool - id string +type BaseWriter struct { gin.ResponseWriter } -func (w *writer) writeError(code int, data []byte) (int, error) { +type ChatWriter struct { + stream bool + id string + BaseWriter +} + +type ListWriter struct { + BaseWriter +} + +type RetrieveWriter struct { + BaseWriter + model string +} + +func (w *BaseWriter) writeError(code int, data []byte) (int, error) { var serr api.StatusError err := json.Unmarshal(data, &serr) if err != nil { @@ -230,7 +282,7 @@ func (w *writer) writeError(code int, data []byte) (int, error) { return len(data), nil } -func (w *writer) writeResponse(data []byte) (int, error) { +func (w *ChatWriter) writeResponse(data []byte) (int, error) { var chatResponse api.ChatResponse err := json.Unmarshal(data, &chatResponse) if err != nil { @@ -270,7 +322,7 @@ func (w *writer) writeResponse(data []byte) (int, error) { return len(data), nil } -func (w *writer) 
Write(data []byte) (int, error) { +func (w *ChatWriter) Write(data []byte) (int, error) { code := w.ResponseWriter.Status() if code != http.StatusOK { return w.writeError(code, data) @@ -279,7 +331,92 @@ func (w *writer) Write(data []byte) (int, error) { return w.writeResponse(data) } -func Middleware() gin.HandlerFunc { +func (w *ListWriter) writeResponse(data []byte) (int, error) { + var listResponse api.ListResponse + err := json.Unmarshal(data, &listResponse) + if err != nil { + return 0, err + } + + w.ResponseWriter.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w.ResponseWriter).Encode(toListCompletion(listResponse)) + if err != nil { + return 0, err + } + + return len(data), nil +} + +func (w *ListWriter) Write(data []byte) (int, error) { + code := w.ResponseWriter.Status() + if code != http.StatusOK { + return w.writeError(code, data) + } + + return w.writeResponse(data) +} + +func (w *RetrieveWriter) writeResponse(data []byte) (int, error) { + var showResponse api.ShowResponse + err := json.Unmarshal(data, &showResponse) + if err != nil { + return 0, err + } + + // retrieve completion + w.ResponseWriter.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w.ResponseWriter).Encode(toModel(showResponse, w.model)) + if err != nil { + return 0, err + } + + return len(data), nil +} + +func (w *RetrieveWriter) Write(data []byte) (int, error) { + code := w.ResponseWriter.Status() + if code != http.StatusOK { + return w.writeError(code, data) + } + + return w.writeResponse(data) +} + +func ListMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + w := &ListWriter{ + BaseWriter: BaseWriter{ResponseWriter: c.Writer}, + } + + c.Writer = w + + c.Next() + } +} + +func RetrieveMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + var b bytes.Buffer + if err := json.NewEncoder(&b).Encode(api.ShowRequest{Name: c.Param("model")}); err != nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error())) + return + } + + c.Request.Body = io.NopCloser(&b) + + // response writer + w := &RetrieveWriter{ + BaseWriter: BaseWriter{ResponseWriter: c.Writer}, + model: c.Param("model"), + } + + c.Writer = w + + c.Next() + } +} + +func ChatMiddleware() gin.HandlerFunc { return func(c *gin.Context) { var req ChatCompletionRequest err := c.ShouldBindJSON(&req) @@ -294,17 +431,17 @@ func Middleware() gin.HandlerFunc { } var b bytes.Buffer - if err := json.NewEncoder(&b).Encode(fromRequest(req)); err != nil { + if err := json.NewEncoder(&b).Encode(fromChatRequest(req)); err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error())) return } c.Request.Body = io.NopCloser(&b) - w := &writer{ - ResponseWriter: c.Writer, - stream: req.Stream, - id: fmt.Sprintf("chatcmpl-%d", rand.Intn(999)), + w := &ChatWriter{ + BaseWriter: BaseWriter{ResponseWriter: c.Writer}, + stream: req.Stream, + id: fmt.Sprintf("chatcmpl-%d", rand.Intn(999)), } c.Writer = w diff --git a/openai/openai_test.go b/openai/openai_test.go new file mode 100644 index 000000000..1f335b965 --- /dev/null +++ b/openai/openai_test.go @@ -0,0 +1,170 @@ +package openai + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/ollama/ollama/api" + "github.com/stretchr/testify/assert" +) + +func TestMiddleware(t *testing.T) { + type testCase struct { + Name string + Method string + Path string + 
TestPath string + Handler func() gin.HandlerFunc + Endpoint func(c *gin.Context) + Setup func(t *testing.T, req *http.Request) + Expected func(t *testing.T, resp *httptest.ResponseRecorder) + } + + testCases := []testCase{ + { + Name: "chat handler", + Method: http.MethodPost, + Path: "/api/chat", + TestPath: "/api/chat", + Handler: ChatMiddleware, + Endpoint: func(c *gin.Context) { + var chatReq api.ChatRequest + if err := c.ShouldBindJSON(&chatReq); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + userMessage := chatReq.Messages[0].Content + var assistantMessage string + + switch userMessage { + case "Hello": + assistantMessage = "Hello!" + default: + assistantMessage = "I'm not sure how to respond to that." + } + + c.JSON(http.StatusOK, api.ChatResponse{ + Message: api.Message{ + Role: "assistant", + Content: assistantMessage, + }, + }) + }, + Setup: func(t *testing.T, req *http.Request) { + body := ChatCompletionRequest{ + Model: "test-model", + Messages: []Message{{Role: "user", Content: "Hello"}}, + } + + bodyBytes, _ := json.Marshal(body) + + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + req.Header.Set("Content-Type", "application/json") + }, + Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + var chatResp ChatCompletion + if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil { + t.Fatal(err) + } + + if chatResp.Object != "chat.completion" { + t.Fatalf("expected chat.completion, got %s", chatResp.Object) + } + + if chatResp.Choices[0].Message.Content != "Hello!" { + t.Fatalf("expected Hello!, got %s", chatResp.Choices[0].Message.Content) + } + }, + }, + { + Name: "list handler", + Method: http.MethodGet, + Path: "/api/tags", + TestPath: "/api/tags", + Handler: ListMiddleware, + Endpoint: func(c *gin.Context) { + c.JSON(http.StatusOK, api.ListResponse{ + Models: []api.ListModelResponse{ + { + Name: "Test Model", + }, + }, + }) + }, + Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + var listResp ListCompletion + if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil { + t.Fatal(err) + } + + if listResp.Object != "list" { + t.Fatalf("expected list, got %s", listResp.Object) + } + + if len(listResp.Data) != 1 { + t.Fatalf("expected 1, got %d", len(listResp.Data)) + } + + if listResp.Data[0].Id != "Test Model" { + t.Fatalf("expected Test Model, got %s", listResp.Data[0].Id) + } + }, + }, + { + Name: "retrieve model", + Method: http.MethodGet, + Path: "/api/show/:model", + TestPath: "/api/show/test-model", + Handler: RetrieveMiddleware, + Endpoint: func(c *gin.Context) { + c.JSON(http.StatusOK, api.ShowResponse{ + ModifiedAt: time.Date(2024, 6, 17, 13, 45, 0, 0, time.UTC), + }) + }, + Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + var retrieveResp Model + if err := json.NewDecoder(resp.Body).Decode(&retrieveResp); err != nil { + t.Fatal(err) + } + + if retrieveResp.Object != "model" { + t.Fatalf("Expected object to be model, got %s", retrieveResp.Object) + } + + if retrieveResp.Id != "test-model" { + t.Fatalf("Expected id to be test-model, got %s", retrieveResp.Id) + } + }, + }, + } + + gin.SetMode(gin.TestMode) + router := gin.New() + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + router = gin.New() + router.Use(tc.Handler()) + router.Handle(tc.Method, tc.Path, tc.Endpoint) + req, _ := http.NewRequest(tc.Method, tc.TestPath, nil) + + if tc.Setup != nil { + tc.Setup(t, req) + } + + resp := httptest.NewRecorder() + 
router.ServeHTTP(resp, req) + + assert.Equal(t, http.StatusOK, resp.Code) + + tc.Expected(t, resp) + }) + } +} diff --git a/server/routes.go b/server/routes.go index 76ead072f..ad2364507 100644 --- a/server/routes.go +++ b/server/routes.go @@ -1039,7 +1039,9 @@ func (s *Server) GenerateRoutes() http.Handler { r.GET("/api/ps", s.ProcessHandler) // Compatibility endpoints - r.POST("/v1/chat/completions", openai.Middleware(), s.ChatHandler) + r.POST("/v1/chat/completions", openai.ChatMiddleware(), s.ChatHandler) + r.GET("/v1/models", openai.ListMiddleware(), s.ListModelsHandler) + r.GET("/v1/models/:model", openai.RetrieveMiddleware(), s.ShowModelHandler) for _, method := range []string{http.MethodGet, http.MethodHead} { r.Handle(method, "/", func(c *gin.Context) { diff --git a/server/routes_test.go b/server/routes_test.go index 5a5c0fbba..50eaf7e97 100644 --- a/server/routes_test.go +++ b/server/routes_test.go @@ -20,6 +20,7 @@ import ( "github.com/ollama/ollama/api" "github.com/ollama/ollama/envconfig" "github.com/ollama/ollama/llm" + "github.com/ollama/ollama/openai" "github.com/ollama/ollama/parser" "github.com/ollama/ollama/types/model" "github.com/ollama/ollama/version" @@ -105,6 +106,24 @@ func Test_Routes(t *testing.T) { assert.Empty(t, len(modelList.Models)) }, }, + { + Name: "openai empty list", + Method: http.MethodGet, + Path: "/v1/models", + Expected: func(t *testing.T, resp *http.Response) { + contentType := resp.Header.Get("Content-Type") + assert.Equal(t, "application/json", contentType) + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + var modelList openai.ListCompletion + err = json.Unmarshal(body, &modelList) + require.NoError(t, err) + + assert.Equal(t, "list", modelList.Object) + assert.Empty(t, modelList.Data) + }, + }, { Name: "Tags Handler (yes tags)", Method: http.MethodGet, @@ -128,6 +147,25 @@ func Test_Routes(t *testing.T) { assert.Equal(t, "test-model:latest", modelList.Models[0].Name) }, }, + { + Name: "openai list models with tags", + Method: http.MethodGet, + Path: "/v1/models", + Expected: func(t *testing.T, resp *http.Response) { + contentType := resp.Header.Get("Content-Type") + assert.Equal(t, "application/json", contentType) + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + var modelList openai.ListCompletion + err = json.Unmarshal(body, &modelList) + require.NoError(t, err) + + assert.Len(t, modelList.Data, 1) + assert.Equal(t, "test-model:latest", modelList.Data[0].Id) + assert.Equal(t, "library", modelList.Data[0].OwnedBy) + }, + }, { Name: "Create Model Handler", Method: http.MethodPost, @@ -216,6 +254,24 @@ func Test_Routes(t *testing.T) { assert.InDelta(t, 0, showResp.ModelInfo["general.parameter_count"], 1e-9, "Parameter count should be 0") }, }, + { + Name: "openai retrieve model handler", + Method: http.MethodGet, + Path: "/v1/models/show-model", + Expected: func(t *testing.T, resp *http.Response) { + contentType := resp.Header.Get("Content-Type") + assert.Equal(t, "application/json", contentType) + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + var retrieveResp api.RetrieveModelResponse + err = json.Unmarshal(body, &retrieveResp) + require.NoError(t, err) + + assert.Equal(t, "show-model", retrieveResp.Id) + assert.Equal(t, "library", retrieveResp.OwnedBy) + }, + }, } t.Setenv("OLLAMA_MODELS", t.TempDir()) From 69c04eecc4b969149e43d6941f06a7d60dc5d191 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 2 Jul 2024 12:46:14 -0700 Subject: [PATCH 044/106] Add windows radeon concurreny note --- 
docs/faq.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/faq.md b/docs/faq.md index 841f1d13d..574112461 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -266,8 +266,10 @@ If there is insufficient available memory to load a new model request while one Parallel request processing for a given model results in increasing the context size by the number of parallel requests. For example, a 2K context with 4 parallel requests will result in an 8K context and additional memory allocation. -The following server settings may be used to adjust how Ollama handles concurrent requests: +The following server settings may be used to adjust how Ollama handles concurrent requests on most platforms: - `OLLAMA_MAX_LOADED_MODELS` - The maximum number of models that can be loaded concurrently provided they fit in available memory. The default is 3 * the number of GPUs or 3 for CPU inference. - `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory. - `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512 + +Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM. \ No newline at end of file From d626b99b547c43e57390cec90ba2ae01adf0f429 Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:01:45 -0700 Subject: [PATCH 045/106] OpenAI: v1/completions compatibility (#5209) * OpenAI v1 models * Refactor Writers * Add Test Co-Authored-By: Attila Kerekes * Credit Co-Author Co-Authored-By: Attila Kerekes <439392+keriati@users.noreply.github.com> * Empty List Testing * Use Namespace for Ownedby * Update Test * Add back envconfig * v1/models docs * Use ModelName Parser * Test Names * Remove Docs * Clean Up * Test name Co-authored-by: Jeffrey Morgan * Add Middleware for Chat and List * Completions Endpoint * Testing Cleanup * Test with Fatal * Add functionality to chat test * Rename function * float types * type cleanup * cleaning * more cleaning * Extra test cases * merge conflicts * merge conflicts * merge conflicts * merge conflicts * cleaning * cleaning --------- Co-authored-by: Attila Kerekes <439392+keriati@users.noreply.github.com> Co-authored-by: Jeffrey Morgan --- openai/openai.go | 223 +++++++++++++++++++++++++++++++++++++++++- openai/openai_test.go | 132 ++++++++++++++++++++++++- server/routes.go | 1 + 3 files changed, 353 insertions(+), 3 deletions(-) diff --git a/openai/openai.go b/openai/openai.go index 01da44409..f1e75bf21 100644 --- a/openai/openai.go +++ b/openai/openai.go @@ -43,6 +43,12 @@ type ChunkChoice struct { FinishReason *string `json:"finish_reason"` } +type CompleteChunkChoice struct { + Text string `json:"text"` + Index int `json:"index"` + FinishReason *string `json:"finish_reason"` +} + type Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` @@ -86,6 +92,39 @@ type ChatCompletionChunk struct { Choices []ChunkChoice `json:"choices"` } +// TODO (https://github.com/ollama/ollama/issues/5259): support []string, []int and [][]int +type CompletionRequest struct { + Model string `json:"model"` + 
Prompt string `json:"prompt"` + FrequencyPenalty float32 `json:"frequency_penalty"` + MaxTokens *int `json:"max_tokens"` + PresencePenalty float32 `json:"presence_penalty"` + Seed *int `json:"seed"` + Stop any `json:"stop"` + Stream bool `json:"stream"` + Temperature *float32 `json:"temperature"` + TopP float32 `json:"top_p"` +} + +type Completion struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Model string `json:"model"` + SystemFingerprint string `json:"system_fingerprint"` + Choices []CompleteChunkChoice `json:"choices"` + Usage Usage `json:"usage,omitempty"` +} + +type CompletionChunk struct { + Id string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Choices []CompleteChunkChoice `json:"choices"` + Model string `json:"model"` + SystemFingerprint string `json:"system_fingerprint"` +} + type Model struct { Id string `json:"id"` Object string `json:"object"` @@ -158,6 +197,52 @@ func toChunk(id string, r api.ChatResponse) ChatCompletionChunk { } } +func toCompletion(id string, r api.GenerateResponse) Completion { + return Completion{ + Id: id, + Object: "text_completion", + Created: r.CreatedAt.Unix(), + Model: r.Model, + SystemFingerprint: "fp_ollama", + Choices: []CompleteChunkChoice{{ + Text: r.Response, + Index: 0, + FinishReason: func(reason string) *string { + if len(reason) > 0 { + return &reason + } + return nil + }(r.DoneReason), + }}, + Usage: Usage{ + // TODO: ollama returns 0 for prompt eval if the prompt was cached, but openai returns the actual count + PromptTokens: r.PromptEvalCount, + CompletionTokens: r.EvalCount, + TotalTokens: r.PromptEvalCount + r.EvalCount, + }, + } +} + +func toCompleteChunk(id string, r api.GenerateResponse) CompletionChunk { + return CompletionChunk{ + Id: id, + Object: "text_completion", + Created: time.Now().Unix(), + Model: r.Model, + SystemFingerprint: "fp_ollama", + Choices: []CompleteChunkChoice{{ + Text: r.Response, + Index: 0, + FinishReason: func(reason string) *string { + if len(reason) > 0 { + return &reason + } + return nil + }(r.DoneReason), + }}, + } +} + func toListCompletion(r api.ListResponse) ListCompletion { var data []Model for _, m := range r.Models { @@ -195,7 +280,7 @@ func fromChatRequest(r ChatCompletionRequest) api.ChatRequest { switch stop := r.Stop.(type) { case string: options["stop"] = []string{stop} - case []interface{}: + case []any: var stops []string for _, s := range stop { if str, ok := s.(string); ok { @@ -247,6 +332,52 @@ func fromChatRequest(r ChatCompletionRequest) api.ChatRequest { } } +func fromCompleteRequest(r CompletionRequest) (api.GenerateRequest, error) { + options := make(map[string]any) + + switch stop := r.Stop.(type) { + case string: + options["stop"] = []string{stop} + case []string: + options["stop"] = stop + default: + if r.Stop != nil { + return api.GenerateRequest{}, fmt.Errorf("invalid type for 'stop' field: %T", r.Stop) + } + } + + if r.MaxTokens != nil { + options["num_predict"] = *r.MaxTokens + } + + if r.Temperature != nil { + options["temperature"] = *r.Temperature * 2.0 + } else { + options["temperature"] = 1.0 + } + + if r.Seed != nil { + options["seed"] = *r.Seed + } + + options["frequency_penalty"] = r.FrequencyPenalty * 2.0 + + options["presence_penalty"] = r.PresencePenalty * 2.0 + + if r.TopP != 0.0 { + options["top_p"] = r.TopP + } else { + options["top_p"] = 1.0 + } + + return api.GenerateRequest{ + Model: r.Model, + Prompt: r.Prompt, + Options: options, + Stream: &r.Stream, + }, nil +} + type 
BaseWriter struct { gin.ResponseWriter } @@ -257,6 +388,12 @@ type ChatWriter struct { BaseWriter } +type CompleteWriter struct { + stream bool + id string + BaseWriter +} + type ListWriter struct { BaseWriter } @@ -331,6 +468,55 @@ func (w *ChatWriter) Write(data []byte) (int, error) { return w.writeResponse(data) } +func (w *CompleteWriter) writeResponse(data []byte) (int, error) { + var generateResponse api.GenerateResponse + err := json.Unmarshal(data, &generateResponse) + if err != nil { + return 0, err + } + + // completion chunk + if w.stream { + d, err := json.Marshal(toCompleteChunk(w.id, generateResponse)) + if err != nil { + return 0, err + } + + w.ResponseWriter.Header().Set("Content-Type", "text/event-stream") + _, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d))) + if err != nil { + return 0, err + } + + if generateResponse.Done { + _, err = w.ResponseWriter.Write([]byte("data: [DONE]\n\n")) + if err != nil { + return 0, err + } + } + + return len(data), nil + } + + // completion + w.ResponseWriter.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w.ResponseWriter).Encode(toCompletion(w.id, generateResponse)) + if err != nil { + return 0, err + } + + return len(data), nil +} + +func (w *CompleteWriter) Write(data []byte) (int, error) { + code := w.ResponseWriter.Status() + if code != http.StatusOK { + return w.writeError(code, data) + } + + return w.writeResponse(data) +} + func (w *ListWriter) writeResponse(data []byte) (int, error) { var listResponse api.ListResponse err := json.Unmarshal(data, &listResponse) @@ -416,6 +602,41 @@ func RetrieveMiddleware() gin.HandlerFunc { } } +func CompletionsMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + var req CompletionRequest + err := c.ShouldBindJSON(&req) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error())) + return + } + + var b bytes.Buffer + genReq, err := fromCompleteRequest(req) + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error())) + return + } + + if err := json.NewEncoder(&b).Encode(genReq); err != nil { + c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error())) + return + } + + c.Request.Body = io.NopCloser(&b) + + w := &CompleteWriter{ + BaseWriter: BaseWriter{ResponseWriter: c.Writer}, + stream: req.Stream, + id: fmt.Sprintf("cmpl-%d", rand.Intn(999)), + } + + c.Writer = w + + c.Next() + } +} + func ChatMiddleware() gin.HandlerFunc { return func(c *gin.Context) { var req ChatCompletionRequest diff --git a/openai/openai_test.go b/openai/openai_test.go index 1f335b965..4d21382c6 100644 --- a/openai/openai_test.go +++ b/openai/openai_test.go @@ -3,9 +3,11 @@ package openai import ( "bytes" "encoding/json" + "fmt" "io" "net/http" "net/http/httptest" + "strings" "testing" "time" @@ -69,6 +71,8 @@ func TestMiddleware(t *testing.T) { req.Header.Set("Content-Type", "application/json") }, Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusOK, resp.Code) + var chatResp ChatCompletion if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil { t.Fatal(err) @@ -83,6 +87,130 @@ func TestMiddleware(t *testing.T) { } }, }, + { + Name: "completions handler", + Method: http.MethodPost, + Path: "/api/generate", + TestPath: "/api/generate", + Handler: CompletionsMiddleware, + Endpoint: func(c *gin.Context) { + c.JSON(http.StatusOK, api.GenerateResponse{ + Response: 
"Hello!", + }) + }, + Setup: func(t *testing.T, req *http.Request) { + body := CompletionRequest{ + Model: "test-model", + Prompt: "Hello", + } + + bodyBytes, _ := json.Marshal(body) + + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + req.Header.Set("Content-Type", "application/json") + }, + Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusOK, resp.Code) + var completionResp Completion + if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil { + t.Fatal(err) + } + + if completionResp.Object != "text_completion" { + t.Fatalf("expected text_completion, got %s", completionResp.Object) + } + + if completionResp.Choices[0].Text != "Hello!" { + t.Fatalf("expected Hello!, got %s", completionResp.Choices[0].Text) + } + }, + }, + { + Name: "completions handler with params", + Method: http.MethodPost, + Path: "/api/generate", + TestPath: "/api/generate", + Handler: CompletionsMiddleware, + Endpoint: func(c *gin.Context) { + var generateReq api.GenerateRequest + if err := c.ShouldBindJSON(&generateReq); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + temperature := generateReq.Options["temperature"].(float64) + var assistantMessage string + + switch temperature { + case 1.6: + assistantMessage = "Received temperature of 1.6" + default: + assistantMessage = fmt.Sprintf("Received temperature of %f", temperature) + } + + c.JSON(http.StatusOK, api.GenerateResponse{ + Response: assistantMessage, + }) + }, + Setup: func(t *testing.T, req *http.Request) { + temp := float32(0.8) + body := CompletionRequest{ + Model: "test-model", + Prompt: "Hello", + Temperature: &temp, + } + + bodyBytes, _ := json.Marshal(body) + + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + req.Header.Set("Content-Type", "application/json") + }, + Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusOK, resp.Code) + var completionResp Completion + if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil { + t.Fatal(err) + } + + if completionResp.Object != "text_completion" { + t.Fatalf("expected text_completion, got %s", completionResp.Object) + } + + if completionResp.Choices[0].Text != "Received temperature of 1.6" { + t.Fatalf("expected Received temperature of 1.6, got %s", completionResp.Choices[0].Text) + } + }, + }, + { + Name: "completions handler with error", + Method: http.MethodPost, + Path: "/api/generate", + TestPath: "/api/generate", + Handler: CompletionsMiddleware, + Endpoint: func(c *gin.Context) { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + }, + Setup: func(t *testing.T, req *http.Request) { + body := CompletionRequest{ + Model: "test-model", + Prompt: "Hello", + } + + bodyBytes, _ := json.Marshal(body) + + req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + req.Header.Set("Content-Type", "application/json") + }, + Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + if resp.Code != http.StatusBadRequest { + t.Fatalf("expected 400, got %d", resp.Code) + } + + if !strings.Contains(resp.Body.String(), `"invalid request"`) { + t.Fatalf("error was not forwarded") + } + }, + }, { Name: "list handler", Method: http.MethodGet, @@ -99,6 +227,8 @@ func TestMiddleware(t *testing.T) { }) }, Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { + assert.Equal(t, http.StatusOK, resp.Code) + var listResp ListCompletion if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil { t.Fatal(err) @@ 
-162,8 +292,6 @@ func TestMiddleware(t *testing.T) { resp := httptest.NewRecorder() router.ServeHTTP(resp, req) - assert.Equal(t, http.StatusOK, resp.Code) - tc.Expected(t, resp) }) } diff --git a/server/routes.go b/server/routes.go index 9fe5fcc4e..41c920844 100644 --- a/server/routes.go +++ b/server/routes.go @@ -1054,6 +1054,7 @@ func (s *Server) GenerateRoutes() http.Handler { // Compatibility endpoints r.POST("/v1/chat/completions", openai.ChatMiddleware(), s.ChatHandler) + r.POST("/v1/completions", openai.CompletionsMiddleware(), s.GenerateHandler) r.GET("/v1/models", openai.ListMiddleware(), s.ListModelsHandler) r.GET("/v1/models/:model", openai.RetrieveMiddleware(), s.ShowModelHandler) From 65a5040e09d34b4e4237a4ac1996e2fb2a112bb3 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 2 Jul 2024 16:42:17 -0700 Subject: [PATCH 046/106] fix generate template --- server/routes.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/server/routes.go b/server/routes.go index 41c920844..b14a146c1 100644 --- a/server/routes.go +++ b/server/routes.go @@ -176,11 +176,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { prompt = req.Prompt case req.Prompt != "": if req.Template == "" { - model.Template, err = template.Parse(req.Template) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } + tmpl = model.Template } if req.System == "" { From ef757da2c90ad52f35c95688095dfd84655cceb7 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 10:30:07 -0700 Subject: [PATCH 047/106] Better nvidia GPU discovery logging Refine the way we log GPU discovery to improve the non-debug output, and report more actionable log messages when possible to help users troubleshoot on their own. --- docs/troubleshooting.md | 14 +++++++++----- gpu/gpu.go | 23 +++++++++++++++++++++-- gpu/gpu_info_nvcuda.c | 31 ++++++++++++++++--------------- gpu/gpu_info_nvcuda.h | 6 +++++- 4 files changed, 51 insertions(+), 23 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index de29b344c..bbb771831 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -70,14 +70,18 @@ curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/ -## Container fails to run on NVIDIA GPU +## NVIDIA GPU Discovery -Make sure you've set up the container runtime first as described in [docker.md](./docker.md) +When Ollama starts up, it takes inventory of the GPUs present in the system to determine compatibility and how much VRAM is available. Sometimes this discovery can fail to find your GPUs. In general, running the latest driver will yield the best results. -Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem +### Linux NVIDIA Troubleshooting -- Is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU. -- Is the uvm driver not loaded? 
`sudo nvidia-modprobe -u` +If you are using a container to run Ollama, make sure you've set up the container runtime first as described in [docker.md](./docker.md) + +Sometimes the Ollama can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem + +- If you are using a container, is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU. +- Is the uvm driver loaded? `sudo nvidia-modprobe -u` - Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm` - Try rebooting - Make sure you're running the latest nvidia drivers diff --git a/gpu/gpu.go b/gpu/gpu.go index 583bb79c6..29a3c1037 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -202,7 +202,7 @@ func GetGPUInfo() GpuInfoList { }() if !bootstrapped { - slog.Debug("Detecting GPUs") + slog.Info("looking for compatible GPUs") needRefresh = false cpuCapability = GetCPUCapability() var memInfo C.mem_info_t @@ -320,6 +320,9 @@ func GetGPUInfo() GpuInfoList { rocmGPUs = AMDGetGPUInfo() bootstrapped = true + if len(cudaGPUs) == 0 && len(rocmGPUs) == 0 && len(oneapiGPUs) == 0 { + slog.Info("no compatible GPUs were discovered") + } } // For detected GPUs, load library if not loaded @@ -514,7 +517,23 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) { defer C.free(unsafe.Pointer(lib)) C.nvcuda_init(lib, &resp) if resp.err != nil { - slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err)) + // Decide what log level based on the type of error message to help users understand why + msg := C.GoString(resp.err) + switch resp.cudaErr { + case C.CUDA_ERROR_INSUFFICIENT_DRIVER, C.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH: + slog.Warn("version mismatch between driver and cuda driver library - reboot or upgrade may be required", "library", libPath, "error", msg) + case C.CUDA_ERROR_NO_DEVICE: + slog.Info("no nvidia devices detected", "library", libPath) + case C.CUDA_ERROR_UNKNOWN: + slog.Warn("unknown error initializing cuda driver library", "library", libPath, "error", msg) + slog.Warn("see https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for more information") + default: + if strings.Contains(msg, "wrong ELF class") { + slog.Debug("skipping 32bit library", "library", libPath) + } else { + slog.Info("unable to load cuda driver library", "library", libPath, "error", msg) + } + } C.free(unsafe.Pointer(resp.err)) } else { return int(resp.num_devices), &resp.ch, libPath diff --git a/gpu/gpu_info_nvcuda.c b/gpu/gpu_info_nvcuda.c index abe140844..a1a38bfc2 100644 --- a/gpu/gpu_info_nvcuda.c +++ b/gpu/gpu_info_nvcuda.c @@ -7,6 +7,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { CUresult ret; resp->err = NULL; resp->num_devices = 0; + resp->cudaErr = CUDA_SUCCESS; const int buflen = 256; char buf[buflen + 1]; int i; @@ -38,6 +39,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { nvcuda_lib_path, msg); free(msg); resp->err = strdup(buf); + resp->cudaErr = -1; return; } @@ -52,6 +54,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { msg); free(msg); resp->err = strdup(buf); + resp->cudaErr = -1; return; } } @@ -61,12 +64,9 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t 
*resp) { LOG(resp->ch.verbose, "cuInit err: %d\n", ret); UNLOAD_LIBRARY(resp->ch.handle); resp->ch.handle = NULL; - if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) { - resp->err = strdup("your nvidia driver is too old or missing. If you have a CUDA GPU please upgrade to run ollama"); - return; - } - snprintf(buf, buflen, "nvcuda init failure: %d", ret); + snprintf(buf, buflen, "cuda driver library init failure: %d", ret); resp->err = strdup(buf); + resp->cudaErr = ret; return; } @@ -91,6 +91,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { resp->ch.handle = NULL; snprintf(buf, buflen, "unable to get device count: %d", ret); resp->err = strdup(buf); + resp->cudaErr = ret; return; } } @@ -106,13 +107,13 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) { CUuuid uuid = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if (h.handle == NULL) { - resp->err = strdup("nvcuda handle isn't initialized"); + resp->err = strdup("cuda driver library handle isn't initialized"); return; } ret = (*h.cuDeviceGet)(&device, i); if (ret != CUDA_SUCCESS) { - snprintf(buf, buflen, "nvcuda device failed to initialize"); + snprintf(buf, buflen, "cuda driver library device failed to initialize"); resp->err = strdup(buf); return; } @@ -168,14 +169,14 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) { // To get memory we have to set (and release) a context ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device); if (ret != CUDA_SUCCESS) { - snprintf(buf, buflen, "nvcuda failed to get device context %d", ret); + snprintf(buf, buflen, "cuda driver library failed to get device context %d", ret); resp->err = strdup(buf); return; } ret = (*h.cuMemGetInfo_v2)(&memInfo.free, &memInfo.total); if (ret != CUDA_SUCCESS) { - snprintf(buf, buflen, "nvcuda device memory info lookup failure %d", ret); + snprintf(buf, buflen, "cuda driver library device memory info lookup failure %d", ret); resp->err = strdup(buf); // Best effort on failure... (*h.cuCtxDestroy)(ctx); @@ -193,7 +194,7 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) { ret = (*h.cuCtxDestroy)(ctx); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda failed to release device context %d", ret); + LOG(1, "cuda driver library failed to release device context %d", ret); } } @@ -206,7 +207,7 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) ret = (*h.cuDeviceGet)(&device, i); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda device failed to initialize"); + LOG(1, "cuda driver library device failed to initialize"); return; } @@ -214,13 +215,13 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) // To get memory we have to set (and release) a context ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda failed to get device context %d", ret); + LOG(1, "cuda driver library failed to get device context %d", ret); return; } ret = (*h.cuMemGetInfo_v2)(free, total); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda device memory info lookup failure %d", ret); + LOG(1, "cuda driver library device memory info lookup failure %d", ret); // Best effort on failure... 
(*h.cuCtxDestroy)(ctx); return; @@ -228,12 +229,12 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) ret = (*h.cuCtxDestroy)(ctx); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda failed to release device context %d", ret); + LOG(1, "cuda driver library failed to release device context %d", ret); } } void nvcuda_release(nvcuda_handle_t h) { - LOG(h.verbose, "releasing nvcuda library\n"); + LOG(h.verbose, "releasing cuda driver library\n"); UNLOAD_LIBRARY(h.handle); // TODO and other context release logic? h.handle = NULL; diff --git a/gpu/gpu_info_nvcuda.h b/gpu/gpu_info_nvcuda.h index f9654f641..ef2fe8a30 100644 --- a/gpu/gpu_info_nvcuda.h +++ b/gpu/gpu_info_nvcuda.h @@ -7,9 +7,12 @@ typedef enum cudaError_enum { CUDA_SUCCESS = 0, CUDA_ERROR_INVALID_VALUE = 1, - CUDA_ERROR_MEMORY_ALLOCATION = 2, + CUDA_ERROR_OUT_OF_MEMORY = 2, CUDA_ERROR_NOT_INITIALIZED = 3, CUDA_ERROR_INSUFFICIENT_DRIVER = 35, + CUDA_ERROR_NO_DEVICE = 100, + CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803, + CUDA_ERROR_UNKNOWN = 999, // Other values omitted for now... } CUresult; @@ -64,6 +67,7 @@ typedef struct nvcuda_init_resp { char *err; // If err is non-null handle is invalid nvcuda_handle_t ch; int num_devices; + CUresult cudaErr; } nvcuda_init_resp_t; void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp); From 6298f49816c2264f9bb77206ad1b015aa357e381 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 12:37:40 -0700 Subject: [PATCH 048/106] Fix clip model loading with unicode paths On windows, if the model dir contained unicode characters clip models would fail to load. This fixes the file name handling in clip.cpp to support utf16 on windows. --- llm/patches/08-clip-unicode.diff | 42 ++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 llm/patches/08-clip-unicode.diff diff --git a/llm/patches/08-clip-unicode.diff b/llm/patches/08-clip-unicode.diff new file mode 100644 index 000000000..53e5ee115 --- /dev/null +++ b/llm/patches/08-clip-unicode.diff @@ -0,0 +1,42 @@ +diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp +index 95fbe3d0..5a02a6ec 100644 +--- a/examples/llava/clip.cpp ++++ b/examples/llava/clip.cpp +@@ -32,6 +33,14 @@ + #include + #include + ++#if defined(_WIN32) ++#define WIN32_LEAN_AND_MEAN ++#ifndef NOMINMAX ++ #define NOMINMAX ++#endif ++#include ++#endif ++ + //#define CLIP_DEBUG_FUNCTIONS + + // RGB uint8 image +@@ -1055,7 +1064,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { + return nullptr; + } + ++#ifdef _WIN32 ++ int wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0); ++ if (!wlen) { ++ return NULL; ++ } ++ wchar_t * wbuf = (wchar_t *) malloc(wlen * sizeof(wchar_t)); ++ wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, wbuf, wlen); ++ if (!wlen) { ++ free(wbuf); ++ return NULL; ++ } ++ auto fin = std::ifstream(wbuf, std::ios::binary); ++ free(wbuf); ++#else + auto fin = std::ifstream(fname, std::ios::binary); ++#endif + if (!fin) { + LOG_TEE("cannot open model file for loading tensors\n"); + clip_free(new_clip); From 0e982bc1f47cfc7c36f49f925419f9039304925e Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 13:10:14 -0700 Subject: [PATCH 049/106] Fix corner cases on tmp cleaner on mac When ollama is running a long time, tmp cleaners can remove the runners. 
This tightens up a few corner cases on arm macs where we failed with "server cpu not listed in available servers map[]" --- llm/payload.go | 44 +++++++++++++++++++++++--------------------- llm/server.go | 15 ++++++++++++++- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/llm/payload.go b/llm/payload.go index 9296db336..b402e1f24 100644 --- a/llm/payload.go +++ b/llm/payload.go @@ -38,7 +38,7 @@ func Init() error { } var variants []string - for v := range availableServers() { + for v := range getAvailableServers() { variants = append(variants, v) } slog.Info(fmt.Sprintf("Dynamic LLM libraries %v", variants)) @@ -50,7 +50,7 @@ func Init() error { // binary names may contain an optional variant separated by '_' // For example, "ollama_rocm_v6" and "ollama_rocm_v5" or "ollama_cpu" and "ollama_cpu_avx2" // Any library without a variant is the lowest common denominator -func availableServers() map[string]string { +func getAvailableServers() map[string]string { payloadsDir, err := gpu.PayloadsDir() if err != nil { slog.Error("payload lookup error", "error", err) @@ -80,7 +80,7 @@ func availableServers() map[string]string { // TODO - switch to metadata based mapping func serversForGpu(info gpu.GpuInfo) []string { // glob workDir for files that start with ollama_ - availableServers := availableServers() + availableServers := getAvailableServers() requested := info.Library if info.Variant != gpu.CPUCapabilityNone { requested += "_" + info.Variant.String() @@ -115,27 +115,29 @@ func serversForGpu(info gpu.GpuInfo) []string { servers = append(servers, alt...) } - // Load up the best CPU variant if not primary requested - if info.Library != "cpu" { - variant := gpu.GetCPUCapability() - // If no variant, then we fall back to default - // If we have a variant, try that if we find an exact match - // Attempting to run the wrong CPU instructions will panic the - // process - if variant != gpu.CPUCapabilityNone { - for cmp := range availableServers { - if cmp == "cpu_"+variant.String() { - servers = append(servers, cmp) - break + if !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") { + // Load up the best CPU variant if not primary requested + if info.Library != "cpu" { + variant := gpu.GetCPUCapability() + // If no variant, then we fall back to default + // If we have a variant, try that if we find an exact match + // Attempting to run the wrong CPU instructions will panic the + // process + if variant != gpu.CPUCapabilityNone { + for cmp := range availableServers { + if cmp == "cpu_"+variant.String() { + servers = append(servers, cmp) + break + } } + } else { + servers = append(servers, "cpu") } - } else { - servers = append(servers, "cpu") } - } - if len(servers) == 0 { - servers = []string{"cpu"} + if len(servers) == 0 { + servers = []string{"cpu"} + } } return servers @@ -147,7 +149,7 @@ func serverForCpu() string { return "metal" } variant := gpu.GetCPUCapability() - availableServers := availableServers() + availableServers := getAvailableServers() if variant != gpu.CPUCapabilityNone { for cmp := range availableServers { if cmp == "cpu_"+variant.String() { diff --git a/llm/server.go b/llm/server.go index 8b63cfbd5..4eb30e671 100644 --- a/llm/server.go +++ b/llm/server.go @@ -131,7 +131,20 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr return nil, errors.New("ollama supports only one lora adapter, but multiple were provided") } - availableServers := availableServers() + availableServers := getAvailableServers() + if len(availableServers) == 0 
{ + if runtime.GOOS != "windows" { + slog.Warn("llama server binary disappeared, reinitializing payloads") + err = Init() + if err != nil { + slog.Warn("failed to reinitialize payloads", "error", err) + return nil, err + } + availableServers = getAvailableServers() + } else { + return nil, finalErr + } + } var servers []string if cpuRunner != "" { servers = []string{cpuRunner} From 3b5a4a77f3a191e368af3412e5de9b38b4f80771 Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Wed, 3 Jul 2024 13:46:23 -0700 Subject: [PATCH 050/106] Return Correct Prompt Eval Count Regardless of Cache Prompt (#5371) * openai compatibility * Revert "openai compatibility" This reverts commit d3f98a811e00fc497d889c8c45b0cfec5b64690c. * remove erroneous subtraction of prompt cache --- llm/ext_server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 3bc012521..099705998 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1732,7 +1732,7 @@ struct llama_server_context slot.n_past -= 1; } - slot.n_prompt_tokens_processed = slot.n_prompt_tokens - slot.n_past; + slot.n_prompt_tokens_processed = slot.n_prompt_tokens; if (slot.ga_n != 1) { From 3c75113e37cc2b5d9ad8cb5c21841437aab482cc Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 14:47:42 -0700 Subject: [PATCH 051/106] Prevent loading models larger than total memory Users may not realize the siny new model they're trying to load fits on their disk, but can't load into system+GPU memory. Today we crash, but with this fix, we'll give them a better error message before even trying to load it. --- server/sched.go | 26 ++++++++++++++++++++++++++ server/sched_test.go | 12 ++++++++++++ 2 files changed, 38 insertions(+) diff --git a/server/sched.go b/server/sched.go index 71b535ae2..362430986 100644 --- a/server/sched.go +++ b/server/sched.go @@ -139,6 +139,11 @@ func (s *Scheduler) processPending(ctx context.Context) { } for { + cpus := s.getCpuFn() + var systemMem gpu.GpuInfo + if len(cpus) > 0 { + systemMem = cpus[0] + } var runnerToExpire *runnerRef s.loadedMu.Lock() runner := s.loaded[pending.model.ModelPath] @@ -192,6 +197,27 @@ func (s *Scheduler) processPending(ctx context.Context) { break } + // Block attempting to load a model larger than system memory + GPU memory + estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts) + maxSize := systemMem.FreeMemory + for _, gpu := range gpus { + if gpu.Library == "cpu" { + continue + } + if loadedCount == 0 { + // If no other models are loaded, set the limit based on what's available + maxSize += gpu.FreeMemory + } else { + // Other models could be unloaded, favor total memory for limit + maxSize += gpu.TotalMemory + } + } + if estimate.TotalSize > maxSize { + slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize)) + pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) + break + } + // Evaluate if the model will fit in the available system memory, or if we should unload a model first if len(gpus) == 1 && gpus[0].Library == "cpu" { // simplifying assumption of defaultParallel when in CPU mode diff --git a/server/sched_test.go b/server/sched_test.go index be0830a34..83075f749 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -199,6 
+199,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario1a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario1a.req.errCh) + case err := <-scenario1a.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -212,6 +214,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario1a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario1b.req.errCh) + case err := <-scenario1b.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -230,6 +234,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario2a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario2a.req.errCh) + case err := <-scenario2a.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -246,6 +252,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario3a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3a.req.errCh) + case err := <-scenario3a.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -262,6 +270,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario3b.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3b.req.errCh) + case err := <-scenario3b.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -278,6 +288,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario3c.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3c.req.errCh) + case err := <-scenario3c.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } From 955f2a4e035044866277e26abe74343117250f1a Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 2 Jul 2024 15:12:43 -0700 Subject: [PATCH 052/106] Only set default keep_alive on initial model load This change fixes the handling of keep_alive so that if client request omits the setting, we only set this on initial load. Once the model is loaded, if new requests leave this unset, we'll keep whatever keep_alive was there. 
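For illustration only (the host, model name, and prompt below are placeholders), a client can still pin a model explicitly by sending keep_alive with the request; later requests that omit the field no longer reset it:

    # keep the model loaded for an hour; a negative value keeps it loaded indefinitely
    curl http://localhost:11434/api/generate -d '{
      "model": "example-model",
      "prompt": "Hello",
      "keep_alive": "1h"
    }'

When the field is omitted entirely, the server-wide default from OLLAMA_KEEP_ALIVE (5 minutes if unset) applies only on the initial load.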
--- envconfig/config.go | 31 ++++++++++++++++++++-- envconfig/config_test.go | 17 ++++++++++++ server/routes.go | 57 +++------------------------------------- server/sched.go | 14 +++++++--- server/sched_test.go | 22 ++++++++-------- 5 files changed, 70 insertions(+), 71 deletions(-) diff --git a/envconfig/config.go b/envconfig/config.go index c02c4878e..105b9af6e 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -4,12 +4,14 @@ import ( "errors" "fmt" "log/slog" + "math" "net" "os" "path/filepath" "runtime" "strconv" "strings" + "time" ) type OllamaHost struct { @@ -34,7 +36,7 @@ var ( // Set via OLLAMA_HOST in the environment Host *OllamaHost // Set via OLLAMA_KEEP_ALIVE in the environment - KeepAlive string + KeepAlive time.Duration // Set via OLLAMA_LLM_LIBRARY in the environment LLMLibrary string // Set via OLLAMA_MAX_LOADED_MODELS in the environment @@ -132,6 +134,7 @@ func init() { NumParallel = 0 // Autoselect MaxRunners = 0 // Autoselect MaxQueuedRequests = 512 + KeepAlive = 5 * time.Minute LoadConfig() } @@ -266,7 +269,10 @@ func LoadConfig() { } } - KeepAlive = clean("OLLAMA_KEEP_ALIVE") + ka := clean("OLLAMA_KEEP_ALIVE") + if ka != "" { + loadKeepAlive(ka) + } var err error ModelsDir, err = getModelsDir() @@ -344,3 +350,24 @@ func getOllamaHost() (*OllamaHost, error) { Port: port, }, nil } + +func loadKeepAlive(ka string) { + v, err := strconv.Atoi(ka) + if err != nil { + d, err := time.ParseDuration(ka) + if err == nil { + if d < 0 { + KeepAlive = time.Duration(math.MaxInt64) + } else { + KeepAlive = d + } + } + } else { + d := time.Duration(v) * time.Second + if d < 0 { + KeepAlive = time.Duration(math.MaxInt64) + } else { + KeepAlive = d + } + } +} diff --git a/envconfig/config_test.go b/envconfig/config_test.go index 7d923d629..a5d73fd7c 100644 --- a/envconfig/config_test.go +++ b/envconfig/config_test.go @@ -2,8 +2,10 @@ package envconfig import ( "fmt" + "math" "net" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,6 +25,21 @@ func TestConfig(t *testing.T) { t.Setenv("OLLAMA_FLASH_ATTENTION", "1") LoadConfig() require.True(t, FlashAttention) + t.Setenv("OLLAMA_KEEP_ALIVE", "") + LoadConfig() + require.Equal(t, 5*time.Minute, KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "3") + LoadConfig() + require.Equal(t, 3*time.Second, KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "1h") + LoadConfig() + require.Equal(t, 1*time.Hour, KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "-1s") + LoadConfig() + require.Equal(t, time.Duration(math.MaxInt64), KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "-1") + LoadConfig() + require.Equal(t, time.Duration(math.MaxInt64), KeepAlive) } func TestClientFromEnvironment(t *testing.T) { diff --git a/server/routes.go b/server/routes.go index b14a146c1..ac6b713a7 100644 --- a/server/routes.go +++ b/server/routes.go @@ -9,7 +9,6 @@ import ( "io" "io/fs" "log/slog" - "math" "net" "net/http" "net/netip" @@ -17,7 +16,6 @@ import ( "os/signal" "path/filepath" "slices" - "strconv" "strings" "syscall" "time" @@ -56,8 +54,6 @@ func init() { gin.SetMode(mode) } -var defaultSessionDuration = 5 * time.Minute - func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options, error) { opts := api.DefaultOptions() if err := opts.FromMap(model.Options); err != nil { @@ -133,14 +129,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { return } - var sessionDuration time.Duration - if req.KeepAlive == nil { - sessionDuration = getDefaultSessionDuration() - } else { - sessionDuration = 
req.KeepAlive.Duration - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration) + rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) var runner *runnerRef select { case runner = <-rCh: @@ -320,32 +309,6 @@ func (s *Server) GenerateHandler(c *gin.Context) { streamResponse(c, ch) } -func getDefaultSessionDuration() time.Duration { - if envconfig.KeepAlive != "" { - v, err := strconv.Atoi(envconfig.KeepAlive) - if err != nil { - d, err := time.ParseDuration(envconfig.KeepAlive) - if err != nil { - return defaultSessionDuration - } - - if d < 0 { - return time.Duration(math.MaxInt64) - } - - return d - } - - d := time.Duration(v) * time.Second - if d < 0 { - return time.Duration(math.MaxInt64) - } - return d - } - - return defaultSessionDuration -} - func (s *Server) EmbeddingsHandler(c *gin.Context) { var req api.EmbeddingRequest err := c.ShouldBindJSON(&req) @@ -380,14 +343,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) { return } - var sessionDuration time.Duration - if req.KeepAlive == nil { - sessionDuration = getDefaultSessionDuration() - } else { - sessionDuration = req.KeepAlive.Duration - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration) + rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) var runner *runnerRef select { case runner = <-rCh: @@ -1318,14 +1274,7 @@ func (s *Server) ChatHandler(c *gin.Context) { return } - var sessionDuration time.Duration - if req.KeepAlive == nil { - sessionDuration = getDefaultSessionDuration() - } else { - sessionDuration = req.KeepAlive.Duration - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration) + rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) var runner *runnerRef select { case runner = <-rCh: diff --git a/server/sched.go b/server/sched.go index 71b535ae2..dc492cfb3 100644 --- a/server/sched.go +++ b/server/sched.go @@ -24,7 +24,7 @@ type LlmRequest struct { model *Model opts api.Options origNumCtx int // Track the initial ctx request - sessionDuration time.Duration + sessionDuration *api.Duration successCh chan *runnerRef errCh chan error schedAttempts uint @@ -75,7 +75,7 @@ func InitScheduler(ctx context.Context) *Scheduler { } // context must be canceled to decrement ref count and release the runner -func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration time.Duration) (chan *runnerRef, chan error) { +func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration *api.Duration) (chan *runnerRef, chan error) { if opts.NumCtx < 4 { opts.NumCtx = 4 } @@ -389,7 +389,9 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *Llm runner.expireTimer.Stop() runner.expireTimer = nil } - runner.sessionDuration = pending.sessionDuration + if pending.sessionDuration != nil { + runner.sessionDuration = pending.sessionDuration.Duration + } pending.successCh <- runner go func() { <-pending.ctx.Done() @@ -402,6 +404,10 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, if numParallel < 1 { numParallel = 1 } + sessionDuration := envconfig.KeepAlive + if req.sessionDuration != nil { + sessionDuration = req.sessionDuration.Duration + } llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts, numParallel) if err != nil { // some older models are not compatible with newer 
versions of llama.cpp @@ -419,7 +425,7 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, modelPath: req.model.ModelPath, llama: llama, Options: &req.opts, - sessionDuration: req.sessionDuration, + sessionDuration: sessionDuration, gpus: gpus, estimatedVRAM: llama.EstimatedVRAM(), estimatedTotal: llama.EstimatedTotal(), diff --git a/server/sched_test.go b/server/sched_test.go index be0830a34..d957927e6 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -44,7 +44,7 @@ func TestLoad(t *testing.T) { opts: api.DefaultOptions(), successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), - sessionDuration: 2, + sessionDuration: &api.Duration{Duration: 2 * time.Second}, } // Fail to load model first s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { @@ -142,7 +142,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV ctx: scenario.ctx, model: model, opts: api.DefaultOptions(), - sessionDuration: 5 * time.Millisecond, + sessionDuration: &api.Duration{Duration: 5 * time.Millisecond}, successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), } @@ -156,18 +156,18 @@ func TestRequests(t *testing.T) { // Same model, same request scenario1a := newScenario(t, ctx, "ollama-model-1", 10) - scenario1a.req.sessionDuration = 5 * time.Millisecond + scenario1a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond} scenario1b := newScenario(t, ctx, "ollama-model-1", 11) scenario1b.req.model = scenario1a.req.model scenario1b.ggml = scenario1a.ggml - scenario1b.req.sessionDuration = 0 + scenario1b.req.sessionDuration = &api.Duration{Duration: 0} // simple reload of same model scenario2a := newScenario(t, ctx, "ollama-model-1", 20) tmpModel := *scenario1a.req.model scenario2a.req.model = &tmpModel scenario2a.ggml = scenario1a.ggml - scenario2a.req.sessionDuration = 5 * time.Millisecond + scenario2a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond} // Multiple loaded models scenario3a := newScenario(t, ctx, "ollama-model-3a", 1*format.GigaByte) @@ -318,11 +318,11 @@ func TestGetRunner(t *testing.T) { defer done() scenario1a := newScenario(t, ctx, "ollama-model-1a", 10) - scenario1a.req.sessionDuration = 0 + scenario1a.req.sessionDuration = &api.Duration{Duration: 0} scenario1b := newScenario(t, ctx, "ollama-model-1b", 10) - scenario1b.req.sessionDuration = 0 + scenario1b.req.sessionDuration = &api.Duration{Duration: 0} scenario1c := newScenario(t, ctx, "ollama-model-1c", 10) - scenario1c.req.sessionDuration = 0 + scenario1c.req.sessionDuration = &api.Duration{Duration: 0} envconfig.MaxQueuedRequests = 1 s := InitScheduler(ctx) s.getGpuFn = func() gpu.GpuInfoList { @@ -402,7 +402,7 @@ func TestPrematureExpired(t *testing.T) { case <-ctx.Done(): t.Fatal("timeout") } - time.Sleep(scenario1a.req.sessionDuration) + time.Sleep(scenario1a.req.sessionDuration.Duration) scenario1a.ctxDone() time.Sleep(20 * time.Millisecond) require.LessOrEqual(t, len(s.finishedReqCh), 1) @@ -423,7 +423,7 @@ func TestUseLoadedRunner(t *testing.T) { ctx: ctx, opts: api.DefaultOptions(), successCh: make(chan *runnerRef, 1), - sessionDuration: 2, + sessionDuration: &api.Duration{Duration: 2}, } finished := make(chan *LlmRequest) llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}} @@ -614,7 +614,7 @@ func TestAlreadyCanceled(t *testing.T) { dctx, done2 := context.WithCancel(ctx) done2() scenario1a := 
newScenario(t, dctx, "ollama-model-1", 10) - scenario1a.req.sessionDuration = 0 + scenario1a.req.sessionDuration = &api.Duration{Duration: 0} s := InitScheduler(ctx) slog.Info("scenario1a") s.pendingReqCh <- scenario1a.req From 0d16eb310ed26e5a438f482dbffe7687e106346e Mon Sep 17 00:00:00 2001 From: Anatoli Babenia Date: Thu, 4 Jul 2024 01:36:11 +0300 Subject: [PATCH 053/106] fix: use `envconfig.ModelsDir` directly (#4821) * Co-authored-by: Anatoli Babenia Co-authored-by: Maas Lalani --- envconfig/config.go | 4 ++-- server/modelpath.go | 21 +++------------------ 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/envconfig/config.go b/envconfig/config.go index 105b9af6e..62d661ebc 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -43,10 +43,10 @@ var ( MaxRunners int // Set via OLLAMA_MAX_QUEUE in the environment MaxQueuedRequests int - // Set via OLLAMA_MODELS in the environment - ModelsDir string // Set via OLLAMA_MAX_VRAM in the environment MaxVRAM uint64 + // Set via OLLAMA_MODELS in the environment + ModelsDir string // Set via OLLAMA_NOHISTORY in the environment NoHistory bool // Set via OLLAMA_NOPRUNE in the environment diff --git a/server/modelpath.go b/server/modelpath.go index 64f59c29a..3fdb4238f 100644 --- a/server/modelpath.go +++ b/server/modelpath.go @@ -103,18 +103,9 @@ func (mp ModelPath) GetShortTagname() string { return fmt.Sprintf("%s/%s/%s:%s", mp.Registry, mp.Namespace, mp.Repository, mp.Tag) } -// modelsDir returns the value of the OLLAMA_MODELS environment variable or the user's home directory if OLLAMA_MODELS is not set. -// The models directory is where Ollama stores its model files and manifests. -func modelsDir() (string, error) { - return envconfig.ModelsDir, nil -} - // GetManifestPath returns the path to the manifest file for the given model path, it is up to the caller to create the directory if it does not exist. 
func (mp ModelPath) GetManifestPath() (string, error) { - dir, err := modelsDir() - if err != nil { - return "", err - } + dir := envconfig.ModelsDir return filepath.Join(dir, "manifests", mp.Registry, mp.Namespace, mp.Repository, mp.Tag), nil } @@ -127,10 +118,7 @@ func (mp ModelPath) BaseURL() *url.URL { } func GetManifestPath() (string, error) { - dir, err := modelsDir() - if err != nil { - return "", err - } + dir := envconfig.ModelsDir path := filepath.Join(dir, "manifests") if err := os.MkdirAll(path, 0o755); err != nil { @@ -141,10 +129,7 @@ func GetManifestPath() (string, error) { } func GetBlobsPath(digest string) (string, error) { - dir, err := modelsDir() - if err != nil { - return "", err - } + dir := envconfig.ModelsDir // only accept actual sha256 digests pattern := "^sha256[:-][0-9a-fA-F]{64}$" From 4d71c559b21ec9207a328b824ce534bdbaf59f2d Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 3 Jul 2024 20:04:30 -0400 Subject: [PATCH 054/106] fix error detection by limiting model loading error parsing (#5472) --- llm/status.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/status.go b/llm/status.go index 0f56b7f99..d9f361155 100644 --- a/llm/status.go +++ b/llm/status.go @@ -25,7 +25,7 @@ var errorPrefixes = []string{ "CUDA error", "cudaMalloc failed", "\"ERR\"", - "architecture", + "error loading model", } func (w *StatusWriter) Write(b []byte) (int, error) { From 52abc8acb702cad0b58cee92721e64687f5a6c85 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 13 May 2024 15:08:29 -0700 Subject: [PATCH 055/106] Document older win10 terminal problems We haven't found a workaround, so for now recommend updating. --- docs/troubleshooting.md | 5 +++++ docs/windows.md | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index bbb771831..484c4b6ce 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -89,3 +89,8 @@ Sometimes the Ollama can have difficulties initializing the GPU. When you check If none of those resolve the problem, gather additional information and file an issue: - Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs - Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia` + + +## Windows Terminal Errors + +Older versions of Windows 10 (e.g., 21H1) are known to have a bug where the standard terminal program does not display control characters correctly. This can result in a long string of strings like `←[?25h←[?25l` being displayed, sometimes erroring with `The parameter is incorrect` To resolve this problem, please update to Win 10 22H1 or newer. 
diff --git a/docs/windows.md b/docs/windows.md index abc0eb300..69c2aa6d1 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -19,7 +19,7 @@ Logs will often be helpful in diagnosing the problem (see ## System Requirements -* Windows 10 or newer, Home or Pro +* Windows 10 22H2 or newer, Home or Pro * NVIDIA 452.39 or newer Drivers if you have an NVIDIA card * AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card From e9188e971a998faff7aabd867ebc0ef1dc7f672b Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 11:20:57 -0400 Subject: [PATCH 056/106] Fix assert on small embedding inputs (#5491) * Fix assert on small embedding inputs * Update llm/patches/09-pooling.diff --- llm/patches/09-pooling.diff | 60 +++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 llm/patches/09-pooling.diff diff --git a/llm/patches/09-pooling.diff b/llm/patches/09-pooling.diff new file mode 100644 index 000000000..348fbfdc4 --- /dev/null +++ b/llm/patches/09-pooling.diff @@ -0,0 +1,60 @@ +diff --git a/llama.cpp b/llama.cpp +index 61948751..61fe7b57 100644 +--- a/llama.cpp ++++ b/llama.cpp +@@ -7591,14 +7591,14 @@ struct llm_build_context { + } + + struct ggml_tensor * build_inp_mean() { +- lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens); ++ lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, cparams.n_seq_max); + cb(lctx.inp_mean, "inp_mean", -1); + ggml_set_input(lctx.inp_mean); + return lctx.inp_mean; + } + + struct ggml_tensor * build_inp_cls() { +- lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); ++ lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, cparams.n_seq_max); + cb(lctx.inp_cls, "inp_cls", -1); + ggml_set_input(lctx.inp_cls); + return lctx.inp_cls; +@@ -12062,19 +12062,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer)); + + float * data = (float *) lctx.inp_mean->data; +- memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean)); ++ memset(lctx.inp_mean->data, 0, n_tokens * cparams.n_seq_max * ggml_element_size(lctx.inp_mean)); + + std::vector sum(n_tokens, 0); + for (int i = 0; i < n_tokens; ++i) { + const llama_seq_id seq_id = batch.seq_id[i][0]; +- +- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); +- + sum[seq_id] += 1; + } + +- std::vector div(n_tokens, 0.0f); +- for (int i = 0; i < n_tokens; ++i) { ++ std::vector div(cparams.n_seq_max, 0.0f); ++ for (uint32_t i = 0; i < cparams.n_seq_max; ++i) { + const uint64_t s = sum[i]; + if (s > 0) { + div[i] = 1.0f/float(s); +@@ -12094,14 +12091,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + + uint32_t * data = (uint32_t *) lctx.inp_cls->data; +- memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); ++ memset(lctx.inp_cls->data, 0, cparams.n_seq_max * ggml_element_size(lctx.inp_cls)); + + for (int i = 0; i < n_tokens; ++i) { + const llama_seq_id seq_id = batch.seq_id[i][0]; + const llama_pos pos = batch.pos[i]; +- +- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS"); +- + if (pos == 0) { + data[seq_id] = i; + } From d89454de805c6d9507796cf2a262986db43ed849 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 12:32:47 -0400 Subject: [PATCH 057/106] Use 
slot with cached prompt instead of least recently used (#5492) * Use common prefix to select slot * actually report `longest` --- llm/ext_server/server.cpp | 40 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 099705998..00a15b4a3 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1382,12 +1382,50 @@ struct llama_server_context } } + std::string common_prefix(const std::string& str1, const std::string& str2) { + auto mismatch_pair = std::mismatch(str1.begin(), str1.end(), str2.begin()); + return std::string(str1.begin(), mismatch_pair.first); + } + + // Find the slot that has the greatest common prefix + server_slot *prefix_slot(const json &prompt) { + if (!prompt.is_string()) { + return nullptr; + } + + std::string prompt_str = prompt.get(); + server_slot *slot = nullptr; + size_t longest = 0; + + for (server_slot &s : slots) { + if (s.available() && s.prompt.is_string()) { + std::string s_prompt = s.prompt.get(); + std::string prefix = common_prefix(s_prompt, prompt_str); + + if (prefix.size() > longest) { + slot = &s; + longest = prefix.size(); + } + } + } + + if (!slot) { + return get_slot(-1); + } + + LOG_INFO("slot with common prefix found", {{ + "slot_id", slot->id, + "characters", longest + }}); + return slot; + } + void process_single_task(task_server& task) { switch (task.type) { case TASK_TYPE_COMPLETION: { - server_slot *slot = get_slot(json_value(task.data, "slot_id", -1)); + server_slot *slot = prefix_slot(task.data["prompt"]); if (slot == nullptr) { // if no slot is available, we defer this task for processing later From 8f8e736b131510c8707bed5886b343906cb74a24 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 13:25:58 -0400 Subject: [PATCH 058/106] update llama.cpp submodule to `d7fd29f` (#5475) --- docs/development.md | 2 +- llm/ext_server/CMakeLists.txt | 26 +- llm/generate/gen_darwin.sh | 16 +- llm/generate/gen_linux.sh | 36 +-- llm/generate/gen_windows.ps1 | 44 ++-- llm/llama.cpp | 2 +- llm/llm.go | 14 +- llm/patches/01-load-progress.diff | 14 +- llm/patches/03-load_exception.diff | 24 +- llm/patches/04-metal.diff | 6 +- llm/patches/05-default-pretokenizer.diff | 18 +- llm/patches/06-qwen2.diff | 6 +- llm/patches/07-embeddings.diff | 45 ++++ llm/patches/07-gemma.diff | 305 ----------------------- llm/patches/09-pooling.diff | 14 +- 15 files changed, 150 insertions(+), 422 deletions(-) create mode 100644 llm/patches/07-embeddings.diff delete mode 100644 llm/patches/07-gemma.diff diff --git a/docs/development.md b/docs/development.md index 2a6886a43..cd6c41af5 100644 --- a/docs/development.md +++ b/docs/development.md @@ -104,7 +104,7 @@ like to use. For example, to compile an optimized binary for an Intel i9-9880H, you might use: ``` -OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./... +OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on" go generate ./... go build . 
``` diff --git a/llm/ext_server/CMakeLists.txt b/llm/ext_server/CMakeLists.txt index db7d52dcc..9de50739c 100644 --- a/llm/ext_server/CMakeLists.txt +++ b/llm/ext_server/CMakeLists.txt @@ -1,14 +1,14 @@ - -set(TARGET ollama_llama_server) -option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) -install(TARGETS ${TARGET} RUNTIME) -target_compile_definitions(${TARGET} PRIVATE - SERVER_VERBOSE=$ -) -target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) -if (WIN32) - TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) -endif() + +set(TARGET ollama_llama_server) +option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) +install(TARGETS ${TARGET} RUNTIME) +target_compile_definitions(${TARGET} PRIVATE + SERVER_VERBOSE=$ +) +target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) +if (WIN32) + TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) +endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) \ No newline at end of file diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh index 721a9ae80..02577545a 100755 --- a/llm/generate/gen_darwin.sh +++ b/llm/generate/gen_darwin.sh @@ -18,16 +18,16 @@ sign() { fi } -COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DLLAMA_METAL_EMBED_LIBRARY=on -DLLAMA_OPENMP=off" +COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off" case "${GOARCH}" in "amd64") - COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_NATIVE=off" + COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DGGML_METAL=off -DGGML_NATIVE=off" # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_BLAS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build @@ -37,7 +37,7 @@ case "${GOARCH}" in # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta) # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_BLAS=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/cpu" echo "Building LCD CPU" build @@ -49,7 +49,7 @@ case "${GOARCH}" in # Approximately 400% faster than LCD on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_BLAS=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + 
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/cpu_avx" echo "Building AVX CPU" build @@ -61,7 +61,7 @@ case "${GOARCH}" in # Approximately 10% faster than AVX on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=on -DLLAMA_BLAS=off -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=on -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2" echo "Building AVX2 CPU" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation" @@ -75,14 +75,14 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_BLAS=off -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then init_vars - CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DLLAMA_ACCELERATE=on -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=on ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/metal" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders" build diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 28ce1f21d..c36862520 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -51,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then export CUDACXX=$(command -v nvcc) fi fi -COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off" +COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off" source $(dirname $0)/gen_common.sh init_vars git_module_setup @@ -64,7 +64,7 @@ if [ -z "${OLLAMA_SKIP_STATIC_GENERATE}" -o "${OLLAMA_CPU_TARGET}" = "static" ]; # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off ${CMAKE_DEFS}" + CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DGGML_NATIVE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}_static" echo "Building static library" build @@ -84,22 +84,22 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then compress else # Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512 - # -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer - # 
-DLLAMA_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX) - # -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen - # -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver + # -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer + # -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX) + # -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen + # -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver # Note: the following seem to yield slower results than AVX2 - ymmv - # -DLLAMA_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT) - # -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake - # -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake + # -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT) + # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake + # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake - COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off" + COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off" if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then # # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta) # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu" echo "Building LCD CPU" build @@ -116,7 +116,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # Approximately 400% faster than LCD on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu_avx" echo "Building AVX CPU" build @@ -129,7 +129,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # Approximately 10% faster than AVX on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu_avx2" echo "Building AVX2 CPU" build @@ -170,15 +170,15 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then # # CUDA compute < 6.0 lacks proper FP16 support on ARM. # Disabling has minimal performance effect while maintaining compatibility. 
- ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off" + ARM64_DEFS="-DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_CUDA_F16=off" fi # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\"" - CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" + CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" echo "Building custom CUDA GPU" else - CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}" + CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DGGML_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" fi CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" @@ -216,7 +216,7 @@ if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then init_vars source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI CC=icx - CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL=ON -DLLAMA_SYCL_F16=OFF" + CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DGGML_SYCL_F16=OFF" BUILD_DIR="../build/linux/${ARCH}/oneapi" EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb" DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it @@ -254,7 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. 
|| true) fi init_vars - CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)" + CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)" # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\"" diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index e217a0382..5c6943502 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -39,8 +39,8 @@ function init_vars { } $script:cmakeDefs = @( "-DBUILD_SHARED_LIBS=on", - "-DLLAMA_NATIVE=off", - "-DLLAMA_OPENMP=off" + "-DGGML_NATIVE=off", + "-DGGML_OPENMP=off" ) $script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on") $script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower() @@ -182,9 +182,9 @@ function cleanup { } -# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer -# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen -# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver +# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer +# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen +# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver function build_static() { @@ -204,13 +204,13 @@ function build_static() { "-DCMAKE_C_COMPILER=gcc.exe", "-DCMAKE_CXX_COMPILER=g++.exe", "-DBUILD_SHARED_LIBS=off", - "-DLLAMA_NATIVE=off", - "-DLLAMA_AVX=off", - "-DLLAMA_AVX2=off", - "-DLLAMA_AVX512=off", - "-DLLAMA_F16C=off", - "-DLLAMA_FMA=off", - "-DLLAMA_OPENMP=off") + "-DGGML_NATIVE=off", + "-DGGML_AVX=off", + "-DGGML_AVX2=off", + "-DGGML_AVX512=off", + "-DGGML_F16C=off", + "-DGGML_FMA=off", + "-DGGML_OPENMP=off") $script:buildDir="../build/windows/${script:ARCH}_static" write-host "Building static library" build @@ -224,7 +224,7 @@ function build_cpu($gen_arch) { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) { # remaining llama.cpp builds use MSVC init_vars - $script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DLLAMA_AVX=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs + $script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs $script:buildDir="../build/windows/${script:ARCH}/cpu" $script:distDir="$script:DIST_BASE\cpu" write-host "Building LCD CPU" @@ -239,7 +239,7 @@ function build_cpu($gen_arch) { function build_cpu_avx() { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) { init_vars - $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs + $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs $script:buildDir="../build/windows/${script:ARCH}/cpu_avx" $script:distDir="$script:DIST_BASE\cpu_avx" 
write-host "Building AVX CPU" @@ -254,7 +254,7 @@ function build_cpu_avx() { function build_cpu_avx2() { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) { init_vars - $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=on", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=on", "-DLLAMA_F16C=on") + $script:cmakeDefs + $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=on", "-DGGML_AVX512=off", "-DGGML_FMA=on", "-DGGML_F16C=on") + $script:cmakeDefs $script:buildDir="../build/windows/${script:ARCH}/cpu_avx2" $script:distDir="$script:DIST_BASE\cpu_avx2" write-host "Building AVX2 CPU" @@ -279,9 +279,9 @@ function build_cuda() { $script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT" $script:cmakeDefs += @( "-A", "x64", - "-DLLAMA_CUDA=ON", - "-DLLAMA_AVX=on", - "-DLLAMA_AVX2=off", + "-DGGML_CUDA=ON", + "-DGGML_AVX=on", + "-DGGML_AVX2=off", "-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR", "-DCMAKE_CUDA_FLAGS=-t8", "-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}" @@ -319,7 +319,7 @@ function build_oneapi() { $script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT" $script:cmakeDefs += @( "-G", "MinGW Makefiles", - "-DLLAMA_SYCL=ON", + "-DGGML_SYCL=ON", "-DCMAKE_C_COMPILER=icx", "-DCMAKE_CXX_COMPILER=icx", "-DCMAKE_BUILD_TYPE=Release" @@ -365,10 +365,10 @@ function build_rocm() { "-G", "Ninja", "-DCMAKE_C_COMPILER=clang.exe", "-DCMAKE_CXX_COMPILER=clang++.exe", - "-DLLAMA_HIPBLAS=on", + "-DGGML_HIPBLAS=on", "-DHIP_PLATFORM=amd", - "-DLLAMA_AVX=on", - "-DLLAMA_AVX2=off", + "-DGGML_AVX=on", + "-DGGML_AVX2=off", "-DCMAKE_POSITION_INDEPENDENT_CODE=on", "-DAMDGPU_TARGETS=$(amdGPUs)", "-DGPU_TARGETS=$(amdGPUs)" diff --git a/llm/llama.cpp b/llm/llama.cpp index 7c26775ad..d7fd29fff 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit 7c26775adb579e92b59c82e8084c07a1d0f75e9c +Subproject commit d7fd29fff16456ce9c3a23fd2d09a66256b05aff diff --git a/llm/llm.go b/llm/llm.go index 2a0c4b91a..157176246 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,12 +1,12 @@ package llm -// #cgo CFLAGS: -Illama.cpp -// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/libllama.a -lstdc++ -// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/libllama.a -lstdc++ -// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/libllama.a -static -lstdc++ -// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/libllama.a -static -lstdc++ -// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/libllama.a -lstdc++ -// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/libllama.a -lstdc++ +// #cgo CFLAGS: -Illama.cpp/include -Illama.cpp/ggml/include +// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/src/libllama.a ${SRCDIR}/build/darwin/arm64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal +// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/src/libllama.a ${SRCDIR}/build/darwin/x86_64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal +// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/src/libllama.a ${SRCDIR}/build/windows/amd64_static/ggml/src/libggml.a -static -lstdc++ +// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/src/libllama.a ${SRCDIR}/build/windows/arm64_static/ggml/src/libggml.a -static -lstdc++ +// #cgo linux,amd64 LDFLAGS: 
${SRCDIR}/build/linux/x86_64_static/src/libllama.a ${SRCDIR}/build/linux/x86_64_static/ggml/src/libggml.a -lstdc++ +// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml. -lstdc++ // #include // #include "llama.h" import "C" diff --git a/llm/patches/01-load-progress.diff b/llm/patches/01-load-progress.diff index be5286091..a053c1c2c 100644 --- a/llm/patches/01-load-progress.diff +++ b/llm/patches/01-load-progress.diff @@ -1,8 +1,8 @@ diff --git a/common/common.cpp b/common/common.cpp -index 73ff0e85..6adb1a92 100644 +index 2c05a4d4..927f0e3d 100644 --- a/common/common.cpp +++ b/common/common.cpp -@@ -2447,6 +2447,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & +@@ -2093,6 +2093,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & mparams.use_mmap = params.use_mmap; mparams.use_mlock = params.use_mlock; mparams.check_tensors = params.check_tensors; @@ -12,10 +12,10 @@ index 73ff0e85..6adb1a92 100644 mparams.kv_overrides = NULL; } else { diff --git a/common/common.h b/common/common.h -index 58ed72f4..0bb2605e 100644 +index 65c0ef81..ebca2c77 100644 --- a/common/common.h +++ b/common/common.h -@@ -180,6 +180,13 @@ struct gpt_params { +@@ -184,6 +184,13 @@ struct gpt_params { std::string mmproj = ""; // path to multimodal projector std::vector image; // path to image file(s) @@ -26,6 +26,6 @@ index 58ed72f4..0bb2605e 100644 + // context pointer passed to the progress callback + void * progress_callback_user_data; + - // server params - int32_t port = 8080; // server listens on this network port - int32_t timeout_read = 600; // http read timeout in seconds + // embedding + bool embedding = false; // get only sentence embedding + int32_t embd_normalize = 2; // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm) diff --git a/llm/patches/03-load_exception.diff b/llm/patches/03-load_exception.diff index eb245c2a9..026661963 100644 --- a/llm/patches/03-load_exception.diff +++ b/llm/patches/03-load_exception.diff @@ -1,17 +1,8 @@ -From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001 -From: Michael Yang -Date: Thu, 23 May 2024 11:18:45 -0700 -Subject: [PATCH] throw exception on load errors - ---- - llama.cpp | 25 ++++++++++++++++--------- - 1 file changed, 16 insertions(+), 9 deletions(-) - -diff --git a/llama.cpp b/llama.cpp -index 15c66077..8ba90b6a 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam +diff --git a/src/llama.cpp b/src/llama.cpp +index 73f52435..58a00fb1 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -7241,7 +7241,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam } } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); @@ -20,7 +11,7 @@ index 15c66077..8ba90b6a 100644 } return 0; -@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file( +@@ -17564,16 +17564,23 @@ struct llama_model * llama_load_model_from_file( } model->rpc_servers.push_back(servers); } @@ -52,6 +43,3 @@ index 15c66077..8ba90b6a 100644 } return model; --- -2.45.1 - diff --git a/llm/patches/04-metal.diff b/llm/patches/04-metal.diff index f8fa7db76..e63732e70 100644 --- a/llm/patches/04-metal.diff +++ b/llm/patches/04-metal.diff @@ -1,7 +1,7 @@ -diff --git a/ggml-metal.m b/ggml-metal.m +diff --git 
a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m index 0207b787..b5e9884b 100644 ---- a/ggml-metal.m -+++ b/ggml-metal.m +--- a/ggml/src/ggml-metal.m ++++ b/ggml/src/ggml-metal.m @@ -1396,27 +1396,23 @@ static enum ggml_status ggml_metal_graph_compute( // to the matrix-vector kernel int ne11_mm_min = 1; diff --git a/llm/patches/05-default-pretokenizer.diff b/llm/patches/05-default-pretokenizer.diff index 2a2e7306e..f4eaced72 100644 --- a/llm/patches/05-default-pretokenizer.diff +++ b/llm/patches/05-default-pretokenizer.diff @@ -1,8 +1,8 @@ -diff --git a/llama.cpp b/llama.cpp -index 61948751..4b72a293 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -4824,16 +4824,7 @@ static void llm_load_vocab( +diff --git a/src/llama.cpp b/src/llama.cpp +index 73f52435..2b81b4bd 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -5092,16 +5092,7 @@ static void llm_load_vocab( // for now, only BPE models have pre-tokenizers if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { @@ -20,13 +20,13 @@ index 61948751..4b72a293 100644 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } else if ( tokenizer_pre == "llama3" || -@@ -4888,7 +4879,8 @@ static void llm_load_vocab( - tokenizer_pre == "poro-chat") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO; +@@ -5164,7 +5155,8 @@ static void llm_load_vocab( + tokenizer_pre == "jais") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS; } else { - throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); + LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__); + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } - } else { + } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; diff --git a/llm/patches/06-qwen2.diff b/llm/patches/06-qwen2.diff index d7b0c1555..1c7109f6f 100644 --- a/llm/patches/06-qwen2.diff +++ b/llm/patches/06-qwen2.diff @@ -1,7 +1,7 @@ -diff --git a/llama.cpp b/llama.cpp +diff --git a/src/llama.cpp b/src/llama.cpp index 40d2ec2c..f34eb79a 100644 ---- a/llama.cpp -+++ b/llama.cpp +--- a/src/llama.cpp ++++ b/src/llama.cpp @@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv( struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); cb(kq, "kq", il); diff --git a/llm/patches/07-embeddings.diff b/llm/patches/07-embeddings.diff new file mode 100644 index 000000000..a84e3b06c --- /dev/null +++ b/llm/patches/07-embeddings.diff @@ -0,0 +1,45 @@ +diff --git a/src/llama.cpp b/src/llama.cpp +index 1fe2b9f7..a43312a7 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -13689,7 +13689,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) { + const auto n_embd = hparams.n_embd; + + // TODO: use a per-batch flag for logits presence instead +- const bool has_logits = !cparams.embeddings; ++ const bool has_logits = cparams.causal_attn; + const bool has_embd = lctx.is_encoding || (cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE)); + + const size_t logits_size = has_logits ? 
n_vocab*n_outputs_max : 0; +@@ -13959,17 +13959,25 @@ static int llama_decode_internal( + // no output + res = nullptr; + embd = nullptr; +- } else if (cparams.embeddings) { +- res = nullptr; // do not extract logits for embedding case +- embd = gf->nodes[gf->n_nodes - 1]; +- if (strcmp(embd->name, "result_embd_pooled") != 0) { +- embd = gf->nodes[gf->n_nodes - 2]; ++ } ++ ++ if (cparams.embeddings) { ++ for (int i = gf->n_nodes - 1; i >= 0; --i) { ++ embd = gf->nodes[i]; ++ if (strcmp(embd->name, "result_embd_pooled") == 0) { ++ break; ++ } + } + GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor"); +- } else { ++ } else { + embd = nullptr; // do not extract embeddings when not needed + GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor"); + } ++ ++ if (!cparams.causal_attn) { ++ res = nullptr; // do not extract logits when not needed ++ } ++ + // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); + + ggml_backend_sched_alloc_graph(lctx.sched, gf); diff --git a/llm/patches/07-gemma.diff b/llm/patches/07-gemma.diff deleted file mode 100644 index 86eac3d17..000000000 --- a/llm/patches/07-gemma.diff +++ /dev/null @@ -1,305 +0,0 @@ -From 5cadb45f39d001ffbad95b690d6cf0abcb4a6d96 Mon Sep 17 00:00:00 2001 -From: Ollama maintainers -Date: Wed, 26 Jun 2024 16:18:09 -0700 -Subject: [PATCH] Architecture support - ---- - llama.cpp | 194 +++++++++++++++++++++++++++++++++++++++++++++++++++++- - 1 file changed, 193 insertions(+), 1 deletion(-) - -diff --git a/llama.cpp b/llama.cpp -index 61948751..3b4196f5 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -217,6 +217,7 @@ enum llm_arch { - LLM_ARCH_INTERNLM2, - LLM_ARCH_MINICPM, - LLM_ARCH_GEMMA, -+ LLM_ARCH_GEMMA2, - LLM_ARCH_STARCODER2, - LLM_ARCH_MAMBA, - LLM_ARCH_XVERSE, -@@ -255,6 +256,7 @@ static const std::map LLM_ARCH_NAMES = { - { LLM_ARCH_INTERNLM2, "internlm2" }, - { LLM_ARCH_MINICPM, "minicpm" }, - { LLM_ARCH_GEMMA, "gemma" }, -+ { LLM_ARCH_GEMMA2, "gemma2" }, - { LLM_ARCH_STARCODER2, "starcoder2" }, - { LLM_ARCH_MAMBA, "mamba" }, - { LLM_ARCH_XVERSE, "xverse" }, -@@ -464,10 +466,12 @@ enum llm_tensor { - LLM_TENSOR_ATTN_NORM, - LLM_TENSOR_ATTN_NORM_2, - LLM_TENSOR_ATTN_OUT_NORM, -+ LLM_TENSOR_ATTN_POST_NORM, - LLM_TENSOR_ATTN_ROT_EMBD, - LLM_TENSOR_FFN_GATE_INP, - LLM_TENSOR_FFN_GATE_INP_SHEXP, - LLM_TENSOR_FFN_NORM, -+ LLM_TENSOR_FFN_POST_NORM, - LLM_TENSOR_FFN_GATE, - LLM_TENSOR_FFN_DOWN, - LLM_TENSOR_FFN_UP, -@@ -960,6 +964,24 @@ static const std::map> LLM_TENSOR_NA - { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, - }, - }, -+ { -+ LLM_ARCH_GEMMA2, -+ { -+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, -+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, -+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, -+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, -+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, -+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, -+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, -+ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, -+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, -+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, -+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, -+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, -+ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, -+ }, -+ }, - { - LLM_ARCH_STARCODER2, - { -@@ -1941,6 +1963,8 @@ enum e_model { - MODEL_8x22B, - MODEL_16x12B, - MODEL_10B_128x3_66B, -+ MODEL_9B, -+ MODEL_27B, - }; - - static const size_t kiB = 1024; -@@ -2114,6 +2138,7 @@ struct 
llama_layer { - struct ggml_tensor * attn_out_norm_b; - struct ggml_tensor * attn_q_a_norm; - struct ggml_tensor * attn_kv_a_norm; -+ struct ggml_tensor * attn_post_norm; - - // attention - struct ggml_tensor * wq; -@@ -2136,6 +2161,7 @@ struct llama_layer { - // normalization - struct ggml_tensor * ffn_norm; - struct ggml_tensor * ffn_norm_b; -+ struct ggml_tensor * ffn_post_norm; - struct ggml_tensor * layer_out_norm; - struct ggml_tensor * layer_out_norm_b; - struct ggml_tensor * ffn_norm_exps; -@@ -4529,6 +4555,16 @@ static void llm_load_hparams( - } - } break; - case LLM_ARCH_GEMMA: -+ { -+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); -+ -+ switch (hparams.n_layer) { -+ case 18: model.type = e_model::MODEL_9B; break; -+ case 28: model.type = e_model::MODEL_27B; break; -+ default: model.type = e_model::MODEL_UNKNOWN; -+ } -+ } break; -+ case LLM_ARCH_GEMMA2: - { - ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - -@@ -6305,6 +6341,40 @@ static bool llm_load_tensors( - layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); - } - } break; -+ case LLM_ARCH_GEMMA2: -+ { -+ model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); -+ -+ // output -+ model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); -+ model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading -+ -+ const int64_t n_ff = hparams.n_ff; -+ const int64_t n_embd_head_k = hparams.n_embd_head_k; -+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); -+ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); -+ -+ for (uint32_t i = 0; i < n_layer; ++i) { -+ ggml_context * ctx_layer = ctx_for_layer(i); -+ ggml_context * ctx_split = ctx_for_layer_split(i); -+ -+ auto & layer = model.layers[i]; -+ -+ layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); -+ -+ layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * hparams.n_head}); -+ layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}); -+ layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}); -+ layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * hparams.n_head, n_embd}); -+ layer.attn_post_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}); -+ -+ layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); -+ layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); -+ layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); -+ layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); -+ layer.ffn_post_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}); -+ } -+ } break; - case LLM_ARCH_STARCODER2: - { - model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); -@@ -10614,6 +10684,123 @@ struct llm_build_context { - return gf; - } - -+ struct ggml_cgraph * build_gemma2() { -+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); -+ -+ const int64_t 
n_embd_head_k = hparams.n_embd_head_k; -+ -+ struct ggml_tensor * cur; -+ struct ggml_tensor * inpL; -+ -+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb); -+ -+ inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); -+ cb(inpL, "inp_scaled", -1); -+ -+ // inp_pos - contains the positions -+ struct ggml_tensor * inp_pos = build_inp_pos(); -+ -+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads) -+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); -+ -+ for (int il = 0; il < n_layer; ++il) { -+ // norm -+ cur = llm_build_norm(ctx0, inpL, hparams, -+ model.layers[il].attn_norm, NULL, -+ LLM_NORM_RMS, cb, il); -+ cb(cur, "attn_norm", il); -+ -+ // self-attention -+ { -+ // compute Q and K and RoPE them -+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); -+ cb(Qcur, "Qcur", il); -+ -+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); -+ cb(Kcur, "Kcur", il); -+ -+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); -+ cb(Vcur, "Vcur", il); -+ -+ Qcur = ggml_rope_ext( -+ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, -+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale, -+ ext_factor, attn_factor, beta_fast, beta_slow); -+ cb(Qcur, "Qcur", il); -+ -+ Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); -+ cb(Qcur, "Qcur_scaled", il); -+ -+ Kcur = ggml_rope_ext( -+ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, -+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale, -+ ext_factor, attn_factor, beta_fast, beta_slow); -+ cb(Kcur, "Kcur", il); -+ -+ cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, -+ model.layers[il].wo, NULL, -+ Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); -+ } -+ -+ if (il == n_layer - 1) { -+ // skip computing output for unused tokens -+ struct ggml_tensor * inp_out_ids = build_inp_out_ids(); -+ cur = ggml_get_rows(ctx0, cur, inp_out_ids); -+ inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); -+ } -+ -+ cur = llm_build_norm(ctx0, cur, hparams, -+ model.layers[il].attn_post_norm, NULL, -+ LLM_NORM_RMS, cb, il); -+ cb(cur, "attn_post_norm", il); -+ -+ struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); -+ cb(sa_out, "sa_out", il); -+ -+ cur = llm_build_norm(ctx0, sa_out, hparams, -+ model.layers[il].ffn_norm, NULL, -+ LLM_NORM_RMS, cb, il); -+ cb(cur, "ffn_norm", il); -+ -+ // feed-forward network -+ { -+ cur = llm_build_ffn(ctx0, cur, -+ model.layers[il].ffn_up, NULL, -+ model.layers[il].ffn_gate, NULL, -+ model.layers[il].ffn_down, NULL, -+ NULL, -+ LLM_FFN_GELU, LLM_FFN_PAR, cb, il); -+ cb(cur, "ffn_out", il); -+ } -+ -+ cur = llm_build_norm(ctx0, cur, hparams, -+ model.layers[il].ffn_post_norm, NULL, -+ LLM_NORM_RMS, cb, -1); -+ cb(cur, "ffn_post_norm", -1); -+ -+ cur = ggml_add(ctx0, cur, sa_out); -+ cb(cur, "l_out", il); -+ -+ // input for next layer -+ inpL = cur; -+ } -+ -+ cur = inpL; -+ -+ cur = llm_build_norm(ctx0, cur, hparams, -+ model.output_norm, NULL, -+ LLM_NORM_RMS, cb, -1); -+ cb(cur, "result_norm", -1); -+ -+ // lm_head -+ cur = ggml_mul_mat(ctx0, model.output, cur); -+ cb(cur, "result_output", -1); -+ -+ ggml_build_forward_expand(gf, cur); -+ -+ return gf; -+ } -+ - struct ggml_cgraph * build_starcoder2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); - -@@ -11847,6 +12034,10 @@ static struct ggml_cgraph * llama_build_graph( - { - result = llm.build_gemma(); - } break; -+ 
case LLM_ARCH_GEMMA2: -+ { -+ result = llm.build_gemma2(); -+ } break; - case LLM_ARCH_STARCODER2: - { - result = llm.build_starcoder2(); -@@ -16671,6 +16862,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { - case LLM_ARCH_PHI2: - case LLM_ARCH_PHI3: - case LLM_ARCH_GEMMA: -+ case LLM_ARCH_GEMMA2: - case LLM_ARCH_STARCODER2: - case LLM_ARCH_GPTNEOX: - return LLAMA_ROPE_TYPE_NEOX; -@@ -18551,7 +18743,7 @@ static int32_t llama_chat_apply_template_internal( - if (add_ass) { - ss << "assistant\n"; - } -- } else if (tmpl == "gemma" || tmpl.find("") != std::string::npos) { -+ } else if (tmpl == "gemma" || tmpl == "gemma2" || tmpl.find("") != std::string::npos) { - // google/gemma-7b-it - std::string system_prompt = ""; - for (auto message : chat) { --- -2.45.2 - diff --git a/llm/patches/09-pooling.diff b/llm/patches/09-pooling.diff index 348fbfdc4..2e4fe11ee 100644 --- a/llm/patches/09-pooling.diff +++ b/llm/patches/09-pooling.diff @@ -1,8 +1,8 @@ -diff --git a/llama.cpp b/llama.cpp -index 61948751..61fe7b57 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -7591,14 +7591,14 @@ struct llm_build_context { +diff --git a/src/llama.cpp b/src/llama.cpp +index 721b8f4e..cfe7ac40 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -8420,14 +8420,14 @@ struct llm_build_context { } struct ggml_tensor * build_inp_mean() { @@ -19,7 +19,7 @@ index 61948751..61fe7b57 100644 cb(lctx.inp_cls, "inp_cls", -1); ggml_set_input(lctx.inp_cls); return lctx.inp_cls; -@@ -12062,19 +12062,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { +@@ -13847,19 +13847,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer)); float * data = (float *) lctx.inp_mean->data; @@ -42,7 +42,7 @@ index 61948751..61fe7b57 100644 const uint64_t s = sum[i]; if (s > 0) { div[i] = 1.0f/float(s); -@@ -12094,14 +12091,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { +@@ -13879,14 +13876,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); uint32_t * data = (uint32_t *) lctx.inp_cls->data; From 78fb33dd07ecbbd78de2293bc542187afa6b671b Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 15:18:36 -0400 Subject: [PATCH 059/106] fix typo in cgo directives in `llm.go` (#5501) --- llm/llm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/llm.go b/llm/llm.go index 157176246..fb6d4b5c7 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -6,7 +6,7 @@ package llm // #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/src/libllama.a ${SRCDIR}/build/windows/amd64_static/ggml/src/libggml.a -static -lstdc++ // #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/src/libllama.a ${SRCDIR}/build/windows/arm64_static/ggml/src/libggml.a -static -lstdc++ // #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/src/libllama.a ${SRCDIR}/build/linux/x86_64_static/ggml/src/libggml.a -lstdc++ -// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml. 
-lstdc++ +// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml.a -lstdc++ // #include // #include "llama.h" import "C" From 269ed6e6a2cea822ab137d40d5c70c8bf09470f8 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 17 Jun 2024 10:38:55 -0700 Subject: [PATCH 060/106] update message processing --- server/images.go | 17 +- server/prompt.go | 241 ++++-------------- server/prompt_test.go | 317 ++++++++++++------------ server/routes.go | 508 ++++++++++++-------------------------- template/template.go | 169 ++++++++++++- template/template_test.go | 153 +++++++++++- 6 files changed, 685 insertions(+), 720 deletions(-) diff --git a/server/images.go b/server/images.go index a62991f16..688d5dcae 100644 --- a/server/images.go +++ b/server/images.go @@ -34,6 +34,8 @@ import ( "github.com/ollama/ollama/version" ) +var errCapabilityCompletion = errors.New("completion") + type Capability string const CapabilityCompletion = Capability("completion") @@ -62,7 +64,10 @@ type Model struct { Template *template.Template } -func (m *Model) Has(caps ...Capability) bool { +// CheckCapabilities checks if the model has the specified capabilities returning an error describing +// any missing or unknown capabilities +func (m *Model) CheckCapabilities(caps ...Capability) error { + var errs []error for _, cap := range caps { switch cap { case CapabilityCompletion: @@ -81,15 +86,19 @@ func (m *Model) Has(caps ...Capability) bool { } if _, ok := ggml.KV()[fmt.Sprintf("%s.pooling_type", ggml.KV().Architecture())]; ok { - return false + errs = append(errs, errCapabilityCompletion) } default: slog.Error("unknown capability", "capability", cap) - return false + return fmt.Errorf("unknown capability: %s", cap) } } - return true + if err := errors.Join(errs...); err != nil { + return fmt.Errorf("missing capabilities: %w", errors.Join(errs...)) + } + + return nil } func (m *Model) String() string { diff --git a/server/prompt.go b/server/prompt.go index bfc319a50..5016fbe14 100644 --- a/server/prompt.go +++ b/server/prompt.go @@ -1,217 +1,74 @@ package server import ( - "fmt" + "bytes" + "context" "log/slog" - "strings" - - "text/template/parse" + "slices" "github.com/ollama/ollama/api" + "github.com/ollama/ollama/llm" "github.com/ollama/ollama/template" ) -// isResponseNode checks if the node contains .Response -func isResponseNode(node *parse.ActionNode) bool { - for _, cmd := range node.Pipe.Cmds { - for _, arg := range cmd.Args { - if fieldNode, ok := arg.(*parse.FieldNode); ok && len(fieldNode.Ident) > 0 { - if fieldNode.Ident[0] == "Response" { - return true - } - } +func chatPrompt(ctx context.Context, r *runnerRef, msgs []api.Message) (prompt string, images []llm.ImageData, _ error) { + // extract system messages which should always be included + var system []api.Message + msgs = slices.DeleteFunc(msgs, func(m api.Message) bool { + if m.Role == "system" { + system = append(system, m) + return true } - } - return false -} -// formatTemplateForResponse formats the template AST to: -// 1. remove all nodes after the first .Response (if generate=true) -// 2. 
add a .Response node to the end if it doesn't exist -// TODO(jmorganca): this should recursively cut the template before the first .Response -func formatTemplateForResponse(tmpl *template.Template, generate bool) { - var found bool - for i, node := range tmpl.Tree.Root.Nodes { - if actionNode, ok := node.(*parse.ActionNode); ok { - if isResponseNode(actionNode) { - found = true - if generate { - tmpl.Tree.Root.Nodes = tmpl.Tree.Root.Nodes[:i+1] - break - } - } + return false + }) + + if len(system) == 0 && r.model.System != "" { + // add model system prompt since it wasn't provided + system = append(system, api.Message{Role: "system", Content: r.model.System}) + } + + n := len(msgs) - 1 + for i := n - 1; i >= 0; i-- { + var b bytes.Buffer + if err := r.model.Template.Execute(&b, template.Values{Messages: append(system, msgs[i:]...)}); err != nil { + return "", nil, err } - } - if !found { - // add the response node if it doesn't exist - responseFieldNode := &parse.FieldNode{NodeType: parse.NodeField, Ident: []string{"Response"}} - responsePipeNode := &parse.PipeNode{NodeType: parse.NodePipe, Cmds: []*parse.CommandNode{{NodeType: parse.NodeCommand, Args: []parse.Node{responseFieldNode}}}} - responseActionNode := &parse.ActionNode{NodeType: parse.NodeAction, Pipe: responsePipeNode} - tmpl.Tree.Root.Nodes = append(tmpl.Tree.Root.Nodes, responseActionNode) - } -} - -// Prompt renders a prompt from a template. If generate is set to true, -// the response and parts of the template following it are not rendered -func Prompt(tmpl *template.Template, system, prompt, response string, generate bool) (string, error) { - formatTemplateForResponse(tmpl, generate) - - vars := map[string]any{ - "System": system, - "Prompt": prompt, - "Response": response, - } - - var sb strings.Builder - if err := tmpl.Execute(&sb, vars); err != nil { - return "", err - } - - return sb.String(), nil -} - -func countTokens(tmpl *template.Template, system string, prompt string, response string, encode func(string) ([]int, error)) (int, error) { - rendered, err := Prompt(tmpl, system, prompt, response, false) - if err != nil { - return 0, err - } - - tokens, err := encode(rendered) - if err != nil { - slog.Error("failed to encode prompt", "err", err) - return 0, err - } - - return len(tokens), err -} - -// ChatPrompt builds up a prompt from a series of messages, truncating based on context window size -func ChatPrompt(tmpl *template.Template, messages []api.Message, window int, encode func(string) ([]int, error)) (string, error) { - type prompt struct { - System string - Prompt string - Response string - - images []int - tokens int - } - - var p prompt - - // iterate through messages to build up {system,user,response} prompts - var imgId int - var prompts []prompt - for _, msg := range messages { - switch strings.ToLower(msg.Role) { - case "system": - if p.System != "" || p.Prompt != "" || p.Response != "" { - prompts = append(prompts, p) - p = prompt{} - } - - p.System = msg.Content - case "user": - if p.Prompt != "" || p.Response != "" { - prompts = append(prompts, p) - p = prompt{} - } - - var sb strings.Builder - for range msg.Images { - fmt.Fprintf(&sb, "[img-%d] ", imgId) - p.images = append(p.images, imgId) - imgId += 1 - } - - sb.WriteString(msg.Content) - p.Prompt = sb.String() - case "assistant": - if p.Response != "" { - prompts = append(prompts, p) - p = prompt{} - } - - p.Response = msg.Content - default: - return "", fmt.Errorf("invalid role: %s, role must be one of [system, user, assistant]", msg.Role) - } - } 
- - // add final prompt - if p.System != "" || p.Prompt != "" || p.Response != "" { - prompts = append(prompts, p) - } - - // calculate token lengths for each prompt, estimating 768 tokens per images - for i, p := range prompts { - tokens, err := countTokens(tmpl, p.System, p.Prompt, p.Response, encode) + s, err := r.llama.Tokenize(ctx, b.String()) if err != nil { - return "", err + return "", nil, err } - prompts[i].tokens = tokens + len(prompts[i].images)*768 - } - - // truncate images and prompts starting from the beginning of the list - // until either one prompt remains or the total tokens fits the context window - // TODO (jmorganca): this doesn't account for the context window room required for the response - for { - var required int - for _, p := range prompts { - required += p.tokens + c := len(s) + if r.model.ProjectorPaths != nil { + for _, m := range msgs[i:] { + // TODO: get image embedding length from project metadata + c += 768 * len(m.Images) + } } - required += 1 // for bos token - - if required <= window { - slog.Debug("prompt now fits in context window", "required", required, "window", window) + if c > r.NumCtx { + slog.Debug("truncating input messages which exceed context length", "truncated", len(msgs[i:])) break + } else { + n = i } - - prompt := &prompts[0] - - if len(prompt.images) > 1 { - img := prompt.images[0] - slog.Debug("prompt longer than context window, removing image", "id", img, "required", required, "window", window) - prompt.images = prompt.images[1:] - prompt.Prompt = strings.Replace(prompt.Prompt, fmt.Sprintf(" [img-%d]", img), "", 1) - prompt.tokens -= 768 - continue - } - - if len(prompts) > 1 { - slog.Debug("required tokens longer than context window, removing first prompt", "prompt", prompts[0].tokens, "required", required, "window", window) - system := prompt.System - prompts = prompts[1:] - - if system != "" && prompts[0].System == "" { - prompts[0].System = system - - tokens, err := countTokens(tmpl, prompts[0].System, prompts[0].Prompt, prompts[0].Response, encode) - if err != nil { - return "", err - } - - prompts[0].tokens = tokens + len(prompts[0].images)*768 - } - - continue - } - - // stop truncating if there's only one prompt left - break } - var sb strings.Builder - for i, p := range prompts { - // last prompt should leave the response unrendered (for completion) - rendered, err := Prompt(tmpl, p.System, p.Prompt, p.Response, i == len(prompts)-1) - if err != nil { - return "", err - } - sb.WriteString(rendered) + var b bytes.Buffer + if err := r.model.Template.Execute(&b, template.Values{Messages: append(system, msgs[n:]...)}); err != nil { + return "", nil, err } - return sb.String(), nil + for _, m := range msgs[n:] { + for _, i := range m.Images { + images = append(images, llm.ImageData{ + ID: len(images), + Data: i, + }) + } + } + + return b.String(), images, nil } diff --git a/server/prompt_test.go b/server/prompt_test.go index 7df58d0bd..59288b46c 100644 --- a/server/prompt_test.go +++ b/server/prompt_test.go @@ -1,215 +1,214 @@ package server import ( + "bytes" + "context" "strings" "testing" "github.com/ollama/ollama/api" + "github.com/ollama/ollama/llm" "github.com/ollama/ollama/template" ) -func TestPrompt(t *testing.T) { - tests := []struct { - name string - template string - system string - prompt string - response string - generate bool - want string - }{ - { - name: "simple prompt", - template: "[INST] {{ .System }} {{ .Prompt }} [/INST]", - system: "You are a Wizard.", - prompt: "What are the potion ingredients?", - want: 
"[INST] You are a Wizard. What are the potion ingredients? [/INST]", - }, - { - name: "implicit response", - template: "[INST] {{ .System }} {{ .Prompt }} [/INST]", - system: "You are a Wizard.", - prompt: "What are the potion ingredients?", - response: "I don't know.", - want: "[INST] You are a Wizard. What are the potion ingredients? [/INST]I don't know.", - }, - { - name: "response", - template: "[INST] {{ .System }} {{ .Prompt }} [/INST] {{ .Response }}", - system: "You are a Wizard.", - prompt: "What are the potion ingredients?", - response: "I don't know.", - want: "[INST] You are a Wizard. What are the potion ingredients? [/INST] I don't know.", - }, - { - name: "cut", - template: "{{ .System }}{{ .Prompt }}{{ .Response }}", - system: "You are a Wizard.", - prompt: "What are the potion ingredients?", - response: "I don't know.", - generate: true, - want: "You are a Wizard.What are the potion ingredients?I don't know.", - }, - { - name: "nocut", - template: "{{ .System }}{{ .Prompt }}{{ .Response }}", - system: "You are a Wizard.", - prompt: "What are the potion ingredients?", - response: "I don't know.", - want: "You are a Wizard.What are the potion ingredients?I don't know.", - }, +type mock struct { + llm.LlamaServer +} + +func (m mock) Tokenize(_ context.Context, s string) (tokens []int, err error) { + for range strings.Fields(s) { + tokens = append(tokens, len(tokens)) } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - tmpl, err := template.Parse(tc.template) - if err != nil { - t.Fatal(err) - } - - got, err := Prompt(tmpl, tc.system, tc.prompt, tc.response, tc.generate) - if err != nil { - t.Errorf("error = %v", err) - } - - if got != tc.want { - t.Errorf("got = %v, want %v", got, tc.want) - } - }) - } + return } func TestChatPrompt(t *testing.T) { - tests := []struct { - name string - template string - messages []api.Message - window int - want string + type expect struct { + prompt string + images [][]byte + } + + cases := []struct { + name string + limit int + msgs []api.Message + expect }{ { - name: "simple prompt", - template: "[INST] {{ .Prompt }} [/INST]", - messages: []api.Message{ - {Role: "user", Content: "Hello"}, + name: "messages", + limit: 64, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry!"}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, + }, + expect: expect{ + prompt: "You're a test, Harry! I-I'm a what? A test. And a thumping good one at that, I'd wager. ", }, - window: 1024, - want: "[INST] Hello [/INST]", }, { - name: "with system message", - template: "[INST] {{ if .System }}<>{{ .System }}<> {{ end }}{{ .Prompt }} [/INST]", - messages: []api.Message{ - {Role: "system", Content: "You are a Wizard."}, - {Role: "user", Content: "Hello"}, + name: "truncate messages", + limit: 1, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry!"}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, + }, + expect: expect{ + prompt: "A test. And a thumping good one at that, I'd wager. 
", }, - window: 1024, - want: "[INST] <>You are a Wizard.<> Hello [/INST]", }, { - name: "with response", - template: "[INST] {{ if .System }}<>{{ .System }}<> {{ end }}{{ .Prompt }} [/INST] {{ .Response }}", - messages: []api.Message{ - {Role: "system", Content: "You are a Wizard."}, - {Role: "user", Content: "Hello"}, - {Role: "assistant", Content: "I am?"}, + name: "truncate messages with image", + limit: 64, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry!"}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("something")}}, + }, + expect: expect{ + prompt: "[img-0] A test. And a thumping good one at that, I'd wager. ", + images: [][]byte{ + []byte("something"), + }, }, - window: 1024, - want: "[INST] <>You are a Wizard.<> Hello [/INST] I am?", }, { - name: "with implicit response", - template: "[INST] {{ if .System }}<>{{ .System }}<> {{ end }}{{ .Prompt }} [/INST]", - messages: []api.Message{ - {Role: "system", Content: "You are a Wizard."}, - {Role: "user", Content: "Hello"}, - {Role: "assistant", Content: "I am?"}, + name: "truncate messages with images", + limit: 64, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry!", Images: []api.ImageData{[]byte("something")}}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("somethingelse")}}, + }, + expect: expect{ + prompt: "[img-0] A test. And a thumping good one at that, I'd wager. ", + images: [][]byte{ + []byte("somethingelse"), + }, }, - window: 1024, - want: "[INST] <>You are a Wizard.<> Hello [/INST]I am?", }, { - name: "with conversation", - template: "[INST] {{ if .System }}<>{{ .System }}<> {{ end }}{{ .Prompt }} [/INST] {{ .Response }} ", - messages: []api.Message{ - {Role: "system", Content: "You are a Wizard."}, - {Role: "user", Content: "What are the potion ingredients?"}, - {Role: "assistant", Content: "sugar"}, - {Role: "user", Content: "Anything else?"}, + name: "messages with images", + limit: 2048, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry!", Images: []api.ImageData{[]byte("something")}}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("somethingelse")}}, + }, + expect: expect{ + prompt: "[img-0] You're a test, Harry! I-I'm a what? [img-1] A test. And a thumping good one at that, I'd wager. ", + images: [][]byte{ + []byte("something"), + []byte("somethingelse"), + }, }, - window: 1024, - want: "[INST] <>You are a Wizard.<> What are the potion ingredients? [/INST] sugar [INST] Anything else? [/INST] ", }, { - name: "with truncation", - template: "{{ .System }} {{ .Prompt }} {{ .Response }} ", - messages: []api.Message{ - {Role: "system", Content: "You are a Wizard."}, - {Role: "user", Content: "Hello"}, - {Role: "assistant", Content: "I am?"}, - {Role: "user", Content: "Why is the sky blue?"}, - {Role: "assistant", Content: "The sky is blue from rayleigh scattering"}, + name: "message with image tag", + limit: 2048, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry! [img]", Images: []api.ImageData{[]byte("something")}}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. 
And a thumping good one at that, I'd wager.", Images: []api.ImageData{[]byte("somethingelse")}}, + }, + expect: expect{ + prompt: "You're a test, Harry! [img-0] I-I'm a what? [img-1] A test. And a thumping good one at that, I'd wager. ", + images: [][]byte{ + []byte("something"), + []byte("somethingelse"), + }, }, - window: 10, - want: "You are a Wizard. Why is the sky blue? The sky is blue from rayleigh scattering", }, { - name: "images", - template: "{{ .System }} {{ .Prompt }}", - messages: []api.Message{ - {Role: "system", Content: "You are a Wizard."}, - {Role: "user", Content: "Hello", Images: []api.ImageData{[]byte("base64")}}, + name: "messages with interleaved images", + limit: 2048, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry!"}, + {Role: "user", Images: []api.ImageData{[]byte("something")}}, + {Role: "user", Images: []api.ImageData{[]byte("somethingelse")}}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, + }, + expect: expect{ + prompt: "You're a test, Harry!\n\n[img-0]\n\n[img-1] I-I'm a what? A test. And a thumping good one at that, I'd wager. ", + images: [][]byte{ + []byte("something"), + []byte("somethingelse"), + }, }, - window: 1024, - want: "You are a Wizard. [img-0] Hello", }, { - name: "images truncated", - template: "{{ .System }} {{ .Prompt }}", - messages: []api.Message{ - {Role: "system", Content: "You are a Wizard."}, - {Role: "user", Content: "Hello", Images: []api.ImageData{[]byte("img1"), []byte("img2")}}, + name: "truncate message with interleaved images", + limit: 1024, + msgs: []api.Message{ + {Role: "user", Content: "You're a test, Harry!"}, + {Role: "user", Images: []api.ImageData{[]byte("something")}}, + {Role: "user", Images: []api.ImageData{[]byte("somethingelse")}}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, + }, + expect: expect{ + prompt: "[img-0] I-I'm a what? A test. And a thumping good one at that, I'd wager. ", + images: [][]byte{ + []byte("somethingelse"), + }, }, - window: 1024, - want: "You are a Wizard. [img-0] [img-1] Hello", }, { - name: "empty list", - template: "{{ .System }} {{ .Prompt }}", - messages: []api.Message{}, - window: 1024, - want: "", - }, - { - name: "empty prompt", - template: "[INST] {{ if .System }}<>{{ .System }}<> {{ end }}{{ .Prompt }} [/INST] {{ .Response }} ", - messages: []api.Message{ - {Role: "user", Content: ""}, + name: "message with system prompt", + limit: 2048, + msgs: []api.Message{ + {Role: "system", Content: "You are the Test Who Lived."}, + {Role: "user", Content: "You're a test, Harry!"}, + {Role: "assistant", Content: "I-I'm a what?"}, + {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, + }, + expect: expect{ + prompt: "You're a test, Harry! I-I'm a what? You are the Test Who Lived. A test. And a thumping good one at that, I'd wager. 
", }, - window: 1024, - want: "", }, } - encode := func(s string) ([]int, error) { - words := strings.Fields(s) - return make([]int, len(words)), nil + tmpl, err := template.Parse(` +{{- if .System }}{{ .System }} {{ end }} +{{- if .Prompt }}{{ .Prompt }} {{ end }} +{{- if .Response }}{{ .Response }} {{ end }}`) + if err != nil { + t.Fatal(err) } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - tmpl, err := template.Parse(tc.template) + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + r := runnerRef{ + llama: mock{}, + model: &Model{Template: tmpl, ProjectorPaths: []string{"vision"}}, + Options: &api.Options{}, + } + + r.NumCtx = tt.limit + prompt, images, err := chatPrompt(context.TODO(), &r, tt.msgs) if err != nil { t.Fatal(err) } - got, err := ChatPrompt(tmpl, tc.messages, tc.window, encode) - if err != nil { - t.Errorf("error = %v", err) + if tt.prompt != prompt { + t.Errorf("expected %q, got %q", tt.prompt, prompt) } - if got != tc.want { - t.Errorf("got: %q, want: %q", got, tc.want) + if len(images) != len(tt.images) { + t.Fatalf("expected %d images, got %d", len(tt.images), len(images)) + } + + for i := range images { + if images[i].ID != i { + t.Errorf("expected ID %d, got %d", i, images[i].ID) + } + + if !bytes.Equal(images[i].Data, tt.images[i]) { + t.Errorf("expected %q, got %q", tt.images[i], images[i]) + } } }) } diff --git a/server/routes.go b/server/routes.go index ac6b713a7..35e64511b 100644 --- a/server/routes.go +++ b/server/routes.go @@ -1,13 +1,13 @@ package server import ( + "bytes" "cmp" "context" "encoding/json" "errors" "fmt" "io" - "io/fs" "log/slog" "net" "net/http" @@ -67,163 +67,140 @@ func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options return opts, nil } -func isSupportedImageType(image []byte) bool { - contentType := http.DetectContentType(image) - allowedTypes := []string{"image/jpeg", "image/jpg", "image/png"} - return slices.Contains(allowedTypes, contentType) +func (s *Server) scheduleRunner(ctx context.Context, name string, caps []Capability, requestOpts map[string]any, keepAlive *api.Duration) (*runnerRef, error) { + if name == "" { + return nil, errors.New("model is required") + } + + model, err := GetModel(name) + if err != nil { + return nil, err + } + + if err := model.CheckCapabilities(caps...); err != nil { + return nil, fmt.Errorf("%s %w", name, err) + } + + opts, err := modelOptions(model, requestOpts) + if err != nil { + return nil, err + } + + runnerCh, errCh := s.sched.GetRunner(ctx, model, opts, keepAlive) + var runner *runnerRef + select { + case runner = <-runnerCh: + case err = <-errCh: + return nil, err + } + + return runner, nil } func (s *Server) GenerateHandler(c *gin.Context) { - checkpointStart := time.Now() var req api.GenerateRequest - err := c.ShouldBindJSON(&req) - - switch { - case errors.Is(err, io.EOF): + if err := c.ShouldBindJSON(&req); errors.Is(err, io.EOF) { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"}) return - case err != nil: + } else if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } - // validate the request - switch { - case req.Model == "": - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "model is required"}) + if req.Format != "" && req.Format != "json" { + c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "format must be empty or \"json\""}) return - case len(req.Format) > 0 && req.Format != "json": - 
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "format must be json"}) - return - case req.Raw && (req.Template != "" || req.System != "" || len(req.Context) > 0): + } else if req.Raw && (req.Template != "" || req.System != "" || len(req.Context) > 0) { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "raw mode does not support template, system, or context"}) return } - for _, img := range req.Images { - if !isSupportedImageType(img) { - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "unsupported image format"}) - return - } - } - - model, err := GetModel(req.Model) - if err != nil { - var pErr *fs.PathError - if errors.As(err, &pErr) { - c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model '%s' not found, try pulling it first", req.Model)}) - return - } - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + caps := []Capability{CapabilityCompletion} + r, err := s.scheduleRunner(c.Request.Context(), req.Model, caps, req.Options, req.KeepAlive) + if errors.Is(err, errCapabilityCompletion) { + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%q does not support generate", req.Model)}) + return + } else if err != nil { + handleScheduleError(c, err) return } - if !model.Has(CapabilityCompletion) { - c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support generate", req.Model)}) - return + images := make([]llm.ImageData, len(req.Images)) + for i := range req.Images { + images[i] = llm.ImageData{ID: i, Data: req.Images[i]} } - opts, err := modelOptions(model, req.Options) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) - var runner *runnerRef - select { - case runner = <-rCh: - case err = <-eCh: - handleErrorResponse(c, err) - return - } - - // an empty request loads the model - // note: for a short while template was used in lieu - // of `raw` mode so we need to check for it too - if req.Prompt == "" && req.Template == "" && req.System == "" { - c.JSON(http.StatusOK, api.GenerateResponse{ - CreatedAt: time.Now().UTC(), - Model: req.Model, - Done: true, - DoneReason: "load", - }) - return - } - - tmpl, err := template.Parse(req.Template) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - checkpointLoaded := time.Now() - - var prompt string - switch { - case req.Raw: - prompt = req.Prompt - case req.Prompt != "": - if req.Template == "" { - tmpl = model.Template + prompt := req.Prompt + if !req.Raw { + var msgs []api.Message + if req.System != "" { + msgs = append(msgs, api.Message{Role: "system", Content: req.System}) + } else if r.model.System != "" { + msgs = append(msgs, api.Message{Role: "system", Content: r.model.System}) } - if req.System == "" { - req.System = model.System + if req.Prompt != "" { + for _, i := range images { + msgs = append(msgs, api.Message{Role: "user", Content: fmt.Sprintf("[img-%d]", i.ID)}) + } + + msgs = append(msgs, api.Message{Role: "user", Content: req.Prompt}) } - slog.Debug("generate handler", "prompt", req.Prompt) - slog.Debug("generate handler", "template", req.Template) - slog.Debug("generate handler", "system", req.System) - - var sb strings.Builder - for i := range req.Images { - fmt.Fprintf(&sb, "[img-%d] ", i) - } - - sb.WriteString(req.Prompt) - - p, err := Prompt(tmpl, req.System, sb.String(), "", true) - if err != nil { - c.JSON(http.StatusInternalServerError, 
gin.H{"error": err.Error()}) + if len(msgs) == 0 { + c.JSON(http.StatusOK, api.GenerateResponse{ + Model: req.Model, + CreatedAt: time.Now().UTC(), + Done: true, + DoneReason: "load", + }) return } - sb.Reset() + tmpl := r.model.Template + if req.Template != "" { + tmpl, err = template.Parse(req.Template) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + } + + var b bytes.Buffer if req.Context != nil { - prev, err := runner.llama.Detokenize(c.Request.Context(), req.Context) + s, err := r.llama.Detokenize(c.Request.Context(), req.Context) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return } - sb.WriteString(prev) + b.WriteString(s) } - sb.WriteString(p) + if err := tmpl.Execute(&b, template.Values{Messages: msgs}); err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } - prompt = sb.String() + prompt = b.String() } - slog.Debug("generate handler", "prompt", prompt) + slog.Debug("generate request", "prompt", prompt, "images", images) ch := make(chan any) - var generated strings.Builder go func() { defer close(ch) - - fn := func(r llm.CompletionResponse) { - // Build up the full response - if _, err := generated.WriteString(r.Content); err != nil { - ch <- gin.H{"error": err.Error()} - return - } - - resp := api.GenerateResponse{ + if err := r.llama.Completion(c.Request.Context(), llm.CompletionRequest{ + Prompt: prompt, + Images: images, + Format: req.Format, + Options: *r.Options, + }, func(r llm.CompletionResponse) { + ch <- api.GenerateResponse{ Model: req.Model, CreatedAt: time.Now().UTC(), - Done: r.Done, Response: r.Content, + Done: r.Done, DoneReason: r.DoneReason, Metrics: api.Metrics{ PromptEvalCount: r.PromptEvalCount, @@ -232,77 +209,35 @@ func (s *Server) GenerateHandler(c *gin.Context) { EvalDuration: r.EvalDuration, }, } - - if r.Done { - resp.TotalDuration = time.Since(checkpointStart) - resp.LoadDuration = checkpointLoaded.Sub(checkpointStart) - - if !req.Raw { - p, err := Prompt(tmpl, req.System, req.Prompt, generated.String(), false) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - // TODO (jmorganca): encode() should not strip special tokens - tokens, err := runner.llama.Tokenize(c.Request.Context(), p) - if err != nil { - ch <- gin.H{"error": err.Error()} - return - } - - resp.Context = append(req.Context, tokens...) 
- } - } - - ch <- resp - } - - var images []llm.ImageData - for i := range req.Images { - images = append(images, llm.ImageData{ - ID: i, - Data: req.Images[i], - }) - } - - // Start prediction - req := llm.CompletionRequest{ - Prompt: prompt, - Format: req.Format, - Images: images, - Options: opts, - } - if err := runner.llama.Completion(c.Request.Context(), req, fn); err != nil { + }); err != nil { ch <- gin.H{"error": err.Error()} } }() if req.Stream != nil && !*req.Stream { - // Accumulate responses into the final response - var final api.GenerateResponse + var r api.GenerateResponse var sb strings.Builder - for resp := range ch { - switch r := resp.(type) { + for rr := range ch { + switch t := rr.(type) { case api.GenerateResponse: - sb.WriteString(r.Response) - final = r + sb.WriteString(t.Response) + r = t case gin.H: - if errorMsg, ok := r["error"].(string); ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": errorMsg}) - return - } else { - c.JSON(http.StatusInternalServerError, gin.H{"error": "unexpected error format in response"}) - return + msg, ok := t["error"].(string) + if !ok { + msg = "unexpected error format in response" } + + c.JSON(http.StatusInternalServerError, gin.H{"error": msg}) + return default: - c.JSON(http.StatusInternalServerError, gin.H{"error": "unexpected error"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": "unexpected response"}) return } } - final.Response = sb.String() - c.JSON(http.StatusOK, final) + r.Response = sb.String() + c.JSON(http.StatusOK, r) return } @@ -311,44 +246,17 @@ func (s *Server) GenerateHandler(c *gin.Context) { func (s *Server) EmbeddingsHandler(c *gin.Context) { var req api.EmbeddingRequest - err := c.ShouldBindJSON(&req) - switch { - case errors.Is(err, io.EOF): + if err := c.ShouldBindJSON(&req); errors.Is(err, io.EOF) { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"}) return - case err != nil: + } else if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } - if req.Model == "" { - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "model is required"}) - return - } - - model, err := GetModel(req.Model) + r, err := s.scheduleRunner(c.Request.Context(), req.Model, []Capability{}, req.Options, req.KeepAlive) if err != nil { - var pErr *fs.PathError - if errors.As(err, &pErr) { - c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model '%s' not found, try pulling it first", req.Model)}) - return - } - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - opts, err := modelOptions(model, req.Options) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) - var runner *runnerRef - select { - case runner = <-rCh: - case err = <-eCh: - handleErrorResponse(c, err) + handleScheduleError(c, err) return } @@ -358,17 +266,14 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) { return } - embedding, err := runner.llama.Embedding(c.Request.Context(), req.Prompt) + embedding, err := r.llama.Embedding(c.Request.Context(), req.Prompt) if err != nil { slog.Info(fmt.Sprintf("embedding generation failed: %v", err)) c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate embedding"}) return } - resp := api.EmbeddingResponse{ - Embedding: embedding, - } - c.JSON(http.StatusOK, resp) + c.JSON(http.StatusOK, api.EmbeddingResponse{Embedding: embedding}) } func (s 
*Server) PullModelHandler(c *gin.Context) { @@ -649,9 +554,9 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) { } } - msgs := make([]api.Message, 0) - for _, msg := range m.Messages { - msgs = append(msgs, api.Message{Role: msg.Role, Content: msg.Content}) + msgs := make([]api.Message, len(m.Messages)) + for i, msg := range m.Messages { + msgs[i] = api.Message{Role: msg.Role, Content: msg.Content} } n := model.ParseName(req.Model) @@ -1214,132 +1119,55 @@ func (s *Server) ProcessHandler(c *gin.Context) { c.JSON(http.StatusOK, api.ProcessResponse{Models: models}) } -// ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model -func chatPrompt(ctx context.Context, runner *runnerRef, template *template.Template, messages []api.Message, numCtx int) (string, error) { - encode := func(s string) ([]int, error) { - return runner.llama.Tokenize(ctx, s) - } - - prompt, err := ChatPrompt(template, messages, numCtx, encode) - if err != nil { - return "", err - } - - return prompt, nil -} - func (s *Server) ChatHandler(c *gin.Context) { - checkpointStart := time.Now() - var req api.ChatRequest - err := c.ShouldBindJSON(&req) - switch { - case errors.Is(err, io.EOF): + if err := c.ShouldBindJSON(&req); errors.Is(err, io.EOF) { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"}) return - case err != nil: + } else if err != nil { c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } - // validate the request - switch { - case req.Model == "": - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "model is required"}) + caps := []Capability{CapabilityCompletion} + r, err := s.scheduleRunner(c.Request.Context(), req.Model, caps, req.Options, req.KeepAlive) + if errors.Is(err, errCapabilityCompletion) { + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%q does not support chat", req.Model)}) return - case len(req.Format) > 0 && req.Format != "json": - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "format must be json"}) + } else if err != nil { + handleScheduleError(c, err) return } - model, err := GetModel(req.Model) - if err != nil { - var pErr *fs.PathError - if errors.As(err, &pErr) { - c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model '%s' not found, try pulling it first", req.Model)}) - return - } - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - if !model.Has(CapabilityCompletion) { - c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support chat", req.Model)}) - return - } - - opts, err := modelOptions(model, req.Options) - if err != nil { - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) - return - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) - var runner *runnerRef - select { - case runner = <-rCh: - case err = <-eCh: - handleErrorResponse(c, err) - return - } - - checkpointLoaded := time.Now() - - // if the first message is not a system message, then add the model's default system message - if len(req.Messages) > 0 && req.Messages[0].Role != "system" { - req.Messages = append([]api.Message{ - { - Role: "system", - Content: model.System, - }, - }, req.Messages...) 
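
Both handlers now classify scheduling failures with errors.Is against sentinel errors such as errCapabilityCompletion and ErrMaxQueue, which works because scheduleRunner wraps them with %w. A compact sketch of that wrap-and-classify pattern, using an invented sentinel and model name:

package main

import (
	"errors"
	"fmt"
)

var errNoCompletion = errors.New("does not support completion")

// check wraps the sentinel with %w so callers can classify it with errors.Is,
// the same shape scheduleRunner uses for its capability errors.
func check(name string) error {
	return fmt.Errorf("%s %w", name, errNoCompletion)
}

func main() {
	err := check("embed-only-model")
	switch {
	case errors.Is(err, errNoCompletion):
		fmt.Println("400:", err) // 400: embed-only-model does not support completion
	default:
		fmt.Println("500:", err)
	}
}
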
- } - - prompt, err := chatPrompt(c.Request.Context(), runner, model.Template, req.Messages, opts.NumCtx) - if err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) - return - } - - // an empty request loads the model - if len(req.Messages) == 0 || prompt == "" { - resp := api.ChatResponse{ - CreatedAt: time.Now().UTC(), + if len(req.Messages) == 0 { + c.JSON(http.StatusOK, api.ChatResponse{ Model: req.Model, + CreatedAt: time.Now().UTC(), + Message: api.Message{Role: "assistant"}, Done: true, DoneReason: "load", - Message: api.Message{Role: "assistant"}, - } - c.JSON(http.StatusOK, resp) + }) return } - // only send images that are in the prompt - var i int - var images []llm.ImageData - for _, m := range req.Messages { - for _, img := range m.Images { - if !isSupportedImageType(img) { - c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "unsupported image format"}) - return - } - - if strings.Contains(prompt, fmt.Sprintf("[img-%d]", i)) { - images = append(images, llm.ImageData{Data: img, ID: i}) - } - i += 1 - } + prompt, images, err := chatPrompt(c.Request.Context(), r, req.Messages) + if err != nil { + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return } - slog.Debug("chat handler", "prompt", prompt, "images", len(images)) + slog.Debug("chat request", "images", len(images), "prompt", prompt) ch := make(chan any) - go func() { defer close(ch) - - fn := func(r llm.CompletionResponse) { - resp := api.ChatResponse{ + if err := r.llama.Completion(c.Request.Context(), llm.CompletionRequest{ + Prompt: prompt, + Images: images, + Format: req.Format, + Options: *r.Options, + }, func(r llm.CompletionResponse) { + ch <- api.ChatResponse{ Model: req.Model, CreatedAt: time.Now().UTC(), Message: api.Message{Role: "assistant", Content: r.Content}, @@ -1352,64 +1180,48 @@ func (s *Server) ChatHandler(c *gin.Context) { EvalDuration: r.EvalDuration, }, } - - if r.Done { - resp.TotalDuration = time.Since(checkpointStart) - resp.LoadDuration = checkpointLoaded.Sub(checkpointStart) - } - - ch <- resp - } - - if err := runner.llama.Completion(c.Request.Context(), llm.CompletionRequest{ - Prompt: prompt, - Format: req.Format, - Images: images, - Options: opts, - }, fn); err != nil { + }); err != nil { ch <- gin.H{"error": err.Error()} } }() if req.Stream != nil && !*req.Stream { - // Accumulate responses into the final response - var final api.ChatResponse + var r api.ChatResponse var sb strings.Builder - for resp := range ch { - switch r := resp.(type) { + for rr := range ch { + switch t := rr.(type) { case api.ChatResponse: - sb.WriteString(r.Message.Content) - final = r + sb.WriteString(t.Message.Content) + r = t case gin.H: - if errorMsg, ok := r["error"].(string); ok { - c.JSON(http.StatusInternalServerError, gin.H{"error": errorMsg}) - return - } else { - c.JSON(http.StatusInternalServerError, gin.H{"error": "unexpected error format in response"}) - return + msg, ok := t["error"].(string) + if !ok { + msg = "unexpected error format in response" } + + c.JSON(http.StatusInternalServerError, gin.H{"error": msg}) + return default: - c.JSON(http.StatusInternalServerError, gin.H{"error": "unexpected error"}) + c.JSON(http.StatusInternalServerError, gin.H{"error": "unexpected response"}) return } } - final.Message = api.Message{Role: "assistant", Content: sb.String()} - c.JSON(http.StatusOK, final) + r.Message.Content = sb.String() + c.JSON(http.StatusOK, r) return } streamResponse(c, ch) } -func handleErrorResponse(c *gin.Context, err error) { - if 
errors.Is(err, context.Canceled) { +func handleScheduleError(c *gin.Context, err error) { + switch { + case errors.Is(err, context.Canceled): c.JSON(499, gin.H{"error": "request canceled"}) - return - } - if errors.Is(err, ErrMaxQueue) { + case errors.Is(err, ErrMaxQueue): c.JSON(http.StatusServiceUnavailable, gin.H{"error": err.Error()}) - return + default: + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) } - c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) } diff --git a/template/template.go b/template/template.go index d15f7156f..cfba5a238 100644 --- a/template/template.go +++ b/template/template.go @@ -5,6 +5,7 @@ import ( "embed" "encoding/json" "errors" + "fmt" "io" "math" "slices" @@ -14,6 +15,7 @@ import ( "text/template/parse" "github.com/agnivade/levenshtein" + "github.com/ollama/ollama/api" "golang.org/x/exp/maps" ) @@ -74,30 +76,78 @@ func Named(s string) (*named, error) { return nil, errors.New("no matching template found") } +var DefaultTemplate, _ = Parse("{{ .Prompt }}") + type Template struct { *template.Template raw string } +var response = parse.ActionNode{ + NodeType: parse.NodeAction, + Pipe: &parse.PipeNode{ + NodeType: parse.NodePipe, + Cmds: []*parse.CommandNode{ + { + NodeType: parse.NodeCommand, + Args: []parse.Node{ + &parse.FieldNode{ + NodeType: parse.NodeField, + Ident: []string{"Response"}, + }, + }, + }, + }, + }, +} + +func Parse(s string) (*Template, error) { + tmpl := template.New("").Option("missingkey=zero").Funcs(template.FuncMap{ + "toJson": func(v any) string { + b, err := json.Marshal(v) + if err != nil { + return "" + } + + return string(b) + }, + "isLastMessage": func(s []*api.Message, m *api.Message) bool { + for i := len(s) - 1; i >= 0; i-- { + if m.Role != s[i].Role { + continue + } + + return m == s[i] + } + + return false + }, + }) + + tmpl, err := tmpl.Parse(s) + if err != nil { + return nil, err + } + + t := Template{Template: tmpl, raw: s} + if vars := t.Vars(); !slices.Contains(vars, "messages") && !slices.Contains(vars, "response") { + // touch up the template and append {{ .Response }} + tmpl.Tree.Root.Nodes = append(tmpl.Tree.Root.Nodes, &response) + } + + return &t, nil +} + func (t *Template) String() string { return t.raw } -var DefaultTemplate, _ = Parse("{{ .Prompt }}") - -func Parse(s string) (*Template, error) { - t, err := template.New("").Option("missingkey=zero").Parse(s) - if err != nil { - return nil, err - } - - return &Template{Template: t, raw: s}, nil -} - func (t *Template) Vars() []string { var vars []string - for _, n := range t.Tree.Root.Nodes { - vars = append(vars, parseNode(n)...) + for _, tt := range t.Templates() { + for _, n := range tt.Root.Nodes { + vars = append(vars, parseNode(n)...) 
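
The practical effect of the response node appended in Parse above is that a legacy template such as {{ .Prompt }} behaves as if it ended with {{ .Response }}, which is why TestParse below now lists "response" among its variables. A hedged approximation of that behavior using plain text/template and string matching instead of parse tree surgery:

package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

// parseLegacy approximates the parse tree surgery above with string matching:
// if the source references neither .Response nor .Messages, append a trailing
// {{ .Response }} before parsing.
func parseLegacy(s string) (*template.Template, error) {
	if !strings.Contains(s, ".Response") && !strings.Contains(s, ".Messages") {
		s += "{{ .Response }}"
	}
	return template.New("").Option("missingkey=zero").Parse(s)
}

func main() {
	tmpl, err := parseLegacy("[INST] {{ .Prompt }} [/INST] ")
	if err != nil {
		panic(err)
	}

	_ = tmpl.Execute(os.Stdout, map[string]any{
		"Prompt":   "Why is the sky blue?",
		"Response": "Rayleigh scattering.",
	})
	fmt.Println()
	// [INST] Why is the sky blue? [/INST] Rayleigh scattering.
}
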
+ } } set := make(map[string]struct{}) @@ -110,6 +160,97 @@ func (t *Template) Vars() []string { return vars } +type Values struct { + Messages []api.Message +} + +func (t *Template) Execute(w io.Writer, v Values) error { + system, collated := collate(v.Messages) + if slices.Contains(t.Vars(), "messages") { + return t.Template.Execute(w, map[string]any{ + "System": system, + "Messages": collated, + }) + } + + var b bytes.Buffer + var prompt, response string + for i, m := range collated { + if m.Role == "user" { + prompt = m.Content + } else { + response = m.Content + } + + if i != len(collated)-1 && prompt != "" && response != "" { + if err := t.Template.Execute(&b, map[string]any{ + "System": "", + "Prompt": prompt, + "Response": response, + }); err != nil { + return err + } + + prompt = "" + response = "" + } + } + + var cut bool + tree := t.Template.Copy() + // for the last message, cut everything after "{{ .Response }}" + tree.Root.Nodes = slices.DeleteFunc(tree.Root.Nodes, func(n parse.Node) bool { + if slices.Contains(parseNode(n), "Response") { + cut = true + } + + return cut + }) + + if err := template.Must(template.New("").AddParseTree("", tree)).Execute(&b, map[string]any{ + "System": system, + "Prompt": prompt, + }); err != nil { + return err + } + + _, err := io.Copy(w, &b) + return err +} + +func collate(msgs []api.Message) (system string, collated []*api.Message) { + var n int + for i := range msgs { + msg := msgs[i] + if msg.Role == "system" { + if system != "" { + system += "\n\n" + } + + system += msg.Content + continue + } + + for range msg.Images { + imageTag := fmt.Sprintf("[img-%d]", n) + if !strings.Contains(msg.Content, "[img]") { + msg.Content = strings.TrimSpace("[img] " + msg.Content) + } + + msg.Content = strings.Replace(msg.Content, "[img]", imageTag, 1) + n++ + } + + if len(collated) > 0 && collated[len(collated)-1].Role == msg.Role { + collated[len(collated)-1].Content += "\n\n" + msg.Content + } else { + collated = append(collated, &msg) + } + } + + return +} + func parseNode(n parse.Node) []string { switch n := n.(type) { case *parse.ActionNode: @@ -152,6 +293,8 @@ func parseNode(n parse.Node) []string { return names case *parse.FieldNode: return n.Ident + case *parse.TemplateNode: + return parseNode(n.Pipe) } return nil diff --git a/template/template_test.go b/template/template_test.go index eda4634f4..5d5dad4b2 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -11,6 +11,7 @@ import ( "testing" "text/template" + "github.com/ollama/ollama/api" "github.com/ollama/ollama/llm" ) @@ -64,13 +65,12 @@ func TestParse(t *testing.T) { template string vars []string }{ - {"{{ .Prompt }}", []string{"prompt"}}, - {"{{ .System }} {{ .Prompt }}", []string{"prompt", "system"}}, + {"{{ .Prompt }}", []string{"prompt", "response"}}, + {"{{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system"}}, {"{{ .System }} {{ .Prompt }} {{ .Response }}", []string{"prompt", "response", "system"}}, - {"{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "system", "tools"}}, + {"{{ with .Tools }}{{ . 
}}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system", "tools"}}, {"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}}, {"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}}, - {"{{ .Prompt }} {{ .Suffix }}", []string{"prompt", "suffix"}}, } for _, tt := range cases { @@ -87,3 +87,148 @@ func TestParse(t *testing.T) { }) } } + +func TestExecuteWithMessages(t *testing.T) { + cases := []struct { + templates []string + values Values + expected string + }{ + { + []string{ + `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `, + `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`, + `{{- range .Messages }} +{{- if eq .Role "user" }}[INST] {{ if and (isLastMessage $.Messages .) $.System }}{{ $.System }}{{ print "\n\n" }} +{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} +{{- end }} +{{- end }}`, + }, + Values{ + Messages: []api.Message{ + {Role: "user", Content: "Hello friend!"}, + {Role: "assistant", Content: "Hello human!"}, + {Role: "user", Content: "Yay!"}, + }, + }, + `[INST] Hello friend![/INST] Hello human![INST] Yay![/INST] `, + }, + { + []string{ + `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `, + `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`, + ` +{{- range .Messages }} +{{- if eq .Role "user" }}[INST] {{ if and (isLastMessage $.Messages .) $.System }}{{ $.System }}{{ print "\n\n" }} +{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} +{{- end }} +{{- end }}`, + }, + Values{ + Messages: []api.Message{ + {Role: "system", Content: "You are a helpful assistant!"}, + {Role: "user", Content: "Hello friend!"}, + {Role: "assistant", Content: "Hello human!"}, + {Role: "user", Content: "Yay!"}, + }, + }, + `[INST] Hello friend![/INST] Hello human![INST] You are a helpful assistant! + +Yay![/INST] `, + }, + { + []string{ + `{{ if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }}{{ if .Prompt }}<|im_start|>user +{{ .Prompt }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ .Response }}<|im_end|> +`, + ` +{{- range .Messages }} +{{- if and (eq .Role "user") (isLastMessage $.Messages .) 
$.System }}<|im_start|>system +{{ $.System }}<|im_end|>{{ print "\n" }} +{{- end }}<|im_start|>{{ .Role }} +{{ .Content }}<|im_end|>{{ print "\n" }} +{{- end }}<|im_start|>assistant +`, + }, + Values{ + Messages: []api.Message{ + {Role: "system", Content: "You are a helpful assistant!"}, + {Role: "user", Content: "Hello friend!"}, + {Role: "assistant", Content: "Hello human!"}, + {Role: "user", Content: "Yay!"}, + }, + }, + `<|im_start|>user +Hello friend!<|im_end|> +<|im_start|>assistant +Hello human!<|im_end|> +<|im_start|>system +You are a helpful assistant!<|im_end|> +<|im_start|>user +Yay!<|im_end|> +<|im_start|>assistant +`, + }, + { + []string{ + `{{ if .Prompt }}Question: {{ .Prompt }} + +{{ end }}Answer: {{ .Response }} + +`, + ` +{{- range .Messages }} +{{- if eq .Role "user" }}Question: {{ .Content }}{{ print "\n\n" }} +{{- else if eq .Role "assistant" }}Answer: {{ .Content }}{{ print "\n\n" }} +{{- end }} +{{- end }}Answer: `, + }, + Values{ + Messages: []api.Message{ + {Role: "user", Content: "What's in this image?", Images: []api.ImageData{[]byte("")}}, + {Role: "assistant", Content: "It's a hot dog."}, + {Role: "user", Content: "What's in _this_ image?"}, + {Role: "user", Images: []api.ImageData{[]byte("")}}, + {Role: "user", Content: "Is it a hot dog?"}, + }, + }, + `Question: [img-0] What's in this image? + +Answer: It's a hot dog. + +Question: What's in _this_ image? + +[img-1] + +Is it a hot dog? + +Answer: `, + }, + } + + for _, tt := range cases { + t.Run("", func(t *testing.T) { + for _, tmpl := range tt.templates { + t.Run("", func(t *testing.T) { + tmpl, err := Parse(tmpl) + if err != nil { + t.Fatal(err) + } + + var b bytes.Buffer + if err := tmpl.Execute(&b, tt.values); err != nil { + t.Fatal(err) + } + + if b.String() != tt.expected { + t.Errorf("expected\n%s,\ngot\n%s", tt.expected, b.String()) + } + }) + } + }) + } +} From 2c3fe1fd972b7810091120f844afc35bc98accbd Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 20 Jun 2024 11:00:08 -0700 Subject: [PATCH 061/106] comments --- server/prompt.go | 29 +++--- server/prompt_test.go | 34 +++---- server/routes.go | 46 +++++----- template/template.go | 48 +++++----- template/template_test.go | 180 ++++++++++++++++++++++++++++++-------- 5 files changed, 224 insertions(+), 113 deletions(-) diff --git a/server/prompt.go b/server/prompt.go index 5016fbe14..51d691a9f 100644 --- a/server/prompt.go +++ b/server/prompt.go @@ -11,8 +11,13 @@ import ( "github.com/ollama/ollama/template" ) -func chatPrompt(ctx context.Context, r *runnerRef, msgs []api.Message) (prompt string, images []llm.ImageData, _ error) { - // extract system messages which should always be included +type tokenizeFunc func(context.Context, string) ([]int, error) + +// chatPrompt accepts a list of messages and returns the prompt and images that should be used for the next chat turn. 
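
The strategy this comment describes, and the loop just below implements, can be sketched without the server types: walk the history from newest to oldest, tokenize each candidate window, charge a flat 768 tokens per attached image, and stop once the budget is exceeded while always keeping the newest message. A simplified stand-alone version with a whitespace tokenizer and invented names:

package main

import (
	"fmt"
	"strings"
)

type msg struct {
	Content string
	Images  int
}

const imageTokens = 768 // flat per-image estimate, matching the patch

// truncate keeps the longest suffix of msgs whose token count, plus a fixed
// charge per image, fits in numCtx; the newest message is always kept.
func truncate(msgs []msg, numCtx int) []msg {
	if len(msgs) == 0 {
		return msgs
	}

	count := func(window []msg) int {
		var sb strings.Builder
		images := 0
		for _, m := range window {
			sb.WriteString(m.Content + " ")
			images += m.Images
		}
		return len(strings.Fields(sb.String())) + imageTokens*images
	}

	start := len(msgs) - 1 // always include the latest message
	for i := start - 1; i >= 0; i-- {
		if count(msgs[i:]) > numCtx {
			break
		}
		start = i
	}
	return msgs[start:]
}

func main() {
	history := []msg{
		{Content: "You're a test, Harry!"},
		{Content: "I-I'm a what?"},
		{Content: "A test. And a thumping good one at that, I'd wager."},
	}
	fmt.Println(len(truncate(history, 8))) // 1: only the newest message is kept
}

The real chatPrompt additionally folds system messages into every candidate window and notes a TODO to read the true embedding length from the projector metadata rather than assuming 768.
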
+// chatPrompt truncates any messages that exceed the context window of the model, making sure to always include 1) the +// latest message and 2) system messages +func chatPrompt(ctx context.Context, m *Model, tokenize tokenizeFunc, opts *api.Options, msgs []api.Message) (prompt string, images []llm.ImageData, _ error) { + // pull out any system messages which should always be included in the prompt var system []api.Message msgs = slices.DeleteFunc(msgs, func(m api.Message) bool { if m.Role == "system" { @@ -23,32 +28,35 @@ func chatPrompt(ctx context.Context, r *runnerRef, msgs []api.Message) (prompt s return false }) - if len(system) == 0 && r.model.System != "" { + if len(system) == 0 && m.System != "" { // add model system prompt since it wasn't provided - system = append(system, api.Message{Role: "system", Content: r.model.System}) + system = append(system, api.Message{Role: "system", Content: m.System}) } + // always include the last message n := len(msgs) - 1 + // in reverse, find all messages that fit into context window for i := n - 1; i >= 0; i-- { var b bytes.Buffer - if err := r.model.Template.Execute(&b, template.Values{Messages: append(system, msgs[i:]...)}); err != nil { + if err := m.Template.Execute(&b, template.Values{Messages: append(system, msgs[i:]...)}); err != nil { return "", nil, err } - s, err := r.llama.Tokenize(ctx, b.String()) + s, err := tokenize(ctx, b.String()) if err != nil { return "", nil, err } c := len(s) - if r.model.ProjectorPaths != nil { + if m.ProjectorPaths != nil { for _, m := range msgs[i:] { - // TODO: get image embedding length from project metadata + // images are represented as 768 sized embeddings + // TODO: get embedding length from project metadata c += 768 * len(m.Images) } } - if c > r.NumCtx { + if c > opts.NumCtx { slog.Debug("truncating input messages which exceed context length", "truncated", len(msgs[i:])) break } else { @@ -56,8 +64,9 @@ func chatPrompt(ctx context.Context, r *runnerRef, msgs []api.Message) (prompt s } } + // truncate any messages that do not fit into the context window var b bytes.Buffer - if err := r.model.Template.Execute(&b, template.Values{Messages: append(system, msgs[n:]...)}); err != nil { + if err := m.Template.Execute(&b, template.Values{Messages: append(system, msgs[n:]...)}); err != nil { return "", nil, err } diff --git a/server/prompt_test.go b/server/prompt_test.go index 59288b46c..d4cee98c2 100644 --- a/server/prompt_test.go +++ b/server/prompt_test.go @@ -7,15 +7,10 @@ import ( "testing" "github.com/ollama/ollama/api" - "github.com/ollama/ollama/llm" "github.com/ollama/ollama/template" ) -type mock struct { - llm.LlamaServer -} - -func (m mock) Tokenize(_ context.Context, s string) (tokens []int, err error) { +func tokenize(_ context.Context, s string) (tokens []int, err error) { for range strings.Fields(s) { tokens = append(tokens, len(tokens)) } @@ -48,7 +43,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "truncate messages", + name: "truncate messages", limit: 1, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, @@ -60,7 +55,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "truncate messages with image", + name: "truncate messages with image", limit: 64, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, @@ -75,7 +70,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "truncate messages with images", + name: "truncate messages with images", limit: 64, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!", Images: 
[]api.ImageData{[]byte("something")}}, @@ -90,7 +85,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "messages with images", + name: "messages with images", limit: 2048, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!", Images: []api.ImageData{[]byte("something")}}, @@ -106,7 +101,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "message with image tag", + name: "message with image tag", limit: 2048, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry! [img]", Images: []api.ImageData{[]byte("something")}}, @@ -122,7 +117,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "messages with interleaved images", + name: "messages with interleaved images", limit: 2048, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, @@ -140,7 +135,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "truncate message with interleaved images", + name: "truncate message with interleaved images", limit: 1024, msgs: []api.Message{ {Role: "user", Content: "You're a test, Harry!"}, @@ -157,7 +152,7 @@ func TestChatPrompt(t *testing.T) { }, }, { - name: "message with system prompt", + name: "message with system prompt", limit: 2048, msgs: []api.Message{ {Role: "system", Content: "You are the Test Who Lived."}, @@ -181,14 +176,9 @@ func TestChatPrompt(t *testing.T) { for _, tt := range cases { t.Run(tt.name, func(t *testing.T) { - r := runnerRef{ - llama: mock{}, - model: &Model{Template: tmpl, ProjectorPaths: []string{"vision"}}, - Options: &api.Options{}, - } - - r.NumCtx = tt.limit - prompt, images, err := chatPrompt(context.TODO(), &r, tt.msgs) + model := Model{Template: tmpl, ProjectorPaths: []string{"vision"}} + opts := api.Options{Runner: api.Runner{NumCtx: tt.limit}} + prompt, images, err := chatPrompt(context.TODO(), &model, tokenize, &opts, tt.msgs) if err != nil { t.Fatal(err) } diff --git a/server/routes.go b/server/routes.go index 35e64511b..1a93e9770 100644 --- a/server/routes.go +++ b/server/routes.go @@ -54,6 +54,8 @@ func init() { gin.SetMode(mode) } +var errRequired = errors.New("is required") + func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options, error) { opts := api.DefaultOptions() if err := opts.FromMap(model.Options); err != nil { @@ -69,7 +71,7 @@ func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options func (s *Server) scheduleRunner(ctx context.Context, name string, caps []Capability, requestOpts map[string]any, keepAlive *api.Duration) (*runnerRef, error) { if name == "" { - return nil, errors.New("model is required") + return nil, fmt.Errorf("model %w", errRequired) } model, err := GetModel(name) @@ -121,7 +123,17 @@ func (s *Server) GenerateHandler(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%q does not support generate", req.Model)}) return } else if err != nil { - handleScheduleError(c, err) + handleScheduleError(c, req.Model, err) + return + } + + if req.Prompt == "" { + c.JSON(http.StatusOK, api.GenerateResponse{ + Model: req.Model, + CreatedAt: time.Now().UTC(), + Done: true, + DoneReason: "load", + }) return } @@ -139,23 +151,11 @@ func (s *Server) GenerateHandler(c *gin.Context) { msgs = append(msgs, api.Message{Role: "system", Content: r.model.System}) } - if req.Prompt != "" { - for _, i := range images { - msgs = append(msgs, api.Message{Role: "user", Content: fmt.Sprintf("[img-%d]", i.ID)}) - } - - msgs = append(msgs, api.Message{Role: "user", Content: req.Prompt}) + for _, i := range images { + msgs = append(msgs, 
api.Message{Role: "user", Content: fmt.Sprintf("[img-%d]", i.ID)}) } - if len(msgs) == 0 { - c.JSON(http.StatusOK, api.GenerateResponse{ - Model: req.Model, - CreatedAt: time.Now().UTC(), - Done: true, - DoneReason: "load", - }) - return - } + msgs = append(msgs, api.Message{Role: "user", Content: req.Prompt}) tmpl := r.model.Template if req.Template != "" { @@ -256,7 +256,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) { r, err := s.scheduleRunner(c.Request.Context(), req.Model, []Capability{}, req.Options, req.KeepAlive) if err != nil { - handleScheduleError(c, err) + handleScheduleError(c, req.Model, err) return } @@ -1135,7 +1135,7 @@ func (s *Server) ChatHandler(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%q does not support chat", req.Model)}) return } else if err != nil { - handleScheduleError(c, err) + handleScheduleError(c, req.Model, err) return } @@ -1150,7 +1150,7 @@ func (s *Server) ChatHandler(c *gin.Context) { return } - prompt, images, err := chatPrompt(c.Request.Context(), r, req.Messages) + prompt, images, err := chatPrompt(c.Request.Context(), r.model, r.llama.Tokenize, r.Options, req.Messages) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return @@ -1215,12 +1215,16 @@ func (s *Server) ChatHandler(c *gin.Context) { streamResponse(c, ch) } -func handleScheduleError(c *gin.Context, err error) { +func handleScheduleError(c *gin.Context, name string, err error) { switch { + case errors.Is(err, errRequired): + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) case errors.Is(err, context.Canceled): c.JSON(499, gin.H{"error": "request canceled"}) case errors.Is(err, ErrMaxQueue): c.JSON(http.StatusServiceUnavailable, gin.H{"error": err.Error()}) + case errors.Is(err, os.ErrNotExist): + c.JSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("model %q not found, try pulling it first", name)}) default: c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) } diff --git a/template/template.go b/template/template.go index cfba5a238..c8f8f6d0d 100644 --- a/template/template.go +++ b/template/template.go @@ -83,6 +83,7 @@ type Template struct { raw string } +// response is a template node that can be added to templates that don't already have one var response = parse.ActionNode{ NodeType: parse.NodeAction, Pipe: &parse.PipeNode{ @@ -101,28 +102,25 @@ var response = parse.ActionNode{ }, } +var funcs = template.FuncMap{ + "toJson": func(v any) string { + b, err := json.Marshal(v) + if err != nil { + return "" + } + + return string(b) + }, + "add": func(a, b int) int { + return a + b + }, + "sub": func(a, b int) int { + return a - b + }, +} + func Parse(s string) (*Template, error) { - tmpl := template.New("").Option("missingkey=zero").Funcs(template.FuncMap{ - "toJson": func(v any) string { - b, err := json.Marshal(v) - if err != nil { - return "" - } - - return string(b) - }, - "isLastMessage": func(s []*api.Message, m *api.Message) bool { - for i := len(s) - 1; i >= 0; i-- { - if m.Role != s[i].Role { - continue - } - - return m == s[i] - } - - return false - }, - }) + tmpl := template.New("").Option("missingkey=zero").Funcs(funcs) tmpl, err := tmpl.Parse(s) if err != nil { @@ -218,7 +216,13 @@ func (t *Template) Execute(w io.Writer, v Values) error { return err } -func collate(msgs []api.Message) (system string, collated []*api.Message) { +type messages []*api.Message + +// collate messages based on role. consecutive messages of the same role are merged +// into a single message. 
collate also pulls out and merges messages with Role == "system" +// which are templated separately. As a side effect, it mangles message content adding image +// tags ([img-%d]) as needed +func collate(msgs []api.Message) (system string, collated messages) { var n int for i := range msgs { msg := msgs[i] diff --git a/template/template_test.go b/template/template_test.go index 5d5dad4b2..ac92bf489 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -8,6 +8,7 @@ import ( "os" "path/filepath" "slices" + "strconv" "testing" "text/template" @@ -15,6 +16,98 @@ import ( "github.com/ollama/ollama/llm" ) +func TestFuncs(t *testing.T) { + t.Run("toJson", func(t *testing.T) { + cases := []struct { + input any + expected string + }{ + {nil, "null"}, + {true, "true"}, + {false, "false"}, + {0, "0"}, + {1, "1"}, + {1.0, "1"}, + {1.1, "1.1"}, + {"", `""`}, + {"hello", `"hello"`}, + {[]int{1, 2, 3}, "[1,2,3]"}, + {[]string{"a", "b", "c"}, `["a","b","c"]`}, + {map[string]int{"a": 1, "b": 2}, `{"a":1,"b":2}`}, + {map[string]string{"a": "b", "c": "d"}, `{"a":"b","c":"d"}`}, + } + + for _, tt := range cases { + t.Run(tt.expected, func(t *testing.T) { + toJson, ok := funcs["toJson"].(func(any) string) + if !ok { + t.Fatal("toJson is not a function") + } + + if s := toJson(tt.input); s != tt.expected { + t.Errorf("expected %q, got %q", tt.expected, s) + } + }) + } + }) + + t.Run("add", func(t *testing.T) { + cases := []struct { + a, b int + expected int + }{ + {0, 0, 0}, + {0, 1, 1}, + {1, 0, 1}, + {1, 1, 2}, + {1, -1, 0}, + {-1, 1, 0}, + {-1, -1, -2}, + } + + for _, tt := range cases { + t.Run(strconv.Itoa(tt.expected), func(t *testing.T) { + add, ok := funcs["add"].(func(int, int) int) + if !ok { + t.Fatal("add is not a function") + } + + if n := add(tt.a, tt.b); n != tt.expected { + t.Errorf("expected %d, got %d", tt.expected, n) + } + }) + } + }) + + t.Run("sub", func(t *testing.T) { + cases := []struct { + a, b int + expected int + }{ + {0, 0, 0}, + {0, 1, -1}, + {1, 0, 1}, + {1, 1, 0}, + {1, -1, 2}, + {-1, 1, -2}, + {-1, -1, 0}, + } + + for _, tt := range cases { + t.Run(strconv.Itoa(tt.expected), func(t *testing.T) { + sub, ok := funcs["sub"].(func(int, int) int) + if !ok { + t.Fatal("sub is not a function") + } + + if n := sub(tt.a, tt.b); n != tt.expected { + t.Errorf("expected %d, got %d", tt.expected, n) + } + }) + } + }) +} + func TestNamed(t *testing.T) { f, err := os.Open(filepath.Join("testdata", "templates.jsonl")) if err != nil { @@ -89,77 +182,86 @@ func TestParse(t *testing.T) { } func TestExecuteWithMessages(t *testing.T) { + type template struct { + name string + template string + } cases := []struct { - templates []string + name string + templates []template values Values expected string }{ { - []string{ - `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `, - `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`, - `{{- range .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (isLastMessage $.Messages .) $.System }}{{ $.System }}{{ print "\n\n" }} + "mistral", + []template{ + {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, + {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, + {"messages", `{{- range .Messages }} +{{- if eq .Role "user" }}[INST] {{ if and (eq (index $.Messages (sub (len $.Messages) 1)) .) 
$.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} -{{- end }}`, +{{- end }}`}, }, Values{ Messages: []api.Message{ {Role: "user", Content: "Hello friend!"}, {Role: "assistant", Content: "Hello human!"}, - {Role: "user", Content: "Yay!"}, + {Role: "user", Content: "What is your name?"}, }, }, - `[INST] Hello friend![/INST] Hello human![INST] Yay![/INST] `, + `[INST] Hello friend![/INST] Hello human![INST] What is your name?[/INST] `, }, { - []string{ - `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `, - `[INST] {{ if .System }}{{ .System }}{{ print "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`, - ` + "mistral system", + []template{ + {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, + {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, + {"messages", ` {{- range .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (isLastMessage $.Messages .) $.System }}{{ $.System }}{{ print "\n\n" }} +{{- if eq .Role "user" }}[INST] {{ if and (eq (index $.Messages (sub (len $.Messages) 1)) .) $.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} -{{- end }}`, +{{- end }}`}, }, Values{ Messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant!"}, {Role: "user", Content: "Hello friend!"}, {Role: "assistant", Content: "Hello human!"}, - {Role: "user", Content: "Yay!"}, + {Role: "user", Content: "What is your name?"}, }, }, `[INST] Hello friend![/INST] Hello human![INST] You are a helpful assistant! -Yay![/INST] `, +What is your name?[/INST] `, }, { - []string{ - `{{ if .System }}<|im_start|>system + "chatml", + []template{ + // this does not have a "no response" test because it's impossible to render the same output + {"response", `{{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant {{ .Response }}<|im_end|> -`, - ` +`}, + {"messages", ` {{- range .Messages }} -{{- if and (eq .Role "user") (isLastMessage $.Messages .) $.System }}<|im_start|>system -{{ $.System }}<|im_end|>{{ print "\n" }} +{{- if and (eq .Role "user") (eq (index $.Messages (sub (len $.Messages) 1)) .) 
$.System }}<|im_start|>system +{{ $.System }}<|im_end|>{{ "\n" }} {{- end }}<|im_start|>{{ .Role }} -{{ .Content }}<|im_end|>{{ print "\n" }} +{{ .Content }}<|im_end|>{{ "\n" }} {{- end }}<|im_start|>assistant -`, +`}, }, Values{ Messages: []api.Message{ {Role: "system", Content: "You are a helpful assistant!"}, {Role: "user", Content: "Hello friend!"}, {Role: "assistant", Content: "Hello human!"}, - {Role: "user", Content: "Yay!"}, + {Role: "user", Content: "What is your name?"}, }, }, `<|im_start|>user @@ -169,23 +271,25 @@ Hello human!<|im_end|> <|im_start|>system You are a helpful assistant!<|im_end|> <|im_start|>user -Yay!<|im_end|> +What is your name?<|im_end|> <|im_start|>assistant `, }, { - []string{ - `{{ if .Prompt }}Question: {{ .Prompt }} + "moondream", + []template{ + // this does not have a "no response" test because it's impossible to render the same output + {"response", `{{ if .Prompt }}Question: {{ .Prompt }} {{ end }}Answer: {{ .Response }} -`, - ` +`}, + {"messages", ` {{- range .Messages }} -{{- if eq .Role "user" }}Question: {{ .Content }}{{ print "\n\n" }} -{{- else if eq .Role "assistant" }}Answer: {{ .Content }}{{ print "\n\n" }} +{{- if eq .Role "user" }}Question: {{ .Content }}{{ "\n\n" }} +{{- else if eq .Role "assistant" }}Answer: {{ .Content }}{{ "\n\n" }} {{- end }} -{{- end }}Answer: `, +{{- end }}Answer: `}, }, Values{ Messages: []api.Message{ @@ -211,10 +315,10 @@ Answer: `, } for _, tt := range cases { - t.Run("", func(t *testing.T) { - for _, tmpl := range tt.templates { - t.Run("", func(t *testing.T) { - tmpl, err := Parse(tmpl) + t.Run(tt.name, func(t *testing.T) { + for _, ttt := range tt.templates { + t.Run(ttt.name, func(t *testing.T) { + tmpl, err := Parse(ttt.template) if err != nil { t.Fatal(err) } From ac7a842e550721fbc00e36e416e7cf6606993149 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 3 Jul 2024 09:00:07 -0700 Subject: [PATCH 062/106] fix model reloading ensure runtime model changes (template, system prompt, messages, options) are captured on model updates without needing to reload the server --- llm/server.go | 2 +- server/routes.go | 42 ++++++++++++++++++++++-------------------- 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/llm/server.go b/llm/server.go index 206f9e391..229d61e4a 100644 --- a/llm/server.go +++ b/llm/server.go @@ -679,7 +679,7 @@ type CompletionRequest struct { Prompt string Format string Images []ImageData - Options api.Options + Options *api.Options } type CompletionResponse struct { diff --git a/server/routes.go b/server/routes.go index 1a93e9770..4059c7c52 100644 --- a/server/routes.go +++ b/server/routes.go @@ -69,23 +69,25 @@ func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options return opts, nil } -func (s *Server) scheduleRunner(ctx context.Context, name string, caps []Capability, requestOpts map[string]any, keepAlive *api.Duration) (*runnerRef, error) { +// scheduleRunner schedules a runner after validating inputs such as capabilities and model options. +// It returns the allocated runner, model instance, and consolidated options if successful and error otherwise. 
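
The reloading fix in this patch boils down to where per-request state comes from: the long-lived runner used to carry the template, system prompt, and options captured at load time, so later edits to the model were ignored until a restart. Returning the freshly read Model and Options alongside the cached runner avoids that. A toy illustration of the stale-copy versus re-read difference, with all names invented:

package main

import "fmt"

type config struct{ System string }

// store stands in for the model manifest on disk.
var store = map[string]*config{"wizard": {System: "You are a Wizard."}}

// cachedRunner simulates a loaded runner that copied its config at load time.
type cachedRunner struct{ loadedWith config }

// handleStale answers from the copy the runner captured when it was loaded.
func handleStale(r *cachedRunner) string { return r.loadedWith.System }

// handleFresh reuses the expensive runner but re-reads the config per request,
// which is what the reworked scheduleRunner return values make possible.
func handleFresh(r *cachedRunner, name string) string { return store[name].System }

func main() {
	r := &cachedRunner{loadedWith: *store["wizard"]}

	// The model is edited after the runner was loaded.
	store["wizard"].System = "You are the Test Who Lived."

	fmt.Println(handleStale(r))           // You are a Wizard. (stale)
	fmt.Println(handleFresh(r, "wizard")) // You are the Test Who Lived.
}

This also lines up with CompletionRequest.Options becoming a pointer in llm/server.go above: handlers now pass the options resolved for the current request instead of dereferencing a copy held by the runner.
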
+func (s *Server) scheduleRunner(ctx context.Context, name string, caps []Capability, requestOpts map[string]any, keepAlive *api.Duration) (llm.LlamaServer, *Model, *api.Options, error) { if name == "" { - return nil, fmt.Errorf("model %w", errRequired) + return nil, nil, nil, fmt.Errorf("model %w", errRequired) } model, err := GetModel(name) if err != nil { - return nil, err + return nil, nil, nil, err } if err := model.CheckCapabilities(caps...); err != nil { - return nil, fmt.Errorf("%s %w", name, err) + return nil, nil, nil, fmt.Errorf("%s %w", name, err) } opts, err := modelOptions(model, requestOpts) if err != nil { - return nil, err + return nil, nil, nil, err } runnerCh, errCh := s.sched.GetRunner(ctx, model, opts, keepAlive) @@ -93,10 +95,10 @@ func (s *Server) scheduleRunner(ctx context.Context, name string, caps []Capabil select { case runner = <-runnerCh: case err = <-errCh: - return nil, err + return nil, nil, nil, err } - return runner, nil + return runner.llama, model, &opts, nil } func (s *Server) GenerateHandler(c *gin.Context) { @@ -118,7 +120,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { } caps := []Capability{CapabilityCompletion} - r, err := s.scheduleRunner(c.Request.Context(), req.Model, caps, req.Options, req.KeepAlive) + r, m, opts, err := s.scheduleRunner(c.Request.Context(), req.Model, caps, req.Options, req.KeepAlive) if errors.Is(err, errCapabilityCompletion) { c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%q does not support generate", req.Model)}) return @@ -147,8 +149,8 @@ func (s *Server) GenerateHandler(c *gin.Context) { var msgs []api.Message if req.System != "" { msgs = append(msgs, api.Message{Role: "system", Content: req.System}) - } else if r.model.System != "" { - msgs = append(msgs, api.Message{Role: "system", Content: r.model.System}) + } else if m.System != "" { + msgs = append(msgs, api.Message{Role: "system", Content: m.System}) } for _, i := range images { @@ -157,7 +159,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { msgs = append(msgs, api.Message{Role: "user", Content: req.Prompt}) - tmpl := r.model.Template + tmpl := m.Template if req.Template != "" { tmpl, err = template.Parse(req.Template) if err != nil { @@ -168,7 +170,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { var b bytes.Buffer if req.Context != nil { - s, err := r.llama.Detokenize(c.Request.Context(), req.Context) + s, err := r.Detokenize(c.Request.Context(), req.Context) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return @@ -190,11 +192,11 @@ func (s *Server) GenerateHandler(c *gin.Context) { ch := make(chan any) go func() { defer close(ch) - if err := r.llama.Completion(c.Request.Context(), llm.CompletionRequest{ + if err := r.Completion(c.Request.Context(), llm.CompletionRequest{ Prompt: prompt, Images: images, Format: req.Format, - Options: *r.Options, + Options: opts, }, func(r llm.CompletionResponse) { ch <- api.GenerateResponse{ Model: req.Model, @@ -254,7 +256,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) { return } - r, err := s.scheduleRunner(c.Request.Context(), req.Model, []Capability{}, req.Options, req.KeepAlive) + r, _, _, err := s.scheduleRunner(c.Request.Context(), req.Model, []Capability{}, req.Options, req.KeepAlive) if err != nil { handleScheduleError(c, req.Model, err) return @@ -266,7 +268,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) { return } - embedding, err := r.llama.Embedding(c.Request.Context(), req.Prompt) + embedding, err := 
r.Embedding(c.Request.Context(), req.Prompt) if err != nil { slog.Info(fmt.Sprintf("embedding generation failed: %v", err)) c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate embedding"}) @@ -1130,7 +1132,7 @@ func (s *Server) ChatHandler(c *gin.Context) { } caps := []Capability{CapabilityCompletion} - r, err := s.scheduleRunner(c.Request.Context(), req.Model, caps, req.Options, req.KeepAlive) + r, m, opts, err := s.scheduleRunner(c.Request.Context(), req.Model, caps, req.Options, req.KeepAlive) if errors.Is(err, errCapabilityCompletion) { c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%q does not support chat", req.Model)}) return @@ -1150,7 +1152,7 @@ func (s *Server) ChatHandler(c *gin.Context) { return } - prompt, images, err := chatPrompt(c.Request.Context(), r.model, r.llama.Tokenize, r.Options, req.Messages) + prompt, images, err := chatPrompt(c.Request.Context(), m, r.Tokenize, opts, req.Messages) if err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) return @@ -1161,11 +1163,11 @@ func (s *Server) ChatHandler(c *gin.Context) { ch := make(chan any) go func() { defer close(ch) - if err := r.llama.Completion(c.Request.Context(), llm.CompletionRequest{ + if err := r.Completion(c.Request.Context(), llm.CompletionRequest{ Prompt: prompt, Images: images, Format: req.Format, - Options: *r.Options, + Options: opts, }, func(r llm.CompletionResponse) { ch <- api.ChatResponse{ Model: req.Model, From 326363b3a72d9e2972a019dfc4c6147ea901f501 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 3 Jul 2024 13:49:14 -0700 Subject: [PATCH 063/106] no funcs --- template/template.go | 19 +------ template/template_test.go | 105 +++----------------------------------- 2 files changed, 7 insertions(+), 117 deletions(-) diff --git a/template/template.go b/template/template.go index c8f8f6d0d..b133b97e9 100644 --- a/template/template.go +++ b/template/template.go @@ -102,25 +102,8 @@ var response = parse.ActionNode{ }, } -var funcs = template.FuncMap{ - "toJson": func(v any) string { - b, err := json.Marshal(v) - if err != nil { - return "" - } - - return string(b) - }, - "add": func(a, b int) int { - return a + b - }, - "sub": func(a, b int) int { - return a - b - }, -} - func Parse(s string) (*Template, error) { - tmpl := template.New("").Option("missingkey=zero").Funcs(funcs) + tmpl := template.New("").Option("missingkey=zero") tmpl, err := tmpl.Parse(s) if err != nil { diff --git a/template/template_test.go b/template/template_test.go index ac92bf489..ac16bd606 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -8,7 +8,6 @@ import ( "os" "path/filepath" "slices" - "strconv" "testing" "text/template" @@ -16,98 +15,6 @@ import ( "github.com/ollama/ollama/llm" ) -func TestFuncs(t *testing.T) { - t.Run("toJson", func(t *testing.T) { - cases := []struct { - input any - expected string - }{ - {nil, "null"}, - {true, "true"}, - {false, "false"}, - {0, "0"}, - {1, "1"}, - {1.0, "1"}, - {1.1, "1.1"}, - {"", `""`}, - {"hello", `"hello"`}, - {[]int{1, 2, 3}, "[1,2,3]"}, - {[]string{"a", "b", "c"}, `["a","b","c"]`}, - {map[string]int{"a": 1, "b": 2}, `{"a":1,"b":2}`}, - {map[string]string{"a": "b", "c": "d"}, `{"a":"b","c":"d"}`}, - } - - for _, tt := range cases { - t.Run(tt.expected, func(t *testing.T) { - toJson, ok := funcs["toJson"].(func(any) string) - if !ok { - t.Fatal("toJson is not a function") - } - - if s := toJson(tt.input); s != tt.expected { - t.Errorf("expected %q, got %q", tt.expected, s) - } - }) - } - }) - - 
t.Run("add", func(t *testing.T) { - cases := []struct { - a, b int - expected int - }{ - {0, 0, 0}, - {0, 1, 1}, - {1, 0, 1}, - {1, 1, 2}, - {1, -1, 0}, - {-1, 1, 0}, - {-1, -1, -2}, - } - - for _, tt := range cases { - t.Run(strconv.Itoa(tt.expected), func(t *testing.T) { - add, ok := funcs["add"].(func(int, int) int) - if !ok { - t.Fatal("add is not a function") - } - - if n := add(tt.a, tt.b); n != tt.expected { - t.Errorf("expected %d, got %d", tt.expected, n) - } - }) - } - }) - - t.Run("sub", func(t *testing.T) { - cases := []struct { - a, b int - expected int - }{ - {0, 0, 0}, - {0, 1, -1}, - {1, 0, 1}, - {1, 1, 0}, - {1, -1, 2}, - {-1, 1, -2}, - {-1, -1, 0}, - } - - for _, tt := range cases { - t.Run(strconv.Itoa(tt.expected), func(t *testing.T) { - sub, ok := funcs["sub"].(func(int, int) int) - if !ok { - t.Fatal("sub is not a function") - } - - if n := sub(tt.a, tt.b); n != tt.expected { - t.Errorf("expected %d, got %d", tt.expected, n) - } - }) - } - }) -} - func TestNamed(t *testing.T) { f, err := os.Open(filepath.Join("testdata", "templates.jsonl")) if err != nil { @@ -197,8 +104,8 @@ func TestExecuteWithMessages(t *testing.T) { []template{ {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", `{{- range .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (index $.Messages (sub (len $.Messages) 1)) .) $.System }}{{ $.System }}{{ "\n\n" }} + {"messages", `{{- range $index, $_ := .Messages }} +{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages $index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, @@ -218,8 +125,8 @@ func TestExecuteWithMessages(t *testing.T) { {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", ` -{{- range .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (index $.Messages (sub (len $.Messages) 1)) .) $.System }}{{ $.System }}{{ "\n\n" }} +{{- range $index, $_ := .Messages }} +{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages $index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, @@ -248,8 +155,8 @@ What is your name?[/INST] `, {{ .Response }}<|im_end|> `}, {"messages", ` -{{- range .Messages }} -{{- if and (eq .Role "user") (eq (index $.Messages (sub (len $.Messages) 1)) .) $.System }}<|im_start|>system +{{- range $index, $_ := .Messages }} +{{- if and (eq .Role "user") (eq (len (slice $.Messages $index)) 1) $.System }}<|im_start|>system {{ $.System }}<|im_end|>{{ "\n" }} {{- end }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|>{{ "\n" }} From 631cfd9e62362d6aea72da96fa67c52a1fd4e990 Mon Sep 17 00:00:00 2001 From: Blake Mizerany Date: Fri, 5 Jul 2024 13:42:30 -0700 Subject: [PATCH 064/106] types/model: remove knowledge of digest (#5500) This was leading to ambiguity and confusion in ollama.com, and is not used anywhere in ollama at the moment. Once manifests are addressable by digest, we can add this back in, and in a way that is more tailored to the concept of addressing a manifest by digest. 
--- types/model/name.go | 22 +++++++--------------- types/model/name_test.go | 34 +++++++--------------------------- 2 files changed, 14 insertions(+), 42 deletions(-) diff --git a/types/model/name.go b/types/model/name.go index e645a844c..5e475687e 100644 --- a/types/model/name.go +++ b/types/model/name.go @@ -91,7 +91,6 @@ type Name struct { Namespace string Model string Tag string - RawDigest string } // ParseName parses and assembles a Name from a name string. The @@ -143,11 +142,6 @@ func ParseNameBare(s string) Name { var n Name var promised bool - s, n.RawDigest, promised = cutLast(s, "@") - if promised && n.RawDigest == "" { - n.RawDigest = MissingPart - } - // "/" is an illegal tag character, so we can use it to split the host if strings.LastIndex(s, ":") > strings.LastIndex(s, "/") { s, n.Tag, _ = cutPromised(s, ":") @@ -222,10 +216,6 @@ func (n Name) String() string { b.WriteByte(':') b.WriteString(n.Tag) } - if n.RawDigest != "" { - b.WriteByte('@') - b.WriteString(n.RawDigest) - } return b.String() } @@ -250,16 +240,18 @@ func (n Name) DisplayShortest() string { return sb.String() } -func IsValidNamespace(namespace string) bool { - return isValidPart(kindNamespace, namespace) +// IsValidNamespace reports whether the provided string is a valid +// namespace. +func IsValidNamespace(s string) bool { + return isValidPart(kindNamespace, s) } // IsValid reports whether all parts of the name are present and valid. The // digest is a special case, and is checked for validity only if present. +// +// Note: The digest check has been removed as is planned to be added back in +// at a later time. func (n Name) IsValid() bool { - if n.RawDigest != "" && !isValidPart(kindDigest, n.RawDigest) { - return false - } return n.IsFullyQualified() } diff --git a/types/model/name_test.go b/types/model/name_test.go index 008dd586c..794d14d79 100644 --- a/types/model/name_test.go +++ b/types/model/name_test.go @@ -122,21 +122,6 @@ func TestParseNameParts(t *testing.T) { }, wantFilepath: filepath.Join(part350, part80, part80, part80), }, - { - in: "@digest", - want: Name{ - RawDigest: "digest", - }, - wantValidDigest: false, - }, - { - in: "model@sha256:123", - want: Name{ - Model: "model", - RawDigest: "sha256:123", - }, - wantValidDigest: true, - }, } for _, tt := range cases { @@ -160,22 +145,18 @@ var testCases = map[string]bool{ // name -> valid "_why/_the/_lucky:_stiff": true, // minimal - "h/n/m:t@d": true, + "h/n/m:t": true, "host/namespace/model:tag": true, "host/namespace/model": false, "namespace/model": false, "model": false, - "@sha256-1000000000000000000000000000000000000000000000000000000000000000": false, - "model@sha256-1000000000000000000000000000000000000000000000000000000000000000": false, - "model@sha256:1000000000000000000000000000000000000000000000000000000000000000": false, // long (but valid) part80 + "/" + part80 + "/" + part80 + ":" + part80: true, part350 + "/" + part80 + "/" + part80 + ":" + part80: true, - "h/nn/mm:t@sha256-1000000000000000000000000000000000000000000000000000000000000000": true, // bare minimum part sizes - "h/nn/mm:t@sha256:1000000000000000000000000000000000000000000000000000000000000000": true, // bare minimum part sizes + "h/nn/mm:t": true, // bare minimum part sizes // unqualified "m": false, @@ -196,11 +177,10 @@ var testCases = map[string]bool{ // name -> valid "@": false, // not starting with alphanum - "-hh/nn/mm:tt@dd": false, - "hh/-nn/mm:tt@dd": false, - "hh/nn/-mm:tt@dd": false, - "hh/nn/mm:-tt@dd": false, - "hh/nn/mm:tt@-dd": false, + 
"-hh/nn/mm:tt": false, + "hh/-nn/mm:tt": false, + "hh/nn/-mm:tt": false, + "hh/nn/mm:-tt": false, // hosts "host:https/namespace/model:tag": true, @@ -334,7 +314,7 @@ func FuzzName(f *testing.F) { f.Fuzz(func(t *testing.T, s string) { n := ParseNameBare(s) if n.IsValid() { - parts := [...]string{n.Host, n.Namespace, n.Model, n.Tag, n.RawDigest} + parts := [...]string{n.Host, n.Namespace, n.Model, n.Tag} for _, part := range parts { if part == ".." { t.Errorf("unexpected .. as valid part") From 9d30f9f8b3836e8d617eadf63a71d8363ff56c7e Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 5 Jul 2024 12:25:53 -0700 Subject: [PATCH 065/106] Always go build in CI generate steps With the recent cgo changes, bugs can sneak through if we don't make sure to `go build` all the permutations --- .github/workflows/test.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 29adf56f3..13d1c957c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -58,6 +58,7 @@ jobs: runs-on: ${{ matrix.os }} env: GOARCH: ${{ matrix.arch }} + CGO_ENABLED: '1' steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 @@ -79,6 +80,7 @@ jobs: - run: go generate -x ./... if: ${{ ! startsWith(matrix.os, 'windows-') }} name: 'Unix Go Generate' + - run: go build . - uses: actions/upload-artifact@v4 with: name: ${{ matrix.os }}-${{ matrix.arch }}-libraries From 4fd5f3526a116d05cd74cfcc7217d4e6326e1bea Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 19:07:01 -0400 Subject: [PATCH 066/106] fix cmake build (#5505) --- llm/ext_server/CMakeLists.txt | 29 ++++++++++++++++------------- llm/generate/gen_common.sh | 1 + 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/llm/ext_server/CMakeLists.txt b/llm/ext_server/CMakeLists.txt index 9de50739c..c300244f9 100644 --- a/llm/ext_server/CMakeLists.txt +++ b/llm/ext_server/CMakeLists.txt @@ -1,14 +1,17 @@ - -set(TARGET ollama_llama_server) -option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) -install(TARGETS ${TARGET} RUNTIME) -target_compile_definitions(${TARGET} PRIVATE - SERVER_VERBOSE=$ -) -target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) -if (WIN32) - TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) -endif() + +set(TARGET ollama_llama_server) +option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) +target_compile_definitions(${TARGET} PRIVATE + SERVER_VERBOSE=$ +) +target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) +install(TARGETS ollama_llama_server ggml llama + RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin" + LIBRARY DESTINATION "${CMAKE_BINARY_DIR}/bin" + COMPONENT ollama_llama_server) +if (WIN32) + TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) +endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) \ No newline at end of file diff --git a/llm/generate/gen_common.sh b/llm/generate/gen_common.sh index da1b06882..23feaf99d 100644 --- a/llm/generate/gen_common.sh +++ b/llm/generate/gen_common.sh @@ -81,6 +81,7 @@ apply_patches() { build() { cmake -S ${LLAMACPP_DIR} -B ${BUILD_DIR} ${CMAKE_DEFS} cmake --build ${BUILD_DIR} ${CMAKE_TARGETS} -j8 + cmake --install ${BUILD_DIR} --component 
ollama_llama_server } compress() { From fb6cbc02fbe0ff8d791413a81558a1fe9725b778 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 27 Jun 2024 14:15:17 -0700 Subject: [PATCH 067/106] update named templates --- go.mod | 3 +- server/routes_create_test.go | 4 +- template/alfred.gotmpl | 9 ++- template/alpaca.gotmpl | 14 +++- template/chatml.gotmpl | 11 ++- template/chatqa.gotmpl | 14 +++- template/codellama-70b-instruct.gotmpl | 13 +++- template/falcon-instruct.gotmpl | 12 +++- template/gemma-instruct.gotmpl | 14 +++- template/granite-instruct.gotmpl | 16 ++++- template/llama2-chat.gotmpl | 15 +++- template/llama3-instruct.gotmpl | 14 +++- template/magicoder.gotmpl | 15 +++- template/mistral-instruct.gotmpl | 15 ++-- template/openchat.gotmpl | 12 +++- template/phi-3.gotmpl | 11 ++- template/solar-instruct.gotmpl | 16 ++++- template/starcoder2-instruct.gotmpl | 15 ++++ template/template_test.go | 69 ++++++++++++++++++- .../alfred.gotmpl/system-user-assistant-user | 1 + template/testdata/alfred.gotmpl/user | 1 + .../alfred.gotmpl/user-assistant-user | 1 + .../alpaca.gotmpl/system-user-assistant-user | 10 +++ template/testdata/alpaca.gotmpl/user | 4 ++ .../alpaca.gotmpl/user-assistant-user | 10 +++ .../chatml.gotmpl/system-user-assistant-user | 9 +++ template/testdata/chatml.gotmpl/user | 3 + .../chatml.gotmpl/user-assistant-user | 7 ++ .../chatqa.gotmpl/system-user-assistant-user | 9 +++ template/testdata/chatqa.gotmpl/user | 3 + .../chatqa.gotmpl/user-assistant-user | 7 ++ .../system-user-assistant-user | 11 +++ .../codellama-70b-instruct.gotmpl/user | 5 ++ .../user-assistant-user | 9 +++ .../system-user-assistant-user | 8 +++ template/testdata/falcon-instruct.gotmpl/user | 3 + .../user-assistant-user | 7 ++ .../system-user-assistant-user | 8 +++ template/testdata/gemma-instruct.gotmpl/user | 3 + .../gemma-instruct.gotmpl/user-assistant-user | 7 ++ .../system-user-assistant-user | 13 ++++ .../testdata/granite-instruct.gotmpl/user | 4 ++ .../user-assistant-user | 10 +++ .../system-user-assistant-user | 5 ++ template/testdata/llama2-chat.gotmpl/user | 3 + .../llama2-chat.gotmpl/user-assistant-user | 3 + .../system-user-assistant-user | 10 +++ template/testdata/llama3-instruct.gotmpl/user | 4 ++ .../user-assistant-user | 8 +++ .../system-user-assistant-user | 12 ++++ template/testdata/magicoder.gotmpl/user | 4 ++ .../magicoder.gotmpl/user-assistant-user | 10 +++ .../system-user-assistant-user | 2 + .../testdata/mistral-instruct.gotmpl/user | 1 + .../user-assistant-user | 1 + .../system-user-assistant-user | 1 + template/testdata/openchat.gotmpl/user | 1 + .../openchat.gotmpl/user-assistant-user | 1 + .../phi-3.gotmpl/system-user-assistant-user | 9 +++ template/testdata/phi-3.gotmpl/user | 3 + .../testdata/phi-3.gotmpl/user-assistant-user | 7 ++ .../system-user-assistant-user | 13 ++++ template/testdata/solar-instruct.gotmpl/user | 4 ++ .../solar-instruct.gotmpl/user-assistant-user | 10 +++ .../system-user-assistant-user | 12 ++++ .../testdata/starcoder2-instruct.gotmpl/user | 4 ++ .../user-assistant-user | 10 +++ .../vicuna.gotmpl/system-user-assistant-user | 6 ++ template/testdata/vicuna.gotmpl/user | 2 + .../vicuna.gotmpl/user-assistant-user | 4 ++ .../zephyr.gotmpl/system-user-assistant-user | 9 +++ template/testdata/zephyr.gotmpl/user | 3 + .../zephyr.gotmpl/user-assistant-user | 7 ++ template/vicuna.gotmpl | 13 +++- template/zephyr.gotmpl | 11 ++- 75 files changed, 611 insertions(+), 27 deletions(-) create mode 100644 template/testdata/alfred.gotmpl/system-user-assistant-user create 
mode 100644 template/testdata/alfred.gotmpl/user create mode 100644 template/testdata/alfred.gotmpl/user-assistant-user create mode 100644 template/testdata/alpaca.gotmpl/system-user-assistant-user create mode 100644 template/testdata/alpaca.gotmpl/user create mode 100644 template/testdata/alpaca.gotmpl/user-assistant-user create mode 100644 template/testdata/chatml.gotmpl/system-user-assistant-user create mode 100644 template/testdata/chatml.gotmpl/user create mode 100644 template/testdata/chatml.gotmpl/user-assistant-user create mode 100644 template/testdata/chatqa.gotmpl/system-user-assistant-user create mode 100644 template/testdata/chatqa.gotmpl/user create mode 100644 template/testdata/chatqa.gotmpl/user-assistant-user create mode 100644 template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/codellama-70b-instruct.gotmpl/user create mode 100644 template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/falcon-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/falcon-instruct.gotmpl/user create mode 100644 template/testdata/falcon-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/gemma-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/gemma-instruct.gotmpl/user create mode 100644 template/testdata/gemma-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/granite-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/granite-instruct.gotmpl/user create mode 100644 template/testdata/granite-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/llama2-chat.gotmpl/system-user-assistant-user create mode 100644 template/testdata/llama2-chat.gotmpl/user create mode 100644 template/testdata/llama2-chat.gotmpl/user-assistant-user create mode 100644 template/testdata/llama3-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/llama3-instruct.gotmpl/user create mode 100644 template/testdata/llama3-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/magicoder.gotmpl/system-user-assistant-user create mode 100644 template/testdata/magicoder.gotmpl/user create mode 100644 template/testdata/magicoder.gotmpl/user-assistant-user create mode 100644 template/testdata/mistral-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/mistral-instruct.gotmpl/user create mode 100644 template/testdata/mistral-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/openchat.gotmpl/system-user-assistant-user create mode 100644 template/testdata/openchat.gotmpl/user create mode 100644 template/testdata/openchat.gotmpl/user-assistant-user create mode 100644 template/testdata/phi-3.gotmpl/system-user-assistant-user create mode 100644 template/testdata/phi-3.gotmpl/user create mode 100644 template/testdata/phi-3.gotmpl/user-assistant-user create mode 100644 template/testdata/solar-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/solar-instruct.gotmpl/user create mode 100644 template/testdata/solar-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/starcoder2-instruct.gotmpl/system-user-assistant-user create mode 100644 template/testdata/starcoder2-instruct.gotmpl/user create mode 100644 template/testdata/starcoder2-instruct.gotmpl/user-assistant-user create mode 100644 template/testdata/vicuna.gotmpl/system-user-assistant-user 
create mode 100644 template/testdata/vicuna.gotmpl/user create mode 100644 template/testdata/vicuna.gotmpl/user-assistant-user create mode 100644 template/testdata/zephyr.gotmpl/system-user-assistant-user create mode 100644 template/testdata/zephyr.gotmpl/user create mode 100644 template/testdata/zephyr.gotmpl/user-assistant-user diff --git a/go.mod b/go.mod index 6807b9b48..2e0c6614c 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( require ( github.com/agnivade/levenshtein v1.1.1 github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1 + github.com/google/go-cmp v0.6.0 github.com/mattn/go-runewidth v0.0.14 github.com/nlpodyssey/gopickle v0.3.0 github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c @@ -71,7 +72,7 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 golang.org/x/term v0.20.0 - golang.org/x/text v0.15.0 // indirect + golang.org/x/text v0.15.0 google.golang.org/protobuf v1.34.1 gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/server/routes_create_test.go b/server/routes_create_test.go index 340612822..269a0ba12 100644 --- a/server/routes_create_test.go +++ b/server/routes_create_test.go @@ -545,9 +545,9 @@ func TestCreateDetectTemplate(t *testing.T) { } checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ - filepath.Join(p, "blobs", "sha256-2f8e594e6f34b1b4d36a246628eeb3365ce442303d656f1fcc69e821722acea0"), - filepath.Join(p, "blobs", "sha256-542b217f179c7825eeb5bca3c77d2b75ed05bafbd3451d9188891a60a85337c6"), filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"), + filepath.Join(p, "blobs", "sha256-9512c372dfc7d84d6065b8dd2b601aeed8cc1a78e7a7aa784a42fff37f5524b7"), + filepath.Join(p, "blobs", "sha256-b8b78cb8c6eefd14c06f1af042e6161255bf87bbf2dd14fce57cdac893db8139"), }) }) diff --git a/template/alfred.gotmpl b/template/alfred.gotmpl index cecb9d2c8..44284f04c 100644 --- a/template/alfred.gotmpl +++ b/template/alfred.gotmpl @@ -1 +1,8 @@ -{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} \ No newline at end of file +{{- if .Messages }} +{{- if .System }}{{ .System }} +{{- end }} +{{- range .Messages }}{{ .Content }} +{{- end }} +{{- else }} +{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/alpaca.gotmpl b/template/alpaca.gotmpl index 440d06627..c1f69dc92 100644 --- a/template/alpaca.gotmpl +++ b/template/alpaca.gotmpl @@ -1,7 +1,19 @@ +{{- if .Messages }} +{{- if .System }}{{ .System }} +{{- end }} +{{- range .Messages }} +{{- if eq .Role "user" }}### Instruction: +{{- else if eq .Role "assistant" }}### Response: +{{- end }} +{{ .Content }} + +{{ end }}### Response: +{{ else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction: {{ .Prompt }} {{ end }}### Response: -{{ .Response }} \ No newline at end of file +{{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/chatml.gotmpl b/template/chatml.gotmpl index dcf172853..d945547c7 100644 --- a/template/chatml.gotmpl +++ b/template/chatml.gotmpl @@ -1,6 +1,15 @@ +{{- if .Messages }} +{{- if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }} +{{- range .Messages }}<|im_start|>{{ .Role }} +{{ .Content }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ else }} {{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant -{{ .Response 
}}<|im_end|> \ No newline at end of file +{{ .Response }}<|im_end|> +{{- end }} \ No newline at end of file diff --git a/template/chatqa.gotmpl b/template/chatqa.gotmpl index 1ede6227f..7022c4790 100644 --- a/template/chatqa.gotmpl +++ b/template/chatqa.gotmpl @@ -1,5 +1,17 @@ +{{- if .Messages }} +{{- if .System }}System: {{ .System }} + +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}User: +{{- else if eq .Role "assistant" }}Assistant: +{{- end }} {{ .Content }} + +{{ end }}Assistant: +{{- else }} {{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} -{{ end }}Assistant: <|begin_of_text|>{{ .Response }} \ No newline at end of file +{{ end }}Assistant: <|begin_of_text|>{{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/codellama-70b-instruct.gotmpl b/template/codellama-70b-instruct.gotmpl index 3196bd6fd..392d839eb 100644 --- a/template/codellama-70b-instruct.gotmpl +++ b/template/codellama-70b-instruct.gotmpl @@ -1,3 +1,13 @@ +{{- if .Messages }} +{{- if .System }}Source: system + + {{ .System }} {{ end }} +{{- range .Messages }}Source: {{ .Role }} + + {{ .Content }} {{ end }}Source: assistant +Destination: user + +{{ else }} {{ if .System }} Source: system {{ .System }} {{ end }} Source: user @@ -5,4 +15,5 @@ {{ .Prompt }} Source: assistant Destination: user - {{ .Response }} \ No newline at end of file + {{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/falcon-instruct.gotmpl b/template/falcon-instruct.gotmpl index 2309a1c5d..99d67f93c 100644 --- a/template/falcon-instruct.gotmpl +++ b/template/falcon-instruct.gotmpl @@ -1,3 +1,13 @@ +{{- if .Messages }} +{{- if .System }}System: {{ .System }} +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}User: +{{ else if eq .Role "assistant" }}Falcon: +{{ end }}{{ .Content }} +{{ end }}Falcon: +{{ else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} -{{ end }}Assistant: {{ .Response }} \ No newline at end of file +{{ end }}Assistant: {{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/gemma-instruct.gotmpl b/template/gemma-instruct.gotmpl index 91b9883a1..870a8f2e2 100644 --- a/template/gemma-instruct.gotmpl +++ b/template/gemma-instruct.gotmpl @@ -1,4 +1,16 @@ +{{- if .Messages }} +{{- range $index, $_ := .Messages }} +{{- if eq .Role "user" }}user +{{- if and $.System (eq $index 0) }} +{{ $.System }} +{{- end }} +{{- else if eq .Role "assistant" }}model +{{- end }} +{{ .Content }} +{{ end }}model +{{ else }} user {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} model -{{ .Response }} \ No newline at end of file +{{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/granite-instruct.gotmpl b/template/granite-instruct.gotmpl index 2ede647f5..327ff3eef 100644 --- a/template/granite-instruct.gotmpl +++ b/template/granite-instruct.gotmpl @@ -1,3 +1,16 @@ +{{- if .Messages }} +{{- if .System }}System: +{{ .System }} + +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}Question: +{{- else if eq .Role "assistant" }}Answer: +{{- end }} +{{ .Content }} + +{{ end }}Answer: +{{ else }} {{ if .System }} System: {{ .System }} @@ -6,4 +19,5 @@ System: {{ .Prompt }} {{ end }}Answer: -{{ .Response }} \ No newline at end of file +{{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/llama2-chat.gotmpl b/template/llama2-chat.gotmpl index a739f6908..6327d5812 100644 --- a/template/llama2-chat.gotmpl +++ b/template/llama2-chat.gotmpl @@ -1,3 
+1,16 @@ +{{- if .Messages }} +{{- range $index, $_ := .Messages }} +{{- if eq .Role "user" }}[INST] {{ if eq $index 0 }}<> +{{- if $.System }} +{{ $.System }} +{{ end }}<> + +{{ end }}{{ .Content }} +{{- else }} [/INST] {{ .Content }} +{{- end }} +{{- end }} [/INST] +{{- else }} [INST] <>{{ .System }}<> -{{ .Prompt }} [/INST] {{ .Response }} \ No newline at end of file +{{ .Prompt }} [/INST] {{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/llama3-instruct.gotmpl b/template/llama3-instruct.gotmpl index 36d0218b6..9c81a9535 100644 --- a/template/llama3-instruct.gotmpl +++ b/template/llama3-instruct.gotmpl @@ -1,7 +1,19 @@ +{{- if .Messages }} +{{- if .System }}<|start_header_id|>system<|end_header_id|> + +{{ .System }}<|eot_id|> +{{- end }} +{{- range .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|> + +{{ .Content }}<|eot_id|> +{{- end }}<|start_header_id|>assistant<|end_header_id|> + +{{ else }} {{ if .System }}<|start_header_id|>system<|end_header_id|> {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> -{{ .Response }}<|eot_id|> \ No newline at end of file +{{ .Response }}<|eot_id|> +{{- end }} \ No newline at end of file diff --git a/template/magicoder.gotmpl b/template/magicoder.gotmpl index 306972ecc..73a58127c 100644 --- a/template/magicoder.gotmpl +++ b/template/magicoder.gotmpl @@ -1,7 +1,20 @@ +{{- if .Messages }} +{{- if .System }}{{ .System }} + +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}@@ Instruction +{{- else if eq .Role "assistant" }}@@ Response +{{- end }} +{{ .Content }} + +{{ end }}@@ Response +{{ else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}@@ Instruction {{ .Prompt }} {{ end }}@@ Response -{{ .Response }} \ No newline at end of file +{{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/mistral-instruct.gotmpl b/template/mistral-instruct.gotmpl index dcf172853..eb3d5ced2 100644 --- a/template/mistral-instruct.gotmpl +++ b/template/mistral-instruct.gotmpl @@ -1,6 +1,9 @@ -{{ if .System }}<|im_start|>system -{{ .System }}<|im_end|> -{{ end }}{{ if .Prompt }}<|im_start|>user -{{ .Prompt }}<|im_end|> -{{ end }}<|im_start|>assistant -{{ .Response }}<|im_end|> \ No newline at end of file +{{- if .Messages }} +{{- range $index, $_ := .Messages }} +{{- if eq .Role "user" }}[INST] {{ if and $.System (eq (len (slice $.Messages $index)) 1) }}{{ $.System }} +{{ end }}{{ .Content }} +{{- else if eq .Role "assistant" }}[/INST] {{ .Content }} +{{- end }} +{{- end }}[/INST] +{{- else }}[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST] {{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/openchat.gotmpl b/template/openchat.gotmpl index d2ca38685..d5e1cbb0d 100644 --- a/template/openchat.gotmpl +++ b/template/openchat.gotmpl @@ -1 +1,11 @@ -{{ .System }}<|end_of_turn|>GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> \ No newline at end of file +{{- if .Messages }} +{{- if .System }}GPT Correct System: {{ .System }}<|end_of_turn|> +{{- end }} +{{- range .Messages }}GPT Correct +{{- if eq .Role "user" }} User: +{{- else if eq .Role "assistant" }} Assistant: +{{- end }} {{ .Content }}<|end_of_turn|> +{{- end }}GPT Correct Assistant: +{{- else }} +{{ .System }}<|end_of_turn|>GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> +{{- end }} 
\ No newline at end of file diff --git a/template/phi-3.gotmpl b/template/phi-3.gotmpl index bf26dcee2..a3558d2b7 100644 --- a/template/phi-3.gotmpl +++ b/template/phi-3.gotmpl @@ -1,6 +1,15 @@ +{{- if .Messages }} +{{- if .System }}<|system|> +{{ .System }}<|end|> +{{ end }} +{{- range .Messages }}<|{{ .Role }}|> +{{ .Content }}<|end|> +{{ end }}<|assistant|> +{{ else }} {{ if .System }}<|system|> {{ .System }}<|end|> {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }}<|end|> {{ end }}<|assistant|> -{{ .Response }}<|end|> \ No newline at end of file +{{ .Response }}<|end|> +{{- end }} \ No newline at end of file diff --git a/template/solar-instruct.gotmpl b/template/solar-instruct.gotmpl index c275a26a3..caa6e8e77 100644 --- a/template/solar-instruct.gotmpl +++ b/template/solar-instruct.gotmpl @@ -1,3 +1,16 @@ +{{- if .Messages }} +{{- if .System }}### System: +{{ .System }} + +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}### User: +{{ .Content }} +{{ else if eq .Role "assistant" }}### Assistant: +{{ .Content }} +{{ end }} +{{ end }}### Assistant: +{{ else }} {{ if .System }}### System: {{ .System }} @@ -5,4 +18,5 @@ {{ .Prompt }} {{ end }}### Assistant: -{{ .Response }} \ No newline at end of file +{{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/starcoder2-instruct.gotmpl b/template/starcoder2-instruct.gotmpl index 33357e54e..7d7ff9326 100644 --- a/template/starcoder2-instruct.gotmpl +++ b/template/starcoder2-instruct.gotmpl @@ -1,3 +1,17 @@ +{{- if .Messages }} +{{- if .System }}{{ .System }} + +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}### Instruction +{{ .Content }} + +{{ else if eq .Role "assistant" }}### Response +{{ .Content }}<|endoftext|> + +{{ end }} +{{- end }}### Response +{{ else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction @@ -7,3 +21,4 @@ {{ end }}### Response {{ .Response }}<|endoftext|> +{{- end }} \ No newline at end of file diff --git a/template/template_test.go b/template/template_test.go index ac16bd606..428cdc77c 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -8,9 +8,10 @@ import ( "os" "path/filepath" "slices" + "strings" "testing" - "text/template" + "github.com/google/go-cmp/cmp" "github.com/ollama/ollama/api" "github.com/ollama/ollama/llm" ) @@ -47,7 +48,7 @@ func TestNamed(t *testing.T) { t.Fatal(err) } - tmpl, err := template.New(s).Parse(b.String()) + tmpl, err := Parse(b.String()) if err != nil { t.Fatal(err) } @@ -60,6 +61,70 @@ func TestNamed(t *testing.T) { } } +func TestTemplate(t *testing.T) { + cases := make(map[string][]api.Message) + for _, mm := range [][]api.Message{ + { + {Role: "user", Content: "Hello, how are you?"}, + }, + { + {Role: "user", Content: "Hello, how are you?"}, + {Role: "assistant", Content: "I'm doing great. How can I help you today?"}, + {Role: "user", Content: "I'd like to show off how chat templating works!"}, + }, + { + {Role: "system", Content: "You are a helpful assistant."}, + {Role: "user", Content: "Hello, how are you?"}, + {Role: "assistant", Content: "I'm doing great. 
How can I help you today?"}, + {Role: "user", Content: "I'd like to show off how chat templating works!"}, + }, + } { + var roles []string + for _, m := range mm { + roles = append(roles, m.Role) + } + + cases[strings.Join(roles, "-")] = mm + } + + matches, err := filepath.Glob("*.gotmpl") + if err != nil { + t.Fatal(err) + } + + for _, match := range matches { + t.Run(match, func(t *testing.T) { + bts, err := os.ReadFile(match) + if err != nil { + t.Fatal(err) + } + + tmpl, err := Parse(string(bts)) + if err != nil { + t.Fatal(err) + } + + for n, tt := range cases { + t.Run(n, func(t *testing.T) { + var actual bytes.Buffer + if err := tmpl.Execute(&actual, Values{Messages: tt}); err != nil { + t.Fatal(err) + } + + expect, err := os.ReadFile(filepath.Join("testdata", match, n)) + if err != nil { + t.Fatal(err) + } + + if diff := cmp.Diff(actual.Bytes(), expect); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) + } + }) + } + }) + } +} + func TestParse(t *testing.T) { cases := []struct { template string diff --git a/template/testdata/alfred.gotmpl/system-user-assistant-user b/template/testdata/alfred.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..03e23ea9c --- /dev/null +++ b/template/testdata/alfred.gotmpl/system-user-assistant-user @@ -0,0 +1 @@ +You are a helpful assistant.Hello, how are you?I'm doing great. How can I help you today?I'd like to show off how chat templating works! \ No newline at end of file diff --git a/template/testdata/alfred.gotmpl/user b/template/testdata/alfred.gotmpl/user new file mode 100644 index 000000000..7c884a6f0 --- /dev/null +++ b/template/testdata/alfred.gotmpl/user @@ -0,0 +1 @@ +Hello, how are you? \ No newline at end of file diff --git a/template/testdata/alfred.gotmpl/user-assistant-user b/template/testdata/alfred.gotmpl/user-assistant-user new file mode 100644 index 000000000..a60701ed7 --- /dev/null +++ b/template/testdata/alfred.gotmpl/user-assistant-user @@ -0,0 +1 @@ +Hello, how are you?I'm doing great. How can I help you today?I'd like to show off how chat templating works! \ No newline at end of file diff --git a/template/testdata/alpaca.gotmpl/system-user-assistant-user b/template/testdata/alpaca.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..20182d829 --- /dev/null +++ b/template/testdata/alpaca.gotmpl/system-user-assistant-user @@ -0,0 +1,10 @@ +You are a helpful assistant.### Instruction: +Hello, how are you? + +### Response: +I'm doing great. How can I help you today? + +### Instruction: +I'd like to show off how chat templating works! + +### Response: diff --git a/template/testdata/alpaca.gotmpl/user b/template/testdata/alpaca.gotmpl/user new file mode 100644 index 000000000..a0ce5dec7 --- /dev/null +++ b/template/testdata/alpaca.gotmpl/user @@ -0,0 +1,4 @@ +### Instruction: +Hello, how are you? + +### Response: diff --git a/template/testdata/alpaca.gotmpl/user-assistant-user b/template/testdata/alpaca.gotmpl/user-assistant-user new file mode 100644 index 000000000..6c5e23ff5 --- /dev/null +++ b/template/testdata/alpaca.gotmpl/user-assistant-user @@ -0,0 +1,10 @@ +### Instruction: +Hello, how are you? + +### Response: +I'm doing great. How can I help you today? + +### Instruction: +I'd like to show off how chat templating works! 
+ +### Response: diff --git a/template/testdata/chatml.gotmpl/system-user-assistant-user b/template/testdata/chatml.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..8b013fcfb --- /dev/null +++ b/template/testdata/chatml.gotmpl/system-user-assistant-user @@ -0,0 +1,9 @@ +<|im_start|>system +You are a helpful assistant.<|im_end|> +<|im_start|>user +Hello, how are you?<|im_end|> +<|im_start|>assistant +I'm doing great. How can I help you today?<|im_end|> +<|im_start|>user +I'd like to show off how chat templating works!<|im_end|> +<|im_start|>assistant diff --git a/template/testdata/chatml.gotmpl/user b/template/testdata/chatml.gotmpl/user new file mode 100644 index 000000000..aa9e597a4 --- /dev/null +++ b/template/testdata/chatml.gotmpl/user @@ -0,0 +1,3 @@ +<|im_start|>user +Hello, how are you?<|im_end|> +<|im_start|>assistant diff --git a/template/testdata/chatml.gotmpl/user-assistant-user b/template/testdata/chatml.gotmpl/user-assistant-user new file mode 100644 index 000000000..a7cba4de0 --- /dev/null +++ b/template/testdata/chatml.gotmpl/user-assistant-user @@ -0,0 +1,7 @@ +<|im_start|>user +Hello, how are you?<|im_end|> +<|im_start|>assistant +I'm doing great. How can I help you today?<|im_end|> +<|im_start|>user +I'd like to show off how chat templating works!<|im_end|> +<|im_start|>assistant diff --git a/template/testdata/chatqa.gotmpl/system-user-assistant-user b/template/testdata/chatqa.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..98fd59bfa --- /dev/null +++ b/template/testdata/chatqa.gotmpl/system-user-assistant-user @@ -0,0 +1,9 @@ +System: You are a helpful assistant. + +User: Hello, how are you? + +Assistant: I'm doing great. How can I help you today? + +User: I'd like to show off how chat templating works! + +Assistant: \ No newline at end of file diff --git a/template/testdata/chatqa.gotmpl/user b/template/testdata/chatqa.gotmpl/user new file mode 100644 index 000000000..9e7cf702d --- /dev/null +++ b/template/testdata/chatqa.gotmpl/user @@ -0,0 +1,3 @@ +User: Hello, how are you? + +Assistant: \ No newline at end of file diff --git a/template/testdata/chatqa.gotmpl/user-assistant-user b/template/testdata/chatqa.gotmpl/user-assistant-user new file mode 100644 index 000000000..405bbe12c --- /dev/null +++ b/template/testdata/chatqa.gotmpl/user-assistant-user @@ -0,0 +1,7 @@ +User: Hello, how are you? + +Assistant: I'm doing great. How can I help you today? + +User: I'd like to show off how chat templating works! + +Assistant: \ No newline at end of file diff --git a/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..fdd0fc8b4 --- /dev/null +++ b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,11 @@ +Source: system + + You are a helpful assistant. Source: user + + Hello, how are you? Source: assistant + + I'm doing great. How can I help you today? Source: user + + I'd like to show off how chat templating works! Source: assistant +Destination: user + diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user b/template/testdata/codellama-70b-instruct.gotmpl/user new file mode 100644 index 000000000..9e7174a84 --- /dev/null +++ b/template/testdata/codellama-70b-instruct.gotmpl/user @@ -0,0 +1,5 @@ +Source: user + + Hello, how are you? 
Source: assistant +Destination: user + diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..b4ba1736b --- /dev/null +++ b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user @@ -0,0 +1,9 @@ +Source: user + + Hello, how are you? Source: assistant + + I'm doing great. How can I help you today? Source: user + + I'd like to show off how chat templating works! Source: assistant +Destination: user + diff --git a/template/testdata/falcon-instruct.gotmpl/system-user-assistant-user b/template/testdata/falcon-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..16e45e5b6 --- /dev/null +++ b/template/testdata/falcon-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,8 @@ +System: You are a helpful assistant. +User: +Hello, how are you? +Falcon: +I'm doing great. How can I help you today? +User: +I'd like to show off how chat templating works! +Falcon: diff --git a/template/testdata/falcon-instruct.gotmpl/user b/template/testdata/falcon-instruct.gotmpl/user new file mode 100644 index 000000000..110831a2c --- /dev/null +++ b/template/testdata/falcon-instruct.gotmpl/user @@ -0,0 +1,3 @@ +User: +Hello, how are you? +Falcon: diff --git a/template/testdata/falcon-instruct.gotmpl/user-assistant-user b/template/testdata/falcon-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..b49639ea5 --- /dev/null +++ b/template/testdata/falcon-instruct.gotmpl/user-assistant-user @@ -0,0 +1,7 @@ +User: +Hello, how are you? +Falcon: +I'm doing great. How can I help you today? +User: +I'd like to show off how chat templating works! +Falcon: diff --git a/template/testdata/gemma-instruct.gotmpl/system-user-assistant-user b/template/testdata/gemma-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..5f6c37324 --- /dev/null +++ b/template/testdata/gemma-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,8 @@ +user +You are a helpful assistant. +Hello, how are you? +model +I'm doing great. How can I help you today? +user +I'd like to show off how chat templating works! +model diff --git a/template/testdata/gemma-instruct.gotmpl/user b/template/testdata/gemma-instruct.gotmpl/user new file mode 100644 index 000000000..dc8b30b68 --- /dev/null +++ b/template/testdata/gemma-instruct.gotmpl/user @@ -0,0 +1,3 @@ +user +Hello, how are you? +model diff --git a/template/testdata/gemma-instruct.gotmpl/user-assistant-user b/template/testdata/gemma-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..1185924b0 --- /dev/null +++ b/template/testdata/gemma-instruct.gotmpl/user-assistant-user @@ -0,0 +1,7 @@ +user +Hello, how are you? +model +I'm doing great. How can I help you today? +user +I'd like to show off how chat templating works! +model diff --git a/template/testdata/granite-instruct.gotmpl/system-user-assistant-user b/template/testdata/granite-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..a732a77fb --- /dev/null +++ b/template/testdata/granite-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,13 @@ +System: +You are a helpful assistant. + +Question: +Hello, how are you? + +Answer: +I'm doing great. How can I help you today? + +Question: +I'd like to show off how chat templating works! 
+ +Answer: diff --git a/template/testdata/granite-instruct.gotmpl/user b/template/testdata/granite-instruct.gotmpl/user new file mode 100644 index 000000000..7abd2ea35 --- /dev/null +++ b/template/testdata/granite-instruct.gotmpl/user @@ -0,0 +1,4 @@ +Question: +Hello, how are you? + +Answer: diff --git a/template/testdata/granite-instruct.gotmpl/user-assistant-user b/template/testdata/granite-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..da5e43eae --- /dev/null +++ b/template/testdata/granite-instruct.gotmpl/user-assistant-user @@ -0,0 +1,10 @@ +Question: +Hello, how are you? + +Answer: +I'm doing great. How can I help you today? + +Question: +I'd like to show off how chat templating works! + +Answer: diff --git a/template/testdata/llama2-chat.gotmpl/system-user-assistant-user b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..fc2679bf0 --- /dev/null +++ b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user @@ -0,0 +1,5 @@ +[INST] <> +You are a helpful assistant. +<> + +Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST] \ No newline at end of file diff --git a/template/testdata/llama2-chat.gotmpl/user b/template/testdata/llama2-chat.gotmpl/user new file mode 100644 index 000000000..ceef9bdbb --- /dev/null +++ b/template/testdata/llama2-chat.gotmpl/user @@ -0,0 +1,3 @@ +[INST] <><> + +Hello, how are you? [/INST] \ No newline at end of file diff --git a/template/testdata/llama2-chat.gotmpl/user-assistant-user b/template/testdata/llama2-chat.gotmpl/user-assistant-user new file mode 100644 index 000000000..42b4c5294 --- /dev/null +++ b/template/testdata/llama2-chat.gotmpl/user-assistant-user @@ -0,0 +1,3 @@ +[INST] <><> + +Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST] \ No newline at end of file diff --git a/template/testdata/llama3-instruct.gotmpl/system-user-assistant-user b/template/testdata/llama3-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..6740bcb4d --- /dev/null +++ b/template/testdata/llama3-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,10 @@ +<|start_header_id|>system<|end_header_id|> + +You are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|> + +Hello, how are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +I'm doing great. How can I help you today?<|eot_id|><|start_header_id|>user<|end_header_id|> + +I'd like to show off how chat templating works!<|eot_id|><|start_header_id|>assistant<|end_header_id|> + diff --git a/template/testdata/llama3-instruct.gotmpl/user b/template/testdata/llama3-instruct.gotmpl/user new file mode 100644 index 000000000..470aa028f --- /dev/null +++ b/template/testdata/llama3-instruct.gotmpl/user @@ -0,0 +1,4 @@ +<|start_header_id|>user<|end_header_id|> + +Hello, how are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|> + diff --git a/template/testdata/llama3-instruct.gotmpl/user-assistant-user b/template/testdata/llama3-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..6dd768af5 --- /dev/null +++ b/template/testdata/llama3-instruct.gotmpl/user-assistant-user @@ -0,0 +1,8 @@ +<|start_header_id|>user<|end_header_id|> + +Hello, how are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|> + +I'm doing great. 
How can I help you today?<|eot_id|><|start_header_id|>user<|end_header_id|> + +I'd like to show off how chat templating works!<|eot_id|><|start_header_id|>assistant<|end_header_id|> + diff --git a/template/testdata/magicoder.gotmpl/system-user-assistant-user b/template/testdata/magicoder.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..c966a861d --- /dev/null +++ b/template/testdata/magicoder.gotmpl/system-user-assistant-user @@ -0,0 +1,12 @@ +You are a helpful assistant. + +@@ Instruction +Hello, how are you? + +@@ Response +I'm doing great. How can I help you today? + +@@ Instruction +I'd like to show off how chat templating works! + +@@ Response diff --git a/template/testdata/magicoder.gotmpl/user b/template/testdata/magicoder.gotmpl/user new file mode 100644 index 000000000..ccfb02bd2 --- /dev/null +++ b/template/testdata/magicoder.gotmpl/user @@ -0,0 +1,4 @@ +@@ Instruction +Hello, how are you? + +@@ Response diff --git a/template/testdata/magicoder.gotmpl/user-assistant-user b/template/testdata/magicoder.gotmpl/user-assistant-user new file mode 100644 index 000000000..3aea6dab9 --- /dev/null +++ b/template/testdata/magicoder.gotmpl/user-assistant-user @@ -0,0 +1,10 @@ +@@ Instruction +Hello, how are you? + +@@ Response +I'm doing great. How can I help you today? + +@@ Instruction +I'd like to show off how chat templating works! + +@@ Response diff --git a/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..b6b4bf93e --- /dev/null +++ b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,2 @@ +[INST] Hello, how are you?[/INST] I'm doing great. How can I help you today?[INST] You are a helpful assistant. +I'd like to show off how chat templating works![/INST] \ No newline at end of file diff --git a/template/testdata/mistral-instruct.gotmpl/user b/template/testdata/mistral-instruct.gotmpl/user new file mode 100644 index 000000000..b04871e5d --- /dev/null +++ b/template/testdata/mistral-instruct.gotmpl/user @@ -0,0 +1 @@ +[INST] Hello, how are you?[/INST] \ No newline at end of file diff --git a/template/testdata/mistral-instruct.gotmpl/user-assistant-user b/template/testdata/mistral-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..b473e0df0 --- /dev/null +++ b/template/testdata/mistral-instruct.gotmpl/user-assistant-user @@ -0,0 +1 @@ +[INST] Hello, how are you?[/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works![/INST] \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/system-user-assistant-user b/template/testdata/openchat.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..1214c1264 --- /dev/null +++ b/template/testdata/openchat.gotmpl/system-user-assistant-user @@ -0,0 +1 @@ +GPT Correct System: You are a helpful assistant.<|end_of_turn|>GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. 
How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user b/template/testdata/openchat.gotmpl/user new file mode 100644 index 000000000..611daa83e --- /dev/null +++ b/template/testdata/openchat.gotmpl/user @@ -0,0 +1 @@ +GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user-assistant-user b/template/testdata/openchat.gotmpl/user-assistant-user new file mode 100644 index 000000000..f97b02b9c --- /dev/null +++ b/template/testdata/openchat.gotmpl/user-assistant-user @@ -0,0 +1 @@ +GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file diff --git a/template/testdata/phi-3.gotmpl/system-user-assistant-user b/template/testdata/phi-3.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..6109a9a24 --- /dev/null +++ b/template/testdata/phi-3.gotmpl/system-user-assistant-user @@ -0,0 +1,9 @@ +<|system|> +You are a helpful assistant.<|end|> +<|user|> +Hello, how are you?<|end|> +<|assistant|> +I'm doing great. How can I help you today?<|end|> +<|user|> +I'd like to show off how chat templating works!<|end|> +<|assistant|> diff --git a/template/testdata/phi-3.gotmpl/user b/template/testdata/phi-3.gotmpl/user new file mode 100644 index 000000000..feb96e7c9 --- /dev/null +++ b/template/testdata/phi-3.gotmpl/user @@ -0,0 +1,3 @@ +<|user|> +Hello, how are you?<|end|> +<|assistant|> diff --git a/template/testdata/phi-3.gotmpl/user-assistant-user b/template/testdata/phi-3.gotmpl/user-assistant-user new file mode 100644 index 000000000..db79d01c1 --- /dev/null +++ b/template/testdata/phi-3.gotmpl/user-assistant-user @@ -0,0 +1,7 @@ +<|user|> +Hello, how are you?<|end|> +<|assistant|> +I'm doing great. How can I help you today?<|end|> +<|user|> +I'd like to show off how chat templating works!<|end|> +<|assistant|> diff --git a/template/testdata/solar-instruct.gotmpl/system-user-assistant-user b/template/testdata/solar-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..28c1730ab --- /dev/null +++ b/template/testdata/solar-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,13 @@ +### System: +You are a helpful assistant. + +### User: +Hello, how are you? + +### Assistant: +I'm doing great. How can I help you today? + +### User: +I'd like to show off how chat templating works! + +### Assistant: diff --git a/template/testdata/solar-instruct.gotmpl/user b/template/testdata/solar-instruct.gotmpl/user new file mode 100644 index 000000000..3a43382af --- /dev/null +++ b/template/testdata/solar-instruct.gotmpl/user @@ -0,0 +1,4 @@ +### User: +Hello, how are you? + +### Assistant: diff --git a/template/testdata/solar-instruct.gotmpl/user-assistant-user b/template/testdata/solar-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..8553e73ba --- /dev/null +++ b/template/testdata/solar-instruct.gotmpl/user-assistant-user @@ -0,0 +1,10 @@ +### User: +Hello, how are you? + +### Assistant: +I'm doing great. How can I help you today? + +### User: +I'd like to show off how chat templating works! 
+ +### Assistant: diff --git a/template/testdata/starcoder2-instruct.gotmpl/system-user-assistant-user b/template/testdata/starcoder2-instruct.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..5b718b3ec --- /dev/null +++ b/template/testdata/starcoder2-instruct.gotmpl/system-user-assistant-user @@ -0,0 +1,12 @@ +You are a helpful assistant. + +### Instruction +Hello, how are you? + +### Response +I'm doing great. How can I help you today?<|endoftext|> + +### Instruction +I'd like to show off how chat templating works! + +### Response diff --git a/template/testdata/starcoder2-instruct.gotmpl/user b/template/testdata/starcoder2-instruct.gotmpl/user new file mode 100644 index 000000000..11b0be1fe --- /dev/null +++ b/template/testdata/starcoder2-instruct.gotmpl/user @@ -0,0 +1,4 @@ +### Instruction +Hello, how are you? + +### Response diff --git a/template/testdata/starcoder2-instruct.gotmpl/user-assistant-user b/template/testdata/starcoder2-instruct.gotmpl/user-assistant-user new file mode 100644 index 000000000..d99feabb0 --- /dev/null +++ b/template/testdata/starcoder2-instruct.gotmpl/user-assistant-user @@ -0,0 +1,10 @@ +### Instruction +Hello, how are you? + +### Response +I'm doing great. How can I help you today?<|endoftext|> + +### Instruction +I'd like to show off how chat templating works! + +### Response diff --git a/template/testdata/vicuna.gotmpl/system-user-assistant-user b/template/testdata/vicuna.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..50d2f92c1 --- /dev/null +++ b/template/testdata/vicuna.gotmpl/system-user-assistant-user @@ -0,0 +1,6 @@ +You are a helpful assistant. + +USER: Hello, how are you? +ASSISTANT: I'm doing great. How can I help you today? +USER: I'd like to show off how chat templating works! +ASSISTANT: \ No newline at end of file diff --git a/template/testdata/vicuna.gotmpl/user b/template/testdata/vicuna.gotmpl/user new file mode 100644 index 000000000..cbe5ef709 --- /dev/null +++ b/template/testdata/vicuna.gotmpl/user @@ -0,0 +1,2 @@ +USER: Hello, how are you? +ASSISTANT: \ No newline at end of file diff --git a/template/testdata/vicuna.gotmpl/user-assistant-user b/template/testdata/vicuna.gotmpl/user-assistant-user new file mode 100644 index 000000000..9172547e3 --- /dev/null +++ b/template/testdata/vicuna.gotmpl/user-assistant-user @@ -0,0 +1,4 @@ +USER: Hello, how are you? +ASSISTANT: I'm doing great. How can I help you today? +USER: I'd like to show off how chat templating works! +ASSISTANT: \ No newline at end of file diff --git a/template/testdata/zephyr.gotmpl/system-user-assistant-user b/template/testdata/zephyr.gotmpl/system-user-assistant-user new file mode 100644 index 000000000..03d43fc34 --- /dev/null +++ b/template/testdata/zephyr.gotmpl/system-user-assistant-user @@ -0,0 +1,9 @@ +<|system|> +You are a helpful assistant. +<|user|> +Hello, how are you? +<|assistant|> +I'm doing great. How can I help you today? +<|user|> +I'd like to show off how chat templating works! +<|assistant|> diff --git a/template/testdata/zephyr.gotmpl/user b/template/testdata/zephyr.gotmpl/user new file mode 100644 index 000000000..6cefdaa0f --- /dev/null +++ b/template/testdata/zephyr.gotmpl/user @@ -0,0 +1,3 @@ +<|user|> +Hello, how are you? 
+<|assistant|> diff --git a/template/testdata/zephyr.gotmpl/user-assistant-user b/template/testdata/zephyr.gotmpl/user-assistant-user new file mode 100644 index 000000000..3937b006a --- /dev/null +++ b/template/testdata/zephyr.gotmpl/user-assistant-user @@ -0,0 +1,7 @@ +<|user|> +Hello, how are you? +<|assistant|> +I'm doing great. How can I help you today? +<|user|> +I'd like to show off how chat templating works! +<|assistant|> diff --git a/template/vicuna.gotmpl b/template/vicuna.gotmpl index 174c1a353..2e13e990d 100644 --- a/template/vicuna.gotmpl +++ b/template/vicuna.gotmpl @@ -1,3 +1,14 @@ +{{- if .Messages }} +{{- if .System }}{{ .System }} + +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}USER: {{ .Content }} +{{ else if eq .Role "assistant" }}ASSISTANT: {{ .Content }} +{{ end }} +{{- end }}ASSISTANT: +{{- else }} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} -{{ end }}ASSISTANT: {{ .Response }} \ No newline at end of file +{{ end }}ASSISTANT: {{ .Response }} +{{- end }} \ No newline at end of file diff --git a/template/zephyr.gotmpl b/template/zephyr.gotmpl index aac0c7a1f..e66688480 100644 --- a/template/zephyr.gotmpl +++ b/template/zephyr.gotmpl @@ -1,6 +1,15 @@ +{{- if .Messages }} +{{- if .System }}<|system|> +{{ .System }} +{{ end }} +{{- range .Messages }}<|{{ .Role }}|> +{{ .Content }} +{{ end }}<|assistant|> +{{ else }} {{ if .System }}<|system|> {{ .System }} {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }} {{ end }}<|assistant|> -{{ .Response }} \ No newline at end of file +{{ .Response }} +{{- end }} \ No newline at end of file From 5304b765b2bf934070e06412f6617b97a56ae3d2 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 19:34:21 -0400 Subject: [PATCH 068/106] llm: put back old include dir (#5507) * llm: put back old include dir * llm: update link paths for old submodule commits --- llm/llm.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/llm/llm.go b/llm/llm.go index fb6d4b5c7..98fe7f09a 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,12 +1,13 @@ package llm -// #cgo CFLAGS: -Illama.cpp/include -Illama.cpp/ggml/include -// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/src/libllama.a ${SRCDIR}/build/darwin/arm64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal -// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/src/libllama.a ${SRCDIR}/build/darwin/x86_64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal -// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/src/libllama.a ${SRCDIR}/build/windows/amd64_static/ggml/src/libggml.a -static -lstdc++ -// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/src/libllama.a ${SRCDIR}/build/windows/arm64_static/ggml/src/libggml.a -static -lstdc++ -// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/src/libllama.a ${SRCDIR}/build/linux/x86_64_static/ggml/src/libggml.a -lstdc++ -// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml.a -lstdc++ +// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include +// #cgo LDFLAGS: -lllama -lggml -lstdc++ +// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal +// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static 
-L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src +// #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src +// #cgo windows,arm64 LDFLAGS: -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src +// #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src +// #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src // #include // #include "llama.h" import "C" From 2cc854f8cb5b9670fc53134f8104569c60d535be Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 21:48:31 -0400 Subject: [PATCH 069/106] llm: fix missing dylibs by restoring old build behavior on Linux and macOS (#5511) * Revert "fix cmake build (#5505)" This reverts commit 4fd5f3526a116d05cd74cfcc7217d4e6326e1bea. * llm: fix missing dylibs by restoring old build behavior * crlf -> lf --- llm/ext_server/CMakeLists.txt | 28 ++++++++++++---------------- llm/generate/gen_common.sh | 1 - llm/generate/gen_darwin.sh | 6 +++--- llm/generate/gen_linux.sh | 2 +- 4 files changed, 16 insertions(+), 21 deletions(-) diff --git a/llm/ext_server/CMakeLists.txt b/llm/ext_server/CMakeLists.txt index c300244f9..b63f3c0e5 100644 --- a/llm/ext_server/CMakeLists.txt +++ b/llm/ext_server/CMakeLists.txt @@ -1,17 +1,13 @@ - -set(TARGET ollama_llama_server) -option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) -target_compile_definitions(${TARGET} PRIVATE - SERVER_VERBOSE=$ -) -target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) -install(TARGETS ollama_llama_server ggml llama - RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin" - LIBRARY DESTINATION "${CMAKE_BINARY_DIR}/bin" - COMPONENT ollama_llama_server) -if (WIN32) - TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) -endif() +set(TARGET ollama_llama_server) +option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) +install(TARGETS ${TARGET} RUNTIME) +target_compile_definitions(${TARGET} PRIVATE + SERVER_VERBOSE=$ +) +target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) +if (WIN32) + TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) +endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) \ No newline at end of file diff --git a/llm/generate/gen_common.sh b/llm/generate/gen_common.sh index 23feaf99d..da1b06882 100644 --- a/llm/generate/gen_common.sh +++ b/llm/generate/gen_common.sh @@ -81,7 +81,6 @@ apply_patches() { build() { cmake -S ${LLAMACPP_DIR} -B ${BUILD_DIR} ${CMAKE_DEFS} cmake --build ${BUILD_DIR} ${CMAKE_TARGETS} -j8 - cmake --install ${BUILD_DIR} --component ollama_llama_server } compress() { diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh index 02577545a..8b4779f95 100755 --- a/llm/generate/gen_darwin.sh +++ b/llm/generate/gen_darwin.sh @@ -18,7 +18,7 @@ sign() { fi } -COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on 
-DGGML_OPENMP=off" +COMMON_DARWIN_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off" case "${GOARCH}" in "amd64") @@ -27,7 +27,7 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build @@ -75,7 +75,7 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" + CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index c36862520..2bea1c4e6 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -51,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then export CUDACXX=$(command -v nvcc) fi fi -COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off" +COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off" source $(dirname $0)/gen_common.sh init_vars git_module_setup From e0348d3fe8042b7e378a7cbcee95d17d20a14017 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 22:42:42 -0400 Subject: [PATCH 070/106] llm: add `COMMON_DARWIN_DEFS` to arm static build (#5513) --- llm/generate/gen_darwin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh index 8b4779f95..6c0b62cb7 100755 --- a/llm/generate/gen_darwin.sh +++ b/llm/generate/gen_darwin.sh @@ -75,7 +75,7 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build From 9ae146993e9ec834b95d038df1eecac68a744f18 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 03:27:05 -0400 Subject: [PATCH 071/106] llm: add `GGML_STATIC` flag to windows static lib --- llm/generate/gen_windows.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 5c6943502..123c44cc1 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -204,6 +204,7 @@ 
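
The build changes in the surrounding patches switch the bundled llama.cpp libraries to static archives, and the companion llm.go patches adjust the #cgo directives that link them into the Go binary. A minimal self-contained cgo program (an assumed example, not Ollama code) showing how those directives behave: plain #cgo lines always apply, while GOOS-prefixed lines such as the windows one are only added when building for that platform.

package main

/*
#cgo CFLAGS: -O2
#cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static
#include <stdlib.h>
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	s := C.CString("42")
	defer C.free(unsafe.Pointer(s)) // C strings are allocated with malloc and must be freed
	fmt.Println(C.atoi(s))          // prints 42, parsed by the C runtime linked in through cgo
}
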
function build_static() { "-DCMAKE_C_COMPILER=gcc.exe", "-DCMAKE_CXX_COMPILER=g++.exe", "-DBUILD_SHARED_LIBS=off", + "-DGGML_STATIC=on", "-DGGML_NATIVE=off", "-DGGML_AVX=off", "-DGGML_AVX2=off", From f1a379aa566f7a9fefb2a64ac35faf34d9c00812 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 12:54:02 -0400 Subject: [PATCH 072/106] llm: statically link pthread and stdc++ dependencies in windows build --- llm/generate/gen_windows.ps1 | 1 - llm/llm.go | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 123c44cc1..5c6943502 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -204,7 +204,6 @@ function build_static() { "-DCMAKE_C_COMPILER=gcc.exe", "-DCMAKE_CXX_COMPILER=g++.exe", "-DBUILD_SHARED_LIBS=off", - "-DGGML_STATIC=on", "-DGGML_NATIVE=off", "-DGGML_AVX=off", "-DGGML_AVX2=off", diff --git a/llm/llm.go b/llm/llm.go index 98fe7f09a..3cd162e0c 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,7 +1,8 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include -// #cgo LDFLAGS: -lllama -lggml -lstdc++ +// #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread +// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From 5796bfc4013f4ebe26cdbf13554332a25c405027 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 14:06:20 -0400 Subject: [PATCH 073/106] llm: only statically link libstdc++ --- .github/workflows/release.yaml | 4 ++++ llm/llm.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 61ca3c433..1042c6845 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -304,6 +304,10 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" + - name: remove unwanted mingw dll.a files + run: | + Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libpthread.dll.a" + Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libwinpthread.dll.a" - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/llm/llm.go b/llm/llm.go index 3cd162e0c..ac6a52490 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,8 +1,8 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include +// #cgo windows LDFLAGS: -static-libstdc++ // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread -// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From 
6cea0360276e5fc7e2fecbe0cadf89cc72615279 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 15:10:48 -0400 Subject: [PATCH 074/106] Revert "llm: only statically link libstdc++" This reverts commit 5796bfc4013f4ebe26cdbf13554332a25c405027. --- .github/workflows/release.yaml | 4 ---- llm/llm.go | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 1042c6845..61ca3c433 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -304,10 +304,6 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" - - name: remove unwanted mingw dll.a files - run: | - Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libpthread.dll.a" - Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libwinpthread.dll.a" - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/llm/llm.go b/llm/llm.go index ac6a52490..3cd162e0c 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,8 +1,8 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include -// #cgo windows LDFLAGS: -static-libstdc++ // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread +// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From a08f20d910194edff79d45315330a088fda3f136 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 15:21:15 -0400 Subject: [PATCH 075/106] release: remove unwanted mingw dll.a files --- .github/workflows/release.yaml | 5 +++++ llm/llm.go | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 61ca3c433..d1faf9f5b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -85,6 +85,11 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" + - name: remove unwanted mingw dll.a files + run: | + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/llm/llm.go b/llm/llm.go index 3cd162e0c..88c0258d6 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -2,7 +2,6 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread -// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 
LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From c12f1c5b99c9d9f9388f464aa77063987fdb8f0f Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 16:12:29 -0400 Subject: [PATCH 076/106] release: move mingw library cleanup to correct job --- .github/workflows/release.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d1faf9f5b..0005c69d3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -85,11 +85,6 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" - - name: remove unwanted mingw dll.a files - run: | - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force - uses: actions/setup-go@v5 with: go-version-file: go.mod @@ -309,6 +304,11 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" + - name: remove unwanted mingw dll.a files + run: | + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force - uses: actions/setup-go@v5 with: go-version-file: go.mod From 4607c706413f1354d0e762d25a9a0a933edc14ec Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sat, 6 Jul 2024 18:58:16 -0400 Subject: [PATCH 077/106] llm: add `-DBUILD_SHARED_LIBS=off` to common cpu cmake flags (#5520) --- llm/generate/gen_linux.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 2bea1c4e6..d3e2d13ba 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -77,7 +77,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then init_vars echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\"" - CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}" + CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu" echo "Building custom CPU" build @@ -93,7 +93,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake - COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off" + COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off" if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then # # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta) From f8241bfba384cf8c888847dc44b73d7f43a42d82 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sat, 6 Jul 2024 19:35:04 -0400 Subject: [PATCH 078/106] gpu: report system free memory instead of 0 (#5521) --- gpu/gpu_darwin.go | 2 +- gpu/gpu_info_darwin.h | 1 + gpu/gpu_info_darwin.m | 26 ++++++++++++++++++++++++-- 3 files changed, 26 
insertions(+), 3 deletions(-) diff --git a/gpu/gpu_darwin.go b/gpu/gpu_darwin.go index f26d23c12..39d8fcf89 100644 --- a/gpu/gpu_darwin.go +++ b/gpu/gpu_darwin.go @@ -56,7 +56,7 @@ func GetCPUInfo() GpuInfoList { func GetCPUMem() (memInfo, error) { return memInfo{ TotalMemory: uint64(C.getPhysicalMemory()), - FreeMemory: 0, + FreeMemory: uint64(C.getFreeMemory()), }, nil } diff --git a/gpu/gpu_info_darwin.h b/gpu/gpu_info_darwin.h index 3edca237c..415e7922d 100644 --- a/gpu/gpu_info_darwin.h +++ b/gpu/gpu_info_darwin.h @@ -2,3 +2,4 @@ #include uint64_t getRecommendedMaxVRAM(); uint64_t getPhysicalMemory(); +uint64_t getFreeMemory(); diff --git a/gpu/gpu_info_darwin.m b/gpu/gpu_info_darwin.m index a145ac076..5ca139e0b 100644 --- a/gpu/gpu_info_darwin.m +++ b/gpu/gpu_info_darwin.m @@ -1,4 +1,5 @@ -// go:build darwin +#import +#import #include "gpu_info_darwin.h" uint64_t getRecommendedMaxVRAM() { @@ -8,6 +9,27 @@ uint64_t getRecommendedMaxVRAM() { return result; } +// getPhysicalMemory returns the total physical memory in bytes uint64_t getPhysicalMemory() { - return [[NSProcessInfo processInfo] physicalMemory]; + return [NSProcessInfo processInfo].physicalMemory; +} + +// getFreeMemory returns the total free memory in bytes, including inactive +// memory that can be reclaimed by the system. +uint64_t getFreeMemory() { + mach_port_t host_port = mach_host_self(); + mach_msg_type_number_t host_size = sizeof(vm_statistics64_data_t) / sizeof(integer_t); + vm_size_t pagesize; + vm_statistics64_data_t vm_stat; + + host_page_size(host_port, &pagesize); + if (host_statistics64(host_port, HOST_VM_INFO64, (host_info64_t)&vm_stat, &host_size) != KERN_SUCCESS) { + return 0; + } + + uint64_t free_memory = (uint64_t)vm_stat.free_count * pagesize; + free_memory += (uint64_t)vm_stat.speculative_count * pagesize; + free_memory += (uint64_t)vm_stat.inactive_count * pagesize; + + return free_memory; } From 0ee87615c74c69d8fbc3cad8f3ea5a2364b1a876 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sat, 6 Jul 2024 22:01:52 -0400 Subject: [PATCH 079/106] sched: don't error if paging to disk on Windows and macOS (#5523) --- server/sched.go | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/server/sched.go b/server/sched.go index 8c054c6b4..9dff2ae07 100644 --- a/server/sched.go +++ b/server/sched.go @@ -197,25 +197,36 @@ func (s *Scheduler) processPending(ctx context.Context) { break } - // Block attempting to load a model larger than system memory + GPU memory estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts) maxSize := systemMem.FreeMemory - for _, gpu := range gpus { - if gpu.Library == "cpu" { - continue - } - if loadedCount == 0 { - // If no other models are loaded, set the limit based on what's available - maxSize += gpu.FreeMemory - } else { - // Other models could be unloaded, favor total memory for limit - maxSize += gpu.TotalMemory + + // Add available GPU memory to the total pool + // macOS hardware has unified memory so don't double count + if runtime.GOOS != "darwin" { + for _, gpu := range gpus { + if gpu.Library == "cpu" { + continue + } + if loadedCount == 0 { + // If no other models are loaded, set the limit based on what's available + maxSize += gpu.FreeMemory + } else { + // Other models could be unloaded, favor total memory for limit + maxSize += gpu.TotalMemory + } } } + + // Block attempting to load a model larger than system memory + GPU memory if estimate.TotalSize > maxSize { slog.Warn("model 
request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize)) - pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) - break + + // Linux will crash if over-allocating memory - return an error to the user. + // TODO (jmorganca): add reasonable upper limits for darwin and windows as well + if runtime.GOOS == "linux" { + pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) + break + } } // Evaluate if the model will fit in the available system memory, or if we should unload a model first From 0e09c380fcae8b81db3c3447d70d721cfad00dbd Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 12:38:04 -0400 Subject: [PATCH 080/106] llm: print caching notices in debug only (#5533) --- llm/ext_server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 00a15b4a3..7ae58e382 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1413,7 +1413,7 @@ struct llama_server_context return get_slot(-1); } - LOG_INFO("slot with common prefix found", {{ + LOG_DEBUG("slot with common prefix found", {{ "slot_id", slot->id, "characters", longest }}); From 571dc61955ced560a45e9d32b1cd2a52d9803c8c Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 13:03:09 -0400 Subject: [PATCH 081/106] Update llama.cpp submodule to `a8db2a9c` (#5530) --- llm/llama.cpp | 2 +- llm/patches/05-default-pretokenizer.diff | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/llm/llama.cpp b/llm/llama.cpp index d7fd29fff..a8db2a9ce 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit d7fd29fff16456ce9c3a23fd2d09a66256b05aff +Subproject commit a8db2a9ce64cd4417f6a312ab61858f17f0f8584 diff --git a/llm/patches/05-default-pretokenizer.diff b/llm/patches/05-default-pretokenizer.diff index f4eaced72..341a6f590 100644 --- a/llm/patches/05-default-pretokenizer.diff +++ b/llm/patches/05-default-pretokenizer.diff @@ -1,11 +1,11 @@ diff --git a/src/llama.cpp b/src/llama.cpp -index 73f52435..2b81b4bd 100644 +index 2b9ace28..172640e2 100644 --- a/src/llama.cpp +++ b/src/llama.cpp -@@ -5092,16 +5092,7 @@ static void llm_load_vocab( - - // for now, only BPE models have pre-tokenizers +@@ -5357,16 +5357,7 @@ static void llm_load_vocab( if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = true; - if (tokenizer_pre.empty()) { - LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__); - LLAMA_LOG_WARN("%s: \n", __func__); @@ -20,7 +20,7 @@ index 73f52435..2b81b4bd 100644 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } else if ( tokenizer_pre == "llama3" || -@@ -5164,7 +5155,8 @@ static void llm_load_vocab( +@@ -5439,7 +5430,8 @@ static void llm_load_vocab( tokenizer_pre == "jais") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS; } else { From d8def1ff9432ef60d1067e5e6dde0d700dd95021 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 13:41:51 -0400 Subject: [PATCH 082/106] llm: allow gemma 2 to context shift (#5534) --- llm/ext_server/server.cpp | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 7ae58e382..0ef3956ec 
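
A condensed, illustrative version of the scheduler check changed in PATCH 079 above (not the actual server/sched.go code): macOS has unified memory, so GPU memory is not added on top of system memory, and only Linux turns an oversized estimate into a hard error because over-allocating there can crash the process; Windows and macOS are allowed to page instead.

package main

import (
	"fmt"
	"runtime"
)

// fits mirrors the shape of the check: compute the memory pool the model could occupy,
// then decide whether exceeding it is fatal or merely worth a warning.
func fits(estimate, systemFree uint64, gpuFree []uint64) error {
	maxSize := systemFree
	if runtime.GOOS != "darwin" { // unified memory on macOS: don't double count the GPU
		for _, free := range gpuFree {
			maxSize += free
		}
	}
	if estimate > maxSize {
		fmt.Printf("warning: requested %d bytes but only %d available\n", estimate, maxSize)
		if runtime.GOOS == "linux" { // Linux would crash rather than page, so refuse
			return fmt.Errorf("requested model (%d) is too large for this system (%d)", estimate, maxSize)
		}
	}
	return nil
}

func main() {
	fmt.Println(fits(32<<30, 16<<30, []uint64{8 << 30}))
}
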
100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1688,22 +1688,8 @@ struct llama_server_context } slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep); - char buf[256]; - llama_model_meta_val_str(model, "general.architecture", buf, 256); - bool gemma2 = strcmp(buf, "gemma2") == 0; - - int32_t truncate_at = slot.n_ctx; - - // truncate at 2/3 of the context length for gemma2 models - // as they do not support context shifts (from the sliding window implementation). - // this way, prompts that almost fit the context length can still generate a full - // response without a sudden stop from hitting the context limit - if (gemma2) { - truncate_at = 2 * slot.n_ctx / 3; - } - // if input prompt is too big, truncate it, if group attention self-extend is disabled - if (slot.ga_n == 1 && slot.n_prompt_tokens >= truncate_at) + if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx) { const int n_left = slot.n_ctx - slot.params.n_keep; const int n_shift = n_left / 2; @@ -1731,19 +1717,6 @@ struct llama_server_context GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx); } - // Models with sliding window attention do not work with context shifts, so - // limit their prediction to the context length - if (gemma2) { - int32_t limit = slot.n_ctx - slot.n_prompt_tokens; - slot.n_predict = limit; - slot.params.n_predict = limit; - LOG_INFO("model does not support sliding window, limiting generation", { - {"n_ctx", slot.n_ctx}, - {"n_prompt_tokens", slot.n_prompt_tokens}, - {"n_predict", slot.n_predict} - }); - } - if (!slot.params.cache_prompt) { llama_sampling_reset(slot.ctx_sampling); From 53da2c69654769c0c086af695722e1d9b9ee6ecc Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 14:32:05 -0400 Subject: [PATCH 083/106] llm: remove ambiguous comment when putting upper limit on predictions to avoid infinite generation (#5535) --- llm/server.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/llm/server.go b/llm/server.go index 206f9e391..54fad92ce 100644 --- a/llm/server.go +++ b/llm/server.go @@ -699,10 +699,9 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu } defer s.sem.Release(1) - // only allow maximum 10 "context shifts" to avoid infinite generation + // put an upper limit on num_predict to avoid the model running on forever if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx { req.Options.NumPredict = 10 * s.options.NumCtx - slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict) } request := map[string]any{ From 0bacb300071ba4baa928075b142633f2e85281ab Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 5 Jul 2024 12:46:28 -0700 Subject: [PATCH 084/106] Workaround broken ROCm p2p copy Enable the build flag for llama.cpp to use CPU copy for multi-GPU scenarios. --- llm/generate/gen_linux.sh | 2 +- llm/generate/gen_windows.ps1 | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index d3e2d13ba..304eadbd9 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -254,7 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. 
|| true) fi init_vars - CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)" + CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DLLAMA_CUDA_NO_PEER_COPY=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)" # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\"" diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 5c6943502..26bc4fa3e 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -366,6 +366,7 @@ function build_rocm() { "-DCMAKE_C_COMPILER=clang.exe", "-DCMAKE_CXX_COMPILER=clang++.exe", "-DGGML_HIPBLAS=on", + "-DLLAMA_CUDA_NO_PEER_COPY=on", "-DHIP_PLATFORM=amd", "-DGGML_AVX=on", "-DGGML_AVX2=off", From b44320db1302baea88e2f318d984218c68faa5f1 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 8 Jul 2024 18:24:21 -0700 Subject: [PATCH 085/106] Bundle missing CRT libraries Some users are experienging runner startup errors due to not having these msvc redist libraries on their host --- scripts/build_windows.ps1 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/build_windows.ps1 b/scripts/build_windows.ps1 index b3991ce1f..edc737593 100644 --- a/scripts/build_windows.ps1 +++ b/scripts/build_windows.ps1 @@ -107,9 +107,12 @@ function gatherDependencies() { # TODO - this varies based on host build system and MSVC version - drive from dumpbin output # currently works for Win11 + MSVC 2019 + Cuda V11 - cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140.dll" "${script:DEPS_DIR}\ollama_runners\" + cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\msvcp140*.dll" "${script:DEPS_DIR}\ollama_runners\" cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140.dll" "${script:DEPS_DIR}\ollama_runners\" cp "${env:VCToolsRedistDir}\x64\Microsoft.VC*.CRT\vcruntime140_1.dll" "${script:DEPS_DIR}\ollama_runners\" + foreach ($part in $("runtime", "stdio", "filesystem", "math", "convert", "heap", "string", "time", "locale", "environment")) { + cp "$env:VCToolsRedistDir\..\..\..\Tools\Llvm\x64\bin\api-ms-win-crt-${part}*.dll" "${script:DEPS_DIR}\ollama_runners\" + } cp "${script:SRC_DIR}\app\ollama_welcome.ps1" "${script:SRC_DIR}\dist\" From e4ff73297db2f53f1ea4b603df5670c5bde6a944 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Mon, 8 Jul 2024 22:32:15 -0700 Subject: [PATCH 086/106] server: fix model reloads when setting `OLLAMA_NUM_PARALLEL` (#5560) * server: fix unneeded model reloads when setting `OLLAMA_NUM_PARALLEL` * remove whitespace change * undo some changes --- server/sched.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/server/sched.go b/server/sched.go index 9dff2ae07..48047bfec 100644 --- a/server/sched.go +++ b/server/sched.go @@ -133,10 +133,6 @@ func (s *Scheduler) processPending(ctx context.Context) { numParallel = 1 slog.Warn("multimodal models don't support parallel requests yet") } - // Keep NumCtx and numParallel in sync - if numParallel > 1 { - pending.opts.NumCtx = pending.origNumCtx * numParallel - } for { cpus := s.getCpuFn() @@ -234,9 +230,10 @@ func (s *Scheduler) processPending(ctx context.Context) { // simplifying assumption of 
defaultParallel when in CPU mode if numParallel <= 0 { numParallel = defaultParallel - pending.opts.NumCtx = pending.origNumCtx * numParallel } + pending.opts.NumCtx = pending.origNumCtx * numParallel + if loadedCount == 0 { slog.Debug("cpu mode with first model, loading") s.loadFn(pending, ggml, gpus, numParallel) From b51e3b63ac7bc995e99f3a8f7c1b507a1f8fb5d9 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 9 Jul 2024 11:17:44 -0700 Subject: [PATCH 087/106] Statically link c++ and thread lib This makes sure we statically link the c++ and thread library on windows to avoid unnecessary runtime dependencies on non-standard DLLs --- .github/workflows/release.yaml | 5 ----- llm/llm.go | 4 ++-- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 0005c69d3..61ca3c433 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -304,11 +304,6 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" - - name: remove unwanted mingw dll.a files - run: | - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/llm/llm.go b/llm/llm.go index 88c0258d6..f2a5e557a 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -4,8 +4,8 @@ package llm // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src -// #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src -// #cgo windows,arm64 LDFLAGS: -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src +// #cgo windows,amd64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src +// #cgo windows,arm64 LDFLAGS: -static-libstdc++ -static-libgcc -static -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src // #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src // #include From f6f759fc5fb4868125b8a25c28ce96d2c0980ef7 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 9 Jul 2024 10:27:53 -0700 Subject: [PATCH 088/106] Detect CUDA OS Overhead This adds logic to detect skew between the driver and management library which can be attributed to OS overhead and records that so we can adjust subsequent management library free VRAM updates and avoid OOM scenarios. 
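
A simplified sketch of the bookkeeping this patch adds (illustrative, not the actual gpu package code): at discovery time, any extra free VRAM reported by the management library (NVML) over the runtime library's view is recorded as overhead, and later free-VRAM refreshes through the management library subtract it again so the scheduler does not over-commit.

package main

import "fmt"

// cudaGPU holds just the two fields needed for the sketch.
type cudaGPU struct {
	FreeMemory uint64 // free VRAM as seen by the runtime/driver library
	OSOverhead uint64 // skew between the management library and the runtime library
}

// recordOverhead is called once at discovery, when both libraries have been queried.
func recordOverhead(g *cudaGPU, nvmlFree uint64) {
	if nvmlFree > g.FreeMemory {
		g.OSOverhead = nvmlFree - g.FreeMemory
	}
}

// refreshFree applies the recorded overhead to subsequent management-library readings.
func refreshFree(g *cudaGPU, nvmlFree uint64) {
	if nvmlFree > g.OSOverhead {
		g.FreeMemory = nvmlFree - g.OSOverhead
	} else {
		g.FreeMemory = 0
	}
}

func main() {
	g := &cudaGPU{FreeMemory: 7 << 30}      // runtime library reports 7 GiB free
	recordOverhead(g, 8<<30)                // management library reports 8 GiB free
	refreshFree(g, 6<<30)                   // later reading: 6 GiB free per the management library
	fmt.Println(g.FreeMemory, g.OSOverhead) // 5368709120 1073741824
}
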
--- gpu/gpu.go | 27 +++++++++++++++++++++++++++ gpu/types.go | 3 ++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/gpu/gpu.go b/gpu/gpu.go index 29a3c1037..58144991d 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -274,6 +274,28 @@ func GetGPUInfo() GpuInfoList { gpuInfo.DriverMajor = driverMajor gpuInfo.DriverMinor = driverMinor + // query the management library as well so we can record any skew between the two + // which represents overhead on the GPU we must set aside on subsequent updates + if cHandles.nvml != nil { + C.nvml_get_free(*cHandles.nvml, C.int(gpuInfo.index), &memInfo.free, &memInfo.total, &memInfo.used) + if memInfo.err != nil { + slog.Warn("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err)) + C.free(unsafe.Pointer(memInfo.err)) + } else { + if memInfo.free != 0 && uint64(memInfo.free) > gpuInfo.FreeMemory { + gpuInfo.OSOverhead = uint64(memInfo.free) - gpuInfo.FreeMemory + slog.Info("detected OS VRAM overhead", + "id", gpuInfo.ID, + "library", gpuInfo.Library, + "compute", gpuInfo.Compute, + "driver", fmt.Sprintf("%d.%d", gpuInfo.DriverMajor, gpuInfo.DriverMinor), + "name", gpuInfo.Name, + "overhead", format.HumanBytes2(gpuInfo.OSOverhead), + ) + } + } + } + // TODO potentially sort on our own algorithm instead of what the underlying GPU library does... cudaGPUs = append(cudaGPUs, gpuInfo) } @@ -374,9 +396,14 @@ func GetGPUInfo() GpuInfoList { slog.Warn("error looking up nvidia GPU memory") continue } + if cHandles.nvml != nil && gpu.OSOverhead > 0 { + // When using the management library update based on recorded overhead + memInfo.free -= C.uint64_t(gpu.OSOverhead) + } slog.Debug("updating cuda memory data", "gpu", gpu.ID, "name", gpu.Name, + "overhead", format.HumanBytes2(gpu.OSOverhead), slog.Group( "before", "total", format.HumanBytes2(gpu.TotalMemory), diff --git a/gpu/types.go b/gpu/types.go index 2eaa9bae9..7a7749b8e 100644 --- a/gpu/types.go +++ b/gpu/types.go @@ -52,7 +52,8 @@ type CPUInfo struct { type CudaGPUInfo struct { GpuInfo - index int //nolint:unused,nolintlint + OSOverhead uint64 // Memory overhead between the driver library and management library + index int //nolint:unused,nolintlint } type CudaGPUInfoList []CudaGPUInfo From 0aff67877ed01adc00056742c9a88143eeabf0c5 Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Tue, 9 Jul 2024 13:48:31 -0700 Subject: [PATCH 089/106] separate request tests (#5578) --- openai/openai_test.go | 194 +++++++++++++++++------------------------- 1 file changed, 78 insertions(+), 116 deletions(-) diff --git a/openai/openai_test.go b/openai/openai_test.go index 4d21382c6..39e8dc58c 100644 --- a/openai/openai_test.go +++ b/openai/openai_test.go @@ -3,7 +3,6 @@ package openai import ( "bytes" "encoding/json" - "fmt" "io" "net/http" "net/http/httptest" @@ -16,49 +15,33 @@ import ( "github.com/stretchr/testify/assert" ) -func TestMiddleware(t *testing.T) { +func TestMiddlewareRequests(t *testing.T) { type testCase struct { Name string Method string Path string - TestPath string Handler func() gin.HandlerFunc - Endpoint func(c *gin.Context) Setup func(t *testing.T, req *http.Request) - Expected func(t *testing.T, resp *httptest.ResponseRecorder) + Expected func(t *testing.T, req *http.Request) + } + + var capturedRequest *http.Request + + captureRequestMiddleware := func() gin.HandlerFunc { + return func(c *gin.Context) { + bodyBytes, _ := io.ReadAll(c.Request.Body) + c.Request.Body = io.NopCloser(bytes.NewReader(bodyBytes)) + capturedRequest = c.Request + 
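
The test rework in PATCH 089 captures each transformed request inside a middleware. A standalone version of that capture pattern (assumed example, separate from the patch): the key step is re-buffering the body with io.NopCloser so handlers that run after the capture can still read it.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"

	"github.com/gin-gonic/gin"
)

func main() {
	gin.SetMode(gin.TestMode)

	var captured []byte
	r := gin.New()
	r.Use(func(c *gin.Context) {
		b, _ := io.ReadAll(c.Request.Body)
		c.Request.Body = io.NopCloser(bytes.NewReader(b)) // restore the body for later handlers
		captured = b
		c.Next()
	})
	r.POST("/api/chat", func(c *gin.Context) { c.Status(http.StatusOK) })

	req, _ := http.NewRequest(http.MethodPost, "/api/chat", strings.NewReader(`{"model":"test-model"}`))
	w := httptest.NewRecorder()
	r.ServeHTTP(w, req)

	fmt.Println(w.Code, string(captured)) // 200 {"model":"test-model"}
}
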
c.Next() + } } testCases := []testCase{ { - Name: "chat handler", - Method: http.MethodPost, - Path: "/api/chat", - TestPath: "/api/chat", - Handler: ChatMiddleware, - Endpoint: func(c *gin.Context) { - var chatReq api.ChatRequest - if err := c.ShouldBindJSON(&chatReq); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) - return - } - - userMessage := chatReq.Messages[0].Content - var assistantMessage string - - switch userMessage { - case "Hello": - assistantMessage = "Hello!" - default: - assistantMessage = "I'm not sure how to respond to that." - } - - c.JSON(http.StatusOK, api.ChatResponse{ - Message: api.Message{ - Role: "assistant", - Content: assistantMessage, - }, - }) - }, + Name: "chat handler", + Method: http.MethodPost, + Path: "/api/chat", + Handler: ChatMiddleware, Setup: func(t *testing.T, req *http.Request) { body := ChatCompletionRequest{ Model: "test-model", @@ -70,88 +53,26 @@ func TestMiddleware(t *testing.T) { req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) req.Header.Set("Content-Type", "application/json") }, - Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusOK, resp.Code) - - var chatResp ChatCompletion - if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil { + Expected: func(t *testing.T, req *http.Request) { + var chatReq api.ChatRequest + if err := json.NewDecoder(req.Body).Decode(&chatReq); err != nil { t.Fatal(err) } - if chatResp.Object != "chat.completion" { - t.Fatalf("expected chat.completion, got %s", chatResp.Object) + if chatReq.Messages[0].Role != "user" { + t.Fatalf("expected 'user', got %s", chatReq.Messages[0].Role) } - if chatResp.Choices[0].Message.Content != "Hello!" { - t.Fatalf("expected Hello!, got %s", chatResp.Choices[0].Message.Content) + if chatReq.Messages[0].Content != "Hello" { + t.Fatalf("expected 'Hello', got %s", chatReq.Messages[0].Content) } }, }, { - Name: "completions handler", - Method: http.MethodPost, - Path: "/api/generate", - TestPath: "/api/generate", - Handler: CompletionsMiddleware, - Endpoint: func(c *gin.Context) { - c.JSON(http.StatusOK, api.GenerateResponse{ - Response: "Hello!", - }) - }, - Setup: func(t *testing.T, req *http.Request) { - body := CompletionRequest{ - Model: "test-model", - Prompt: "Hello", - } - - bodyBytes, _ := json.Marshal(body) - - req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) - req.Header.Set("Content-Type", "application/json") - }, - Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusOK, resp.Code) - var completionResp Completion - if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil { - t.Fatal(err) - } - - if completionResp.Object != "text_completion" { - t.Fatalf("expected text_completion, got %s", completionResp.Object) - } - - if completionResp.Choices[0].Text != "Hello!" 
{ - t.Fatalf("expected Hello!, got %s", completionResp.Choices[0].Text) - } - }, - }, - { - Name: "completions handler with params", - Method: http.MethodPost, - Path: "/api/generate", - TestPath: "/api/generate", - Handler: CompletionsMiddleware, - Endpoint: func(c *gin.Context) { - var generateReq api.GenerateRequest - if err := c.ShouldBindJSON(&generateReq); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) - return - } - - temperature := generateReq.Options["temperature"].(float64) - var assistantMessage string - - switch temperature { - case 1.6: - assistantMessage = "Received temperature of 1.6" - default: - assistantMessage = fmt.Sprintf("Received temperature of %f", temperature) - } - - c.JSON(http.StatusOK, api.GenerateResponse{ - Response: assistantMessage, - }) - }, + Name: "completions handler", + Method: http.MethodPost, + Path: "/api/generate", + Handler: CompletionsMiddleware, Setup: func(t *testing.T, req *http.Request) { temp := float32(0.8) body := CompletionRequest{ @@ -165,24 +86,65 @@ func TestMiddleware(t *testing.T) { req.Body = io.NopCloser(bytes.NewReader(bodyBytes)) req.Header.Set("Content-Type", "application/json") }, - Expected: func(t *testing.T, resp *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusOK, resp.Code) - var completionResp Completion - if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil { + Expected: func(t *testing.T, req *http.Request) { + var genReq api.GenerateRequest + if err := json.NewDecoder(req.Body).Decode(&genReq); err != nil { t.Fatal(err) } - if completionResp.Object != "text_completion" { - t.Fatalf("expected text_completion, got %s", completionResp.Object) + if genReq.Prompt != "Hello" { + t.Fatalf("expected 'Hello', got %s", genReq.Prompt) } - if completionResp.Choices[0].Text != "Received temperature of 1.6" { - t.Fatalf("expected Received temperature of 1.6, got %s", completionResp.Choices[0].Text) + if genReq.Options["temperature"] != 1.6 { + t.Fatalf("expected 1.6, got %f", genReq.Options["temperature"]) } }, }, + } + + gin.SetMode(gin.TestMode) + router := gin.New() + + endpoint := func(c *gin.Context) { + c.Status(http.StatusOK) + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + router = gin.New() + router.Use(captureRequestMiddleware()) + router.Use(tc.Handler()) + router.Handle(tc.Method, tc.Path, endpoint) + req, _ := http.NewRequest(tc.Method, tc.Path, nil) + + if tc.Setup != nil { + tc.Setup(t, req) + } + + resp := httptest.NewRecorder() + router.ServeHTTP(resp, req) + + tc.Expected(t, capturedRequest) + }) + } +} + +func TestMiddlewareResponses(t *testing.T) { + type testCase struct { + Name string + Method string + Path string + TestPath string + Handler func() gin.HandlerFunc + Endpoint func(c *gin.Context) + Setup func(t *testing.T, req *http.Request) + Expected func(t *testing.T, resp *httptest.ResponseRecorder) + } + + testCases := []testCase{ { - Name: "completions handler with error", + Name: "completions handler error forwarding", Method: http.MethodPost, Path: "/api/generate", TestPath: "/api/generate", From 4918fae535cb3d146100bacc0eff67a8579a8a7f Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Tue, 9 Jul 2024 14:01:26 -0700 Subject: [PATCH 090/106] OpenAI v1/completions: allow stop token list (#5551) * stop token parsing fix * add stop test --- openai/openai.go | 14 +++++++++----- openai/openai_test.go | 11 +++++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) diff --git 
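
The fix in PATCH 090 above deals with how encoding/json decodes an untyped field: a JSON array lands in Go as []any, never []string, so the handler must accept both a single string and a list of strings. A small self-contained sketch of that normalization (illustrative, not the actual openai package code):

package main

import (
	"encoding/json"
	"fmt"
)

// normalizeStop accepts the two shapes a decoded "stop" field can take.
func normalizeStop(v any) ([]string, error) {
	switch stop := v.(type) {
	case nil:
		return nil, nil
	case string:
		return []string{stop}, nil
	case []any:
		stops := make([]string, 0, len(stop))
		for _, s := range stop {
			str, ok := s.(string)
			if !ok {
				return nil, fmt.Errorf("invalid type for 'stop' field: %T", s)
			}
			stops = append(stops, str)
		}
		return stops, nil
	default:
		return nil, fmt.Errorf("invalid type for 'stop' field: %T", v)
	}
}

func main() {
	for _, raw := range []string{`{"stop":"\n"}`, `{"stop":["\n","stop"]}`} {
		var req struct{ Stop any }
		_ = json.Unmarshal([]byte(raw), &req)
		stops, err := normalizeStop(req.Stop)
		fmt.Println(stops, err)
	}
}
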
a/openai/openai.go b/openai/openai.go index f1e75bf21..1707da14b 100644 --- a/openai/openai.go +++ b/openai/openai.go @@ -338,12 +338,16 @@ func fromCompleteRequest(r CompletionRequest) (api.GenerateRequest, error) { switch stop := r.Stop.(type) { case string: options["stop"] = []string{stop} - case []string: - options["stop"] = stop - default: - if r.Stop != nil { - return api.GenerateRequest{}, fmt.Errorf("invalid type for 'stop' field: %T", r.Stop) + case []any: + var stops []string + for _, s := range stop { + if str, ok := s.(string); ok { + stops = append(stops, str) + } else { + return api.GenerateRequest{}, fmt.Errorf("invalid type for 'stop' field: %T", s) + } } + options["stop"] = stops } if r.MaxTokens != nil { diff --git a/openai/openai_test.go b/openai/openai_test.go index 39e8dc58c..5f1ae52e9 100644 --- a/openai/openai_test.go +++ b/openai/openai_test.go @@ -79,6 +79,7 @@ func TestMiddlewareRequests(t *testing.T) { Model: "test-model", Prompt: "Hello", Temperature: &temp, + Stop: []string{"\n", "stop"}, } bodyBytes, _ := json.Marshal(body) @@ -99,6 +100,16 @@ func TestMiddlewareRequests(t *testing.T) { if genReq.Options["temperature"] != 1.6 { t.Fatalf("expected 1.6, got %f", genReq.Options["temperature"]) } + + stopTokens, ok := genReq.Options["stop"].([]any) + + if !ok { + t.Fatalf("expected stop tokens to be a list") + } + + if stopTokens[0] != "\n" || stopTokens[1] != "stop" { + t.Fatalf("expected ['\\n', 'stop'], got %v", stopTokens) + } }, }, } From 22c81f62ec845bd8f77215ae5599be14117ec8db Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 10 Jul 2024 09:01:33 -0700 Subject: [PATCH 091/106] Remove duplicate merge glitch --- llm/server.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/llm/server.go b/llm/server.go index 08dc04d5a..aa504d193 100644 --- a/llm/server.go +++ b/llm/server.go @@ -254,10 +254,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr params = append(params, "--tensor-split", estimate.TensorSplit) } - if estimate.TensorSplit != "" { - params = append(params, "--tensor-split", estimate.TensorSplit) - } - for i := range len(servers) { dir := availableServers[servers[i]] if dir == "" { From 1f50356e8e3c3a2956c5ffacc3b9fa33b8285541 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 10 Jul 2024 11:01:22 -0700 Subject: [PATCH 092/106] Bump ROCm on windows to 6.1.2 This also adjusts our algorithm to favor our bundled ROCm. I've confirmed VRAM reporting still doesn't work properly so we can't yet enable concurrency by default. 
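
The commit message above describes favoring the bundled ROCm, and the amd_common.go hunk that follows moves the executable-adjacent check to the front of commonAMDValidateLibDir. A sketch of the resulting search order (illustrative and simplified; the real function checks more locations and validates each candidate with rocmLibUsable before accepting it):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// candidateDirs lists ROCm library directories in the new priority order: the payload
// shipped next to the ollama executable first, then HIP_PATH, then a well-known install
// location (Windows path shown as an example).
func candidateDirs() []string {
	var dirs []string
	if exe, err := os.Executable(); err == nil {
		dirs = append(dirs, filepath.Join(filepath.Dir(exe), "rocm"))
	}
	if hip := os.Getenv("HIP_PATH"); hip != "" {
		dirs = append(dirs, filepath.Join(hip, "bin"))
	}
	dirs = append(dirs, `C:\Program Files\AMD\ROCm\6.1\bin`)
	return dirs
}

func main() {
	fmt.Println(candidateDirs())
}
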
--- .github/workflows/release.yaml | 2 +- .github/workflows/test.yaml | 2 +- docs/faq.md | 2 +- gpu/amd_common.go | 23 +++++++++++------------ gpu/amd_windows.go | 4 ++-- llm/generate/gen_windows.ps1 | 12 +----------- 6 files changed, 17 insertions(+), 28 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 61ca3c433..5ae630c31 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -147,7 +147,7 @@ jobs: run: | $ErrorActionPreference = "Stop" write-host "downloading AMD HIP Installer" - Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" + Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" write-host "Installing AMD HIP" Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait write-host "Completed AMD HIP" diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 13d1c957c..977d8da14 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -169,7 +169,7 @@ jobs: run: | $ErrorActionPreference = "Stop" write-host "downloading AMD HIP Installer" - Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" + Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-24.Q3-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" write-host "Installing AMD HIP" Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait write-host "Completed AMD HIP" diff --git a/docs/faq.md b/docs/faq.md index 574112461..da1848f7f 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -272,4 +272,4 @@ The following server settings may be used to adjust how Ollama handles concurren - `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory. - `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512 -Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM. \ No newline at end of file +Note: Windows with Radeon GPUs currently default to 1 model maximum due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6.2 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but ensure you don't load more models than will fit into your GPUs VRAM. \ No newline at end of file diff --git a/gpu/amd_common.go b/gpu/amd_common.go index 27a81e3f8..7d1cab7c1 100644 --- a/gpu/amd_common.go +++ b/gpu/amd_common.go @@ -49,9 +49,17 @@ func rocmGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) { } func commonAMDValidateLibDir() (string, error) { - // We try to favor system paths first, so that we can wire up the subprocess to use - // the system version. 
Only use our bundled version if the system version doesn't work - // This gives users a more recovery options if versions have subtle problems at runtime + // Favor our bundled version + + // Installer payload location if we're running the installed binary + exe, err := os.Executable() + if err == nil { + rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm") + if rocmLibUsable(rocmTargetDir) { + slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) + return rocmTargetDir, nil + } + } // Prefer explicit HIP env var hipPath := os.Getenv("HIP_PATH") @@ -87,14 +95,5 @@ func commonAMDValidateLibDir() (string, error) { } } - // Installer payload location if we're running the installed binary - exe, err := os.Executable() - if err == nil { - rocmTargetDir := filepath.Join(filepath.Dir(exe), "rocm") - if rocmLibUsable(rocmTargetDir) { - slog.Debug("detected ROCM next to ollama executable " + rocmTargetDir) - return rocmTargetDir, nil - } - } return "", fmt.Errorf("no suitable rocm found, falling back to CPU") } diff --git a/gpu/amd_windows.go b/gpu/amd_windows.go index 8b6fabebb..5d09be8bd 100644 --- a/gpu/amd_windows.go +++ b/gpu/amd_windows.go @@ -22,8 +22,8 @@ const ( var ( // Used to validate if the given ROCm lib is usable - ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // TODO - probably include more coverage of files here... - RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\5.7\\bin"} // TODO glob? + ROCmLibGlobs = []string{"hipblas.dll", "rocblas"} // This is not sufficient to discern v5 vs v6 + RocmStandardLocations = []string{"C:\\Program Files\\AMD\\ROCm\\6.1\\bin"} // TODO glob? ) func AMDGetGPUInfo() []RocmGPUInfo { diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 26bc4fa3e..beb964f98 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -6,18 +6,9 @@ function amdGPUs { if ($env:AMDGPU_TARGETS) { return $env:AMDGPU_TARGETS } - # TODO - load from some common data file for linux + windows build consistency + # Current supported rocblas list from ROCm v6.1.2 on windows $GPU_LIST = @( - "gfx900" "gfx906:xnack-" - "gfx908:xnack-" - "gfx90a:xnack+" - "gfx90a:xnack-" - "gfx940" - "gfx941" - "gfx942" - "gfx1010" - "gfx1012" "gfx1030" "gfx1100" "gfx1101" @@ -395,7 +386,6 @@ function build_rocm() { sign install - # Assumes v5.7, may need adjustments for v6 rm -ea 0 -recurse -force -path "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" md "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\rocblas\library\" -ea 0 > $null cp "${env:HIP_PATH}\bin\hipblas.dll" "${script:SRC_DIR}\dist\windows-${script:ARCH}\rocm\" From 4e262eb2a8aaee31e228febc216c2a83a9a7e4d8 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 10 Jul 2024 13:17:13 -0700 Subject: [PATCH 093/106] remove `GGML_CUDA_FORCE_MMQ=on` from build (#5588) --- llm/generate/gen_linux.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 304eadbd9..5589f1ead 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -178,7 +178,7 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" echo "Building custom CUDA GPU" else - CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DGGML_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" + 
CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" fi CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" From 5a739ff4cb27f7804903adfb674f8a1e197ea86f Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 10 Jul 2024 13:18:04 -0700 Subject: [PATCH 094/106] chatglm graph --- llm/ggml.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/llm/ggml.go b/llm/ggml.go index cfead450d..fddb50391 100644 --- a/llm/ggml.go +++ b/llm/ggml.go @@ -424,6 +424,32 @@ func (llm GGML) GraphSize(context, batch uint64) (partialOffload, fullOffload ui 4*batch*(3*embedding+vocab)+embedding*vocab*105/128, 4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16, ) + case "chatglm": + fullOffload = 4 * batch * (embedding + vocab) + partialOffload = 4*batch*(embedding+vocab) + embedding*vocab*105/128 + if qkvBias, ok := layers["blk.0"]["attn_qkv.bias"]; ok { + fullOffload = max( + fullOffload, + 4*batch*(2+ + 2*embedding+ + context+ + context*heads+ + embeddingHeadsK*heads+ + qkvBias.Shape[0]), + ) + + partialOffload = max( + partialOffload, + 4*batch*(1+ + 2*embedding+ + embeddingHeadsK*heads+ + context+ + context*heads)+ + 4*embeddingHeadsK*context+ + 4*context*embeddingHeadsK+ + 4*qkvBias.Shape[0], + ) + } } return From 41be28096aa597ded1ef91774ba3e6dfc0a8ccbb Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 10 Jul 2024 11:00:07 -0700 Subject: [PATCH 095/106] add system prompt to first legacy template --- server/prompt_test.go | 2 +- server/routes_create_test.go | 4 +- template/template.go | 101 +++++++++++++++++++++++++++++++---- template/template_test.go | 61 ++++++++++++++++----- 4 files changed, 140 insertions(+), 28 deletions(-) diff --git a/server/prompt_test.go b/server/prompt_test.go index d4cee98c2..1435b143a 100644 --- a/server/prompt_test.go +++ b/server/prompt_test.go @@ -161,7 +161,7 @@ func TestChatPrompt(t *testing.T) { {Role: "user", Content: "A test. And a thumping good one at that, I'd wager."}, }, expect: expect{ - prompt: "You're a test, Harry! I-I'm a what? You are the Test Who Lived. A test. And a thumping good one at that, I'd wager. ", + prompt: "You are the Test Who Lived. You're a test, Harry! I-I'm a what? A test. And a thumping good one at that, I'd wager. 
", }, }, } diff --git a/server/routes_create_test.go b/server/routes_create_test.go index 269a0ba12..40477937b 100644 --- a/server/routes_create_test.go +++ b/server/routes_create_test.go @@ -546,8 +546,8 @@ func TestCreateDetectTemplate(t *testing.T) { checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"), - filepath.Join(p, "blobs", "sha256-9512c372dfc7d84d6065b8dd2b601aeed8cc1a78e7a7aa784a42fff37f5524b7"), - filepath.Join(p, "blobs", "sha256-b8b78cb8c6eefd14c06f1af042e6161255bf87bbf2dd14fce57cdac893db8139"), + filepath.Join(p, "blobs", "sha256-68b0323b2f21572bc09ba07554b16b379a5713ee48ef8c25a7661a1f71cfce77"), + filepath.Join(p, "blobs", "sha256-eb72fb7c550ee1f1dec4039bd65382acecf5f7536a30fb7ccace39a8d0cb590b"), }) }) diff --git a/template/template.go b/template/template.go index b133b97e9..0b8f24348 100644 --- a/template/template.go +++ b/template/template.go @@ -143,11 +143,14 @@ func (t *Template) Vars() []string { type Values struct { Messages []api.Message + + // forceLegacy is a flag used to test compatibility with legacy templates + forceLegacy bool } func (t *Template) Execute(w io.Writer, v Values) error { system, collated := collate(v.Messages) - if slices.Contains(t.Vars(), "messages") { + if !v.forceLegacy && slices.Contains(t.Vars(), "messages") { return t.Template.Execute(w, map[string]any{ "System": system, "Messages": collated, @@ -157,15 +160,19 @@ func (t *Template) Execute(w io.Writer, v Values) error { var b bytes.Buffer var prompt, response string for i, m := range collated { - if m.Role == "user" { + switch m.Role { + case "user": prompt = m.Content - } else { + if i != 0 { + system = "" + } + case "assistant": response = m.Content } if i != len(collated)-1 && prompt != "" && response != "" { if err := t.Template.Execute(&b, map[string]any{ - "System": "", + "System": system, "Prompt": prompt, "Response": response, }); err != nil { @@ -178,18 +185,21 @@ func (t *Template) Execute(w io.Writer, v Values) error { } var cut bool - tree := t.Template.Copy() - // for the last message, cut everything after "{{ .Response }}" - tree.Root.Nodes = slices.DeleteFunc(tree.Root.Nodes, func(n parse.Node) bool { - if slices.Contains(parseNode(n), "Response") { - cut = true + nodes := deleteNode(t.Template.Root.Copy(), func(n parse.Node) bool { + switch t := n.(type) { + case *parse.ActionNode: + case *parse.FieldNode: + if slices.Contains(t.Ident, "Response") { + cut = true + } } return cut }) - if err := template.Must(template.New("").AddParseTree("", tree)).Execute(&b, map[string]any{ - "System": system, + tree := parse.Tree{Root: nodes.(*parse.ListNode)} + if err := template.Must(template.New("").AddParseTree("", &tree)).Execute(&b, map[string]any{ + "System": "", "Prompt": prompt, }); err != nil { return err @@ -286,3 +296,72 @@ func parseNode(n parse.Node) []string { return nil } + +// deleteNode walks the node list and deletes nodes that match the predicate +// this is currently to remove the {{ .Response }} node from templates +func deleteNode(n parse.Node, fn func(parse.Node) bool) parse.Node { + var walk func(n parse.Node) parse.Node + walk = func(n parse.Node) parse.Node { + if fn(n) { + return nil + } + + switch t := n.(type) { + case *parse.ListNode: + var nodes []parse.Node + for _, c := range t.Nodes { + if n := walk(c); n != nil { + nodes = append(nodes, n) + } + } + + t.Nodes = nodes + return t + case *parse.IfNode: + t.BranchNode = 
*(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.WithNode: + t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.RangeNode: + t.BranchNode = *(walk(&t.BranchNode).(*parse.BranchNode)) + case *parse.BranchNode: + t.List = walk(t.List).(*parse.ListNode) + if t.ElseList != nil { + t.ElseList = walk(t.ElseList).(*parse.ListNode) + } + case *parse.ActionNode: + n := walk(t.Pipe) + if n == nil { + return nil + } + + t.Pipe = n.(*parse.PipeNode) + case *parse.PipeNode: + var commands []*parse.CommandNode + for _, c := range t.Cmds { + var args []parse.Node + for _, a := range c.Args { + if n := walk(a); n != nil { + args = append(args, n) + } + } + + if len(args) == 0 { + return nil + } + + c.Args = args + commands = append(commands, c) + } + + if len(commands) == 0 { + return nil + } + + t.Cmds = commands + } + + return n + } + + return walk(n) +} diff --git a/template/template_test.go b/template/template_test.go index 428cdc77c..e702a1862 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -105,8 +105,8 @@ func TestTemplate(t *testing.T) { } for n, tt := range cases { + var actual bytes.Buffer t.Run(n, func(t *testing.T) { - var actual bytes.Buffer if err := tmpl.Execute(&actual, Values{Messages: tt}); err != nil { t.Fatal(err) } @@ -120,6 +120,25 @@ func TestTemplate(t *testing.T) { t.Errorf("mismatch (-got +want):\n%s", diff) } }) + + t.Run("legacy", func(t *testing.T) { + var legacy bytes.Buffer + if err := tmpl.Execute(&legacy, Values{Messages: tt, forceLegacy: true}); err != nil { + t.Fatal(err) + } + + legacyBytes := legacy.Bytes() + if slices.Contains([]string{"chatqa.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && legacyBytes[len(legacyBytes)-1] == ' ' { + t.Log("removing trailing space from legacy output") + legacyBytes = legacyBytes[:len(legacyBytes)-1] + } else if slices.Contains([]string{"codellama-70b-instruct.gotmpl", "llama2-chat.gotmpl", "mistral-instruct.gotmpl"}, match) { + t.Skip("legacy outputs cannot be compared to messages outputs") + } + + if diff := cmp.Diff(legacyBytes, actual.Bytes()); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) + } + }) } }) } @@ -136,6 +155,21 @@ func TestParse(t *testing.T) { {"{{ with .Tools }}{{ . 
}}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system", "tools"}}, {"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}}, {"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}}, + {`{{- if .Messages }} +{{- if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }} +{{- range .Messages }}<|im_start|>{{ .Role }} +{{ .Content }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ else -}} +{{ if .System }}<|im_start|>system +{{ .System }}<|im_end|> +{{ end }}{{ if .Prompt }}<|im_start|>user +{{ .Prompt }}<|im_end|> +{{ end }}<|im_start|>assistant +{{ .Response }}<|im_end|> +{{- end -}}`, []string{"content", "messages", "prompt", "response", "role", "system"}}, } for _, tt := range cases { @@ -145,9 +179,8 @@ func TestParse(t *testing.T) { t.Fatal(err) } - vars := tmpl.Vars() - if !slices.Equal(tt.vars, vars) { - t.Errorf("expected %v, got %v", tt.vars, vars) + if diff := cmp.Diff(tmpl.Vars(), tt.vars); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) } }) } @@ -170,7 +203,7 @@ func TestExecuteWithMessages(t *testing.T) { {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", `{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages $index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} +{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, @@ -191,7 +224,7 @@ func TestExecuteWithMessages(t *testing.T) { {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, {"messages", ` {{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq (len (slice $.Messages $index)) 1) $.System }}{{ $.System }}{{ "\n\n" }} +{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }} {{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, @@ -204,9 +237,9 @@ func TestExecuteWithMessages(t *testing.T) { {Role: "user", Content: "What is your name?"}, }, }, - `[INST] Hello friend![/INST] Hello human![INST] You are a helpful assistant! + `[INST] You are a helpful assistant! 
-What is your name?[/INST] `, +Hello friend![/INST] Hello human![INST] What is your name?[/INST] `, }, { "chatml", @@ -221,7 +254,7 @@ What is your name?[/INST] `, `}, {"messages", ` {{- range $index, $_ := .Messages }} -{{- if and (eq .Role "user") (eq (len (slice $.Messages $index)) 1) $.System }}<|im_start|>system +{{- if and (eq .Role "user") (eq $index 0) $.System }}<|im_start|>system {{ $.System }}<|im_end|>{{ "\n" }} {{- end }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|>{{ "\n" }} @@ -236,12 +269,12 @@ What is your name?[/INST] `, {Role: "user", Content: "What is your name?"}, }, }, - `<|im_start|>user + `<|im_start|>system +You are a helpful assistant!<|im_end|> +<|im_start|>user Hello friend!<|im_end|> <|im_start|>assistant Hello human!<|im_end|> -<|im_start|>system -You are a helpful assistant!<|im_end|> <|im_start|>user What is your name?<|im_end|> <|im_start|>assistant @@ -300,8 +333,8 @@ Answer: `, t.Fatal(err) } - if b.String() != tt.expected { - t.Errorf("expected\n%s,\ngot\n%s", tt.expected, b.String()) + if diff := cmp.Diff(b.String(), tt.expected); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) } }) } From 19753c18c01183b4c974e36e89b0c7cbdcc3c38a Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 10 Jul 2024 11:00:29 -0700 Subject: [PATCH 096/106] update embedded templates --- template/alfred.gotmpl | 4 ++-- template/alpaca.gotmpl | 8 +++++--- template/chatml.gotmpl | 4 ++-- template/chatqa.gotmpl | 7 ++++--- template/codellama-70b-instruct.gotmpl | 10 +++++----- template/falcon-instruct.gotmpl | 12 +++++++----- template/gemma-instruct.gotmpl | 7 ++++--- template/granite-instruct.gotmpl | 8 ++++---- template/llama2-chat.gotmpl | 8 ++++---- template/llama3-instruct.gotmpl | 4 ++-- template/magicoder.gotmpl | 5 +++-- template/mistral-instruct.gotmpl | 5 +++-- template/openchat.gotmpl | 12 ++++++------ template/phi-3.gotmpl | 4 ++-- template/solar-instruct.gotmpl | 7 ++++--- template/starcoder2-instruct.gotmpl | 5 ++--- .../alpaca.gotmpl/system-user-assistant-user | 4 +++- .../system-user-assistant-user | 1 + template/testdata/codellama-70b-instruct.gotmpl/user | 1 + .../user-assistant-user | 1 + .../openchat.gotmpl/system-user-assistant-user | 2 +- template/testdata/openchat.gotmpl/user | 2 +- .../testdata/openchat.gotmpl/user-assistant-user | 2 +- template/vicuna.gotmpl | 7 ++++--- template/zephyr.gotmpl | 4 ++-- 25 files changed, 74 insertions(+), 60 deletions(-) diff --git a/template/alfred.gotmpl b/template/alfred.gotmpl index 44284f04c..71bc6706f 100644 --- a/template/alfred.gotmpl +++ b/template/alfred.gotmpl @@ -3,6 +3,6 @@ {{- end }} {{- range .Messages }}{{ .Content }} {{- end }} -{{- else }} +{{- else -}} {{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} -{{- end }} \ No newline at end of file +{{- end -}} \ No newline at end of file diff --git a/template/alpaca.gotmpl b/template/alpaca.gotmpl index c1f69dc92..e9becb3d4 100644 --- a/template/alpaca.gotmpl +++ b/template/alpaca.gotmpl @@ -1,6 +1,7 @@ {{- if .Messages }} {{- if .System }}{{ .System }} -{{- end }} + +{{ end }} {{- range .Messages }} {{- if eq .Role "user" }}### Instruction: {{- else if eq .Role "assistant" }}### Response: @@ -8,7 +9,7 @@ {{ .Content }} {{ end }}### Response: -{{ else }} +{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction: @@ -16,4 +17,5 @@ {{ end }}### Response: {{ .Response }} -{{- end }} \ No newline at end of file + +{{ end -}} \ No newline at end of file diff --git 
a/template/chatml.gotmpl b/template/chatml.gotmpl index d945547c7..eb8ab0dcd 100644 --- a/template/chatml.gotmpl +++ b/template/chatml.gotmpl @@ -5,11 +5,11 @@ {{- range .Messages }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|> {{ end }}<|im_start|>assistant -{{ else }} +{{ else -}} {{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant {{ .Response }}<|im_end|> -{{- end }} \ No newline at end of file +{{ end -}} \ No newline at end of file diff --git a/template/chatqa.gotmpl b/template/chatqa.gotmpl index 7022c4790..41c6ced59 100644 --- a/template/chatqa.gotmpl +++ b/template/chatqa.gotmpl @@ -8,10 +8,11 @@ {{- end }} {{ .Content }} {{ end }}Assistant: -{{- else }} +{{- else -}} {{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} -{{ end }}Assistant: <|begin_of_text|>{{ .Response }} -{{- end }} \ No newline at end of file +{{ end }}Assistant: {{ .Response }} + +{{ end -}} \ No newline at end of file diff --git a/template/codellama-70b-instruct.gotmpl b/template/codellama-70b-instruct.gotmpl index 392d839eb..0a313d389 100644 --- a/template/codellama-70b-instruct.gotmpl +++ b/template/codellama-70b-instruct.gotmpl @@ -7,13 +7,13 @@ {{ .Content }} {{ end }}Source: assistant Destination: user -{{ else }} -{{ if .System }} Source: system + {{ else -}} +{{ if .System }}Source: system - {{ .System }} {{ end }} Source: user + {{ .System }} {{ end }}Source: user {{ .Prompt }} Source: assistant Destination: user - {{ .Response }} -{{- end }} \ No newline at end of file + {{ .Response }} +{{- end -}} \ No newline at end of file diff --git a/template/falcon-instruct.gotmpl b/template/falcon-instruct.gotmpl index 99d67f93c..3a403007e 100644 --- a/template/falcon-instruct.gotmpl +++ b/template/falcon-instruct.gotmpl @@ -6,8 +6,10 @@ {{ else if eq .Role "assistant" }}Falcon: {{ end }}{{ .Content }} {{ end }}Falcon: -{{ else }} -{{ if .System }}{{ .System }} -{{ end }}{{ if .Prompt }}User: {{ .Prompt }} -{{ end }}Assistant: {{ .Response }} -{{- end }} \ No newline at end of file +{{ else -}} +{{ if .System }}System: {{ .System }} +{{ end }}{{ if .Prompt }}User: +{{ .Prompt }} +{{ end }}Falcon: +{{ .Response }} +{{ end -}} \ No newline at end of file diff --git a/template/gemma-instruct.gotmpl b/template/gemma-instruct.gotmpl index 870a8f2e2..6d778a70f 100644 --- a/template/gemma-instruct.gotmpl +++ b/template/gemma-instruct.gotmpl @@ -8,9 +8,10 @@ {{- end }} {{ .Content }} {{ end }}model -{{ else }} +{{ else -}} user -{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} +{{ if .System }}{{ .System }} +{{ end }}{{ .Prompt }} model {{ .Response }} -{{- end }} \ No newline at end of file +{{ end -}} \ No newline at end of file diff --git a/template/granite-instruct.gotmpl b/template/granite-instruct.gotmpl index 327ff3eef..4a85a97be 100644 --- a/template/granite-instruct.gotmpl +++ b/template/granite-instruct.gotmpl @@ -10,9 +10,8 @@ {{ .Content }} {{ end }}Answer: -{{ else }} -{{ if .System }} -System: +{{ else -}} +{{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}Question: @@ -20,4 +19,5 @@ System: {{ end }}Answer: {{ .Response }} -{{- end }} \ No newline at end of file + +{{ end -}} \ No newline at end of file diff --git a/template/llama2-chat.gotmpl b/template/llama2-chat.gotmpl index 6327d5812..1816fefd8 100644 --- a/template/llama2-chat.gotmpl +++ b/template/llama2-chat.gotmpl @@ -9,8 +9,8 @@ {{- else }} [/INST] {{ .Content }} {{- end }} {{- end }} 
[/INST] -{{- else }} -[INST] <>{{ .System }}<> +{{- else -}} +[INST] <>{{ if .System }}{{ .System }}{{ end }}<> -{{ .Prompt }} [/INST] {{ .Response }} -{{- end }} \ No newline at end of file +{{ .Prompt }} [/INST] {{ .Response }} +{{- end -}} \ No newline at end of file diff --git a/template/llama3-instruct.gotmpl b/template/llama3-instruct.gotmpl index 9c81a9535..7947b8da5 100644 --- a/template/llama3-instruct.gotmpl +++ b/template/llama3-instruct.gotmpl @@ -8,7 +8,7 @@ {{ .Content }}<|eot_id|> {{- end }}<|start_header_id|>assistant<|end_header_id|> -{{ else }} +{{ else -}} {{ if .System }}<|start_header_id|>system<|end_header_id|> {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> @@ -16,4 +16,4 @@ {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> {{ .Response }}<|eot_id|> -{{- end }} \ No newline at end of file +{{- end -}} \ No newline at end of file diff --git a/template/magicoder.gotmpl b/template/magicoder.gotmpl index 73a58127c..9227b6661 100644 --- a/template/magicoder.gotmpl +++ b/template/magicoder.gotmpl @@ -9,7 +9,7 @@ {{ .Content }} {{ end }}@@ Response -{{ else }} +{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}@@ Instruction @@ -17,4 +17,5 @@ {{ end }}@@ Response {{ .Response }} -{{- end }} \ No newline at end of file + +{{ end -}} \ No newline at end of file diff --git a/template/mistral-instruct.gotmpl b/template/mistral-instruct.gotmpl index eb3d5ced2..1d746dfd2 100644 --- a/template/mistral-instruct.gotmpl +++ b/template/mistral-instruct.gotmpl @@ -5,5 +5,6 @@ {{- else if eq .Role "assistant" }}[/INST] {{ .Content }} {{- end }} {{- end }}[/INST] -{{- else }}[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} [/INST] {{ .Response }} -{{- end }} \ No newline at end of file +{{- else -}} +[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }} +{{- end -}} \ No newline at end of file diff --git a/template/openchat.gotmpl b/template/openchat.gotmpl index d5e1cbb0d..649f0509c 100644 --- a/template/openchat.gotmpl +++ b/template/openchat.gotmpl @@ -1,11 +1,11 @@ {{- if .Messages }} -{{- if .System }}GPT Correct System: {{ .System }}<|end_of_turn|> +{{- if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|> {{- end }} -{{- range .Messages }}GPT Correct +{{- range .Messages }}GPT4 Correct {{- if eq .Role "user" }} User: {{- else if eq .Role "assistant" }} Assistant: {{- end }} {{ .Content }}<|end_of_turn|> -{{- end }}GPT Correct Assistant: -{{- else }} -{{ .System }}<|end_of_turn|>GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> -{{- end }} \ No newline at end of file +{{- end }}GPT4 Correct Assistant: +{{- else -}} +{{ if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>{{ end }}GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> +{{- end -}} \ No newline at end of file diff --git a/template/phi-3.gotmpl b/template/phi-3.gotmpl index a3558d2b7..4ca56e952 100644 --- a/template/phi-3.gotmpl +++ b/template/phi-3.gotmpl @@ -5,11 +5,11 @@ {{- range .Messages }}<|{{ .Role }}|> {{ .Content }}<|end|> {{ end }}<|assistant|> -{{ else }} +{{ else -}} {{ if .System }}<|system|> {{ .System }}<|end|> {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }}<|end|> {{ end }}<|assistant|> {{ .Response }}<|end|> -{{- end }} \ No newline at end of file +{{ end -}} \ No newline at end of file diff --git a/template/solar-instruct.gotmpl b/template/solar-instruct.gotmpl 
index caa6e8e77..8a8331ca4 100644 --- a/template/solar-instruct.gotmpl +++ b/template/solar-instruct.gotmpl @@ -10,7 +10,7 @@ {{ .Content }} {{ end }} {{ end }}### Assistant: -{{ else }} +{{ else -}} {{ if .System }}### System: {{ .System }} @@ -18,5 +18,6 @@ {{ .Prompt }} {{ end }}### Assistant: -{{ .Response }} -{{- end }} \ No newline at end of file +{{ .Response }} + +{{ end -}} \ No newline at end of file diff --git a/template/starcoder2-instruct.gotmpl b/template/starcoder2-instruct.gotmpl index 7d7ff9326..17c6ad755 100644 --- a/template/starcoder2-instruct.gotmpl +++ b/template/starcoder2-instruct.gotmpl @@ -11,14 +11,13 @@ {{ end }} {{- end }}### Response -{{ else }} +{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction {{ .Prompt }} - {{ end }}### Response {{ .Response }}<|endoftext|> -{{- end }} \ No newline at end of file +{{ end -}} \ No newline at end of file diff --git a/template/testdata/alpaca.gotmpl/system-user-assistant-user b/template/testdata/alpaca.gotmpl/system-user-assistant-user index 20182d829..4caa81788 100644 --- a/template/testdata/alpaca.gotmpl/system-user-assistant-user +++ b/template/testdata/alpaca.gotmpl/system-user-assistant-user @@ -1,4 +1,6 @@ -You are a helpful assistant.### Instruction: +You are a helpful assistant. + +### Instruction: Hello, how are you? ### Response: diff --git a/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user index fdd0fc8b4..d7528f80c 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user +++ b/template/testdata/codellama-70b-instruct.gotmpl/system-user-assistant-user @@ -9,3 +9,4 @@ Source: system I'd like to show off how chat templating works! Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user b/template/testdata/codellama-70b-instruct.gotmpl/user index 9e7174a84..8e07853ca 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/user +++ b/template/testdata/codellama-70b-instruct.gotmpl/user @@ -3,3 +3,4 @@ Source: user Hello, how are you? Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user index b4ba1736b..f732cc746 100644 --- a/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user +++ b/template/testdata/codellama-70b-instruct.gotmpl/user-assistant-user @@ -7,3 +7,4 @@ Source: user I'd like to show off how chat templating works! Source: assistant Destination: user + \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/system-user-assistant-user b/template/testdata/openchat.gotmpl/system-user-assistant-user index 1214c1264..404b071aa 100644 --- a/template/testdata/openchat.gotmpl/system-user-assistant-user +++ b/template/testdata/openchat.gotmpl/system-user-assistant-user @@ -1 +1 @@ -GPT Correct System: You are a helpful assistant.<|end_of_turn|>GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct System: You are a helpful assistant.<|end_of_turn|>GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. 
How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user b/template/testdata/openchat.gotmpl/user index 611daa83e..48229cb0e 100644 --- a/template/testdata/openchat.gotmpl/user +++ b/template/testdata/openchat.gotmpl/user @@ -1 +1 @@ -GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/testdata/openchat.gotmpl/user-assistant-user b/template/testdata/openchat.gotmpl/user-assistant-user index f97b02b9c..4719abb2d 100644 --- a/template/testdata/openchat.gotmpl/user-assistant-user +++ b/template/testdata/openchat.gotmpl/user-assistant-user @@ -1 +1 @@ -GPT Correct User: Hello, how are you?<|end_of_turn|>GPT Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT Correct Assistant: \ No newline at end of file +GPT4 Correct User: Hello, how are you?<|end_of_turn|>GPT4 Correct Assistant: I'm doing great. How can I help you today?<|end_of_turn|>GPT4 Correct User: I'd like to show off how chat templating works!<|end_of_turn|>GPT4 Correct Assistant: \ No newline at end of file diff --git a/template/vicuna.gotmpl b/template/vicuna.gotmpl index 2e13e990d..01465b997 100644 --- a/template/vicuna.gotmpl +++ b/template/vicuna.gotmpl @@ -7,8 +7,9 @@ {{ else if eq .Role "assistant" }}ASSISTANT: {{ .Content }} {{ end }} {{- end }}ASSISTANT: -{{- else }} +{{- else -}} {{ if .System }}{{ .System }} + {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} -{{ end }}ASSISTANT: {{ .Response }} -{{- end }} \ No newline at end of file +{{ end }}ASSISTANT: {{ .Response }} +{{ end -}} \ No newline at end of file diff --git a/template/zephyr.gotmpl b/template/zephyr.gotmpl index e66688480..3ca1d1a1c 100644 --- a/template/zephyr.gotmpl +++ b/template/zephyr.gotmpl @@ -5,11 +5,11 @@ {{- range .Messages }}<|{{ .Role }}|> {{ .Content }} {{ end }}<|assistant|> -{{ else }} +{{ else -}} {{ if .System }}<|system|> {{ .System }} {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }} {{ end }}<|assistant|> {{ .Response }} -{{- end }} \ No newline at end of file +{{ end -}} \ No newline at end of file From efbf41ed8151098b942c142e2522b9ab8364f97a Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 10 Jul 2024 20:01:52 -0700 Subject: [PATCH 097/106] llm: dont link cuda with compat libs (#5621) --- llm/generate/gen_linux.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 5589f1ead..db2c6c30c 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -178,7 +178,7 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" echo "Building custom CUDA GPU" else - CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" + CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}" fi CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" From 
791650ddef9eb11e011506dbd5d22ed6bfcb6a10 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 11 Jul 2024 00:53:12 -0700 Subject: [PATCH 098/106] sched: only error when over-allocating system memory (#5626) --- llm/server.go | 9 +++++++++ server/sched.go | 37 ------------------------------------- 2 files changed, 9 insertions(+), 37 deletions(-) diff --git a/llm/server.go b/llm/server.go index aa504d193..07c58cfff 100644 --- a/llm/server.go +++ b/llm/server.go @@ -122,6 +122,15 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr } } + // On linux, over-allocating CPU memory will almost always result in an error + if runtime.GOOS == "linux" { + systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize + if systemMemoryRequired > systemTotalMemory { + slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "system", format.HumanBytes2(systemTotalMemory)) + return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(systemTotalMemory)) + } + } + estimate.log() // Loop through potential servers diff --git a/server/sched.go b/server/sched.go index 48047bfec..2daed3abb 100644 --- a/server/sched.go +++ b/server/sched.go @@ -135,11 +135,6 @@ func (s *Scheduler) processPending(ctx context.Context) { } for { - cpus := s.getCpuFn() - var systemMem gpu.GpuInfo - if len(cpus) > 0 { - systemMem = cpus[0] - } var runnerToExpire *runnerRef s.loadedMu.Lock() runner := s.loaded[pending.model.ModelPath] @@ -193,38 +188,6 @@ func (s *Scheduler) processPending(ctx context.Context) { break } - estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts) - maxSize := systemMem.FreeMemory - - // Add available GPU memory to the total pool - // macOS hardware has unified memory so don't double count - if runtime.GOOS != "darwin" { - for _, gpu := range gpus { - if gpu.Library == "cpu" { - continue - } - if loadedCount == 0 { - // If no other models are loaded, set the limit based on what's available - maxSize += gpu.FreeMemory - } else { - // Other models could be unloaded, favor total memory for limit - maxSize += gpu.TotalMemory - } - } - } - - // Block attempting to load a model larger than system memory + GPU memory - if estimate.TotalSize > maxSize { - slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize)) - - // Linux will crash if over-allocating memory - return an error to the user. 
- // TODO (jmorganca): add reasonable upper limits for darwin and windows as well - if runtime.GOOS == "linux" { - pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) - break - } - } - // Evaluate if the model will fit in the available system memory, or if we should unload a model first if len(gpus) == 1 && gpus[0].Library == "cpu" { // simplifying assumption of defaultParallel when in CPU mode From e64f9ebb44b584d94094274f62acd90a5195dd89 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 11 Jul 2024 13:10:13 -0700 Subject: [PATCH 099/106] do no automatically aggregate system messages --- template/template.go | 39 ++++++++++++++++++++------------------- template/template_test.go | 11 +++++++---- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/template/template.go b/template/template.go index 0b8f24348..8d5ac51b8 100644 --- a/template/template.go +++ b/template/template.go @@ -102,8 +102,21 @@ var response = parse.ActionNode{ }, } +var funcs = template.FuncMap{ + "aggregate": func(v []*api.Message, role string) string { + var aggregated []string + for _, m := range v { + if m.Role == role { + aggregated = append(aggregated, m.Content) + } + } + + return strings.Join(aggregated, "\n\n") + }, +} + func Parse(s string) (*Template, error) { - tmpl := template.New("").Option("missingkey=zero") + tmpl := template.New("").Option("missingkey=zero").Funcs(funcs) tmpl, err := tmpl.Parse(s) if err != nil { @@ -149,23 +162,21 @@ type Values struct { } func (t *Template) Execute(w io.Writer, v Values) error { - system, collated := collate(v.Messages) + collated := collate(v.Messages) if !v.forceLegacy && slices.Contains(t.Vars(), "messages") { return t.Template.Execute(w, map[string]any{ - "System": system, "Messages": collated, }) } var b bytes.Buffer - var prompt, response string + var system, prompt, response string for i, m := range collated { switch m.Role { + case "system": + system = m.Content case "user": prompt = m.Content - if i != 0 { - system = "" - } case "assistant": response = m.Content } @@ -179,6 +190,7 @@ func (t *Template) Execute(w io.Writer, v Values) error { return err } + system = "" prompt = "" response = "" } @@ -209,25 +221,14 @@ func (t *Template) Execute(w io.Writer, v Values) error { return err } -type messages []*api.Message - // collate messages based on role. consecutive messages of the same role are merged // into a single message. collate also pulls out and merges messages with Role == "system" // which are templated separately. 
As a side effect, it mangles message content adding image // tags ([img-%d]) as needed -func collate(msgs []api.Message) (system string, collated messages) { +func collate(msgs []api.Message) (collated []*api.Message) { var n int for i := range msgs { msg := msgs[i] - if msg.Role == "system" { - if system != "" { - system += "\n\n" - } - - system += msg.Content - continue - } - for range msg.Images { imageTag := fmt.Sprintf("[img-%d]", n) if !strings.Contains(msg.Content, "[img]") { diff --git a/template/template_test.go b/template/template_test.go index e702a1862..b020eb67a 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -122,6 +122,7 @@ func TestTemplate(t *testing.T) { }) t.Run("legacy", func(t *testing.T) { + t.Skip("legacy outputs are currently default outputs") var legacy bytes.Buffer if err := tmpl.Execute(&legacy, Values{Messages: tt, forceLegacy: true}); err != nil { t.Fatal(err) @@ -154,11 +155,13 @@ func TestParse(t *testing.T) { {"{{ .System }} {{ .Prompt }} {{ .Response }}", []string{"prompt", "response", "system"}}, {"{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "response", "system", "tools"}}, {"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}}, - {"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}}, + {`{{- range .Messages }} +{{- if eq .Role "system" }}SYSTEM: +{{- else if eq .Role "user" }}USER: +{{- else if eq .Role "assistant" }}ASSISTANT: +{{- end }} {{ .Content }} +{{- end }}`, []string{"content", "messages", "role"}}, {`{{- if .Messages }} -{{- if .System }}<|im_start|>system -{{ .System }}<|im_end|> -{{ end }} {{- range .Messages }}<|im_start|>{{ .Role }} {{ .Content }}<|im_end|> {{ end }}<|im_start|>assistant From 57ec6901eb59cca9d0c29adca3f0fd4b95c1c989 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 11 Jul 2024 13:11:40 -0700 Subject: [PATCH 100/106] revert embedded templates to use prompt/response This reverts commit 19753c18c01183b4c974e36e89b0c7cbdcc3c38a. for compat. 
messages will be added at a later date --- server/routes_create_test.go | 4 +- template/alfred.gotmpl | 9 +-- template/alpaca.gotmpl | 13 ---- template/chatml.gotmpl | 9 --- template/chatqa.gotmpl | 12 ---- template/codellama-70b-instruct.gotmpl | 15 +---- template/falcon-instruct.gotmpl | 10 ---- template/gemma-instruct.gotmpl | 12 ---- template/granite-instruct.gotmpl | 14 ----- template/llama2-chat.gotmpl | 18 ++---- template/llama3-instruct.gotmpl | 14 +---- template/magicoder.gotmpl | 13 ---- template/mistral-instruct.gotmpl | 13 +--- template/openchat.gotmpl | 12 +--- template/phi-3.gotmpl | 9 --- template/solar-instruct.gotmpl | 14 ----- template/starcoder2-instruct.gotmpl | 15 ----- template/template_test.go | 59 ++++++++++++------- .../system-user-assistant-user | 4 +- .../llama2-chat.gotmpl/user-assistant-user | 4 +- .../system-user-assistant-user | 5 +- template/vicuna.gotmpl | 11 ---- template/zephyr.gotmpl | 9 --- 23 files changed, 63 insertions(+), 235 deletions(-) diff --git a/server/routes_create_test.go b/server/routes_create_test.go index 40477937b..04174b92e 100644 --- a/server/routes_create_test.go +++ b/server/routes_create_test.go @@ -546,8 +546,8 @@ func TestCreateDetectTemplate(t *testing.T) { checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{ filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"), - filepath.Join(p, "blobs", "sha256-68b0323b2f21572bc09ba07554b16b379a5713ee48ef8c25a7661a1f71cfce77"), - filepath.Join(p, "blobs", "sha256-eb72fb7c550ee1f1dec4039bd65382acecf5f7536a30fb7ccace39a8d0cb590b"), + filepath.Join(p, "blobs", "sha256-c608dc615584cd20d9d830363dabf8a4783ae5d34245c3d8c115edb3bc7b28e4"), + filepath.Join(p, "blobs", "sha256-f836ee110db21567f826332e4cedd746c06d10664fd5a9ea3659e3683a944510"), }) }) diff --git a/template/alfred.gotmpl b/template/alfred.gotmpl index 71bc6706f..cecb9d2c8 100644 --- a/template/alfred.gotmpl +++ b/template/alfred.gotmpl @@ -1,8 +1 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} -{{- end }} -{{- range .Messages }}{{ .Content }} -{{- end }} -{{- else -}} -{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} -{{- end -}} \ No newline at end of file +{{ if .System }}{{ .System }}{{ end }}{{ if .Prompt }}{{ .Prompt }}{{ end }}{{ .Response }} \ No newline at end of file diff --git a/template/alpaca.gotmpl b/template/alpaca.gotmpl index e9becb3d4..ec7a8edcb 100644 --- a/template/alpaca.gotmpl +++ b/template/alpaca.gotmpl @@ -1,15 +1,3 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}### Instruction: -{{- else if eq .Role "assistant" }}### Response: -{{- end }} -{{ .Content }} - -{{ end }}### Response: -{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction: @@ -18,4 +6,3 @@ {{ end }}### Response: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/chatml.gotmpl b/template/chatml.gotmpl index eb8ab0dcd..fb672601a 100644 --- a/template/chatml.gotmpl +++ b/template/chatml.gotmpl @@ -1,15 +1,6 @@ -{{- if .Messages }} -{{- if .System }}<|im_start|>system -{{ .System }}<|im_end|> -{{ end }} -{{- range .Messages }}<|im_start|>{{ .Role }} -{{ .Content }}<|im_end|> -{{ end }}<|im_start|>assistant -{{ else -}} {{ if .System }}<|im_start|>system {{ .System }}<|im_end|> {{ end }}{{ if .Prompt }}<|im_start|>user {{ .Prompt }}<|im_end|> {{ end }}<|im_start|>assistant {{ .Response }}<|im_end|> -{{ end -}} \ No newline at 
end of file diff --git a/template/chatqa.gotmpl b/template/chatqa.gotmpl index 41c6ced59..91679a72d 100644 --- a/template/chatqa.gotmpl +++ b/template/chatqa.gotmpl @@ -1,18 +1,6 @@ -{{- if .Messages }} -{{- if .System }}System: {{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}User: -{{- else if eq .Role "assistant" }}Assistant: -{{- end }} {{ .Content }} - -{{ end }}Assistant: -{{- else -}} {{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} {{ end }}Assistant: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/codellama-70b-instruct.gotmpl b/template/codellama-70b-instruct.gotmpl index 0a313d389..e5856042c 100644 --- a/template/codellama-70b-instruct.gotmpl +++ b/template/codellama-70b-instruct.gotmpl @@ -1,19 +1,10 @@ -{{- if .Messages }} -{{- if .System }}Source: system - - {{ .System }} {{ end }} -{{- range .Messages }}Source: {{ .Role }} - - {{ .Content }} {{ end }}Source: assistant -Destination: user - - {{ else -}} {{ if .System }}Source: system {{ .System }} {{ end }}Source: user {{ .Prompt }} Source: assistant +{{- if not .Response }} Destination: user +{{- end }} - {{ .Response }} -{{- end -}} \ No newline at end of file + {{ .Response }} \ No newline at end of file diff --git a/template/falcon-instruct.gotmpl b/template/falcon-instruct.gotmpl index 3a403007e..0a5fe48e8 100644 --- a/template/falcon-instruct.gotmpl +++ b/template/falcon-instruct.gotmpl @@ -1,15 +1,5 @@ -{{- if .Messages }} -{{- if .System }}System: {{ .System }} -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}User: -{{ else if eq .Role "assistant" }}Falcon: -{{ end }}{{ .Content }} -{{ end }}Falcon: -{{ else -}} {{ if .System }}System: {{ .System }} {{ end }}{{ if .Prompt }}User: {{ .Prompt }} {{ end }}Falcon: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/gemma-instruct.gotmpl b/template/gemma-instruct.gotmpl index 6d778a70f..3c3a84256 100644 --- a/template/gemma-instruct.gotmpl +++ b/template/gemma-instruct.gotmpl @@ -1,17 +1,5 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}user -{{- if and $.System (eq $index 0) }} -{{ $.System }} -{{- end }} -{{- else if eq .Role "assistant" }}model -{{- end }} -{{ .Content }} -{{ end }}model -{{ else -}} user {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }} model {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/granite-instruct.gotmpl b/template/granite-instruct.gotmpl index 4a85a97be..56690fce6 100644 --- a/template/granite-instruct.gotmpl +++ b/template/granite-instruct.gotmpl @@ -1,16 +1,3 @@ -{{- if .Messages }} -{{- if .System }}System: -{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}Question: -{{- else if eq .Role "assistant" }}Answer: -{{- end }} -{{ .Content }} - -{{ end }}Answer: -{{ else -}} {{ if .System }}System: {{ .System }} @@ -20,4 +7,3 @@ {{ end }}Answer: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/llama2-chat.gotmpl b/template/llama2-chat.gotmpl index 1816fefd8..013b414e2 100644 --- a/template/llama2-chat.gotmpl +++ b/template/llama2-chat.gotmpl @@ -1,16 +1,6 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if eq $index 0 }}<> -{{- if $.System }} -{{ $.System }} +[INST] <> +{{- if .System }} +{{ .System }} {{ end }}<> -{{ end }}{{ .Content }} -{{- else }} [/INST] {{ .Content }} -{{- end }} -{{- end }} [/INST] -{{- else -}} -[INST] <>{{ if .System 
}}{{ .System }}{{ end }}<> - -{{ .Prompt }} [/INST] {{ .Response }} -{{- end -}} \ No newline at end of file +{{ .Prompt }} [/INST] {{ .Response }} \ No newline at end of file diff --git a/template/llama3-instruct.gotmpl b/template/llama3-instruct.gotmpl index 7947b8da5..36d0218b6 100644 --- a/template/llama3-instruct.gotmpl +++ b/template/llama3-instruct.gotmpl @@ -1,19 +1,7 @@ -{{- if .Messages }} -{{- if .System }}<|start_header_id|>system<|end_header_id|> - -{{ .System }}<|eot_id|> -{{- end }} -{{- range .Messages }}<|start_header_id|>{{ .Role }}<|end_header_id|> - -{{ .Content }}<|eot_id|> -{{- end }}<|start_header_id|>assistant<|end_header_id|> - -{{ else -}} {{ if .System }}<|start_header_id|>system<|end_header_id|> {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> -{{ .Response }}<|eot_id|> -{{- end -}} \ No newline at end of file +{{ .Response }}<|eot_id|> \ No newline at end of file diff --git a/template/magicoder.gotmpl b/template/magicoder.gotmpl index 9227b6661..52abc01aa 100644 --- a/template/magicoder.gotmpl +++ b/template/magicoder.gotmpl @@ -1,15 +1,3 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}@@ Instruction -{{- else if eq .Role "assistant" }}@@ Response -{{- end }} -{{ .Content }} - -{{ end }}@@ Response -{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}@@ Instruction @@ -18,4 +6,3 @@ {{ end }}@@ Response {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/mistral-instruct.gotmpl b/template/mistral-instruct.gotmpl index 1d746dfd2..e489bd4c5 100644 --- a/template/mistral-instruct.gotmpl +++ b/template/mistral-instruct.gotmpl @@ -1,10 +1,3 @@ -{{- if .Messages }} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and $.System (eq (len (slice $.Messages $index)) 1) }}{{ $.System }} -{{ end }}{{ .Content }} -{{- else if eq .Role "assistant" }}[/INST] {{ .Content }} -{{- end }} -{{- end }}[/INST] -{{- else -}} -[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }} -{{- end -}} \ No newline at end of file +[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] {{ .Response }} \ No newline at end of file diff --git a/template/openchat.gotmpl b/template/openchat.gotmpl index 649f0509c..9c1838343 100644 --- a/template/openchat.gotmpl +++ b/template/openchat.gotmpl @@ -1,11 +1 @@ -{{- if .Messages }} -{{- if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|> -{{- end }} -{{- range .Messages }}GPT4 Correct -{{- if eq .Role "user" }} User: -{{- else if eq .Role "assistant" }} Assistant: -{{- end }} {{ .Content }}<|end_of_turn|> -{{- end }}GPT4 Correct Assistant: -{{- else -}} -{{ if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>{{ end }}GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> -{{- end -}} \ No newline at end of file +{{ if .System }}GPT4 Correct System: {{ .System }}<|end_of_turn|>{{ end }}GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|> \ No newline at end of file diff --git a/template/phi-3.gotmpl b/template/phi-3.gotmpl index 4ca56e952..6c3610dda 100644 --- a/template/phi-3.gotmpl +++ b/template/phi-3.gotmpl @@ -1,15 +1,6 @@ -{{- if .Messages }} -{{- if .System }}<|system|> -{{ .System }}<|end|> -{{ end }} -{{- range .Messages }}<|{{ .Role }}|> 
-{{ .Content }}<|end|> -{{ end }}<|assistant|> -{{ else -}} {{ if .System }}<|system|> {{ .System }}<|end|> {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }}<|end|> {{ end }}<|assistant|> {{ .Response }}<|end|> -{{ end -}} \ No newline at end of file diff --git a/template/solar-instruct.gotmpl b/template/solar-instruct.gotmpl index 8a8331ca4..1c14960d4 100644 --- a/template/solar-instruct.gotmpl +++ b/template/solar-instruct.gotmpl @@ -1,16 +1,3 @@ -{{- if .Messages }} -{{- if .System }}### System: -{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}### User: -{{ .Content }} -{{ else if eq .Role "assistant" }}### Assistant: -{{ .Content }} -{{ end }} -{{ end }}### Assistant: -{{ else -}} {{ if .System }}### System: {{ .System }} @@ -20,4 +7,3 @@ {{ end }}### Assistant: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/starcoder2-instruct.gotmpl b/template/starcoder2-instruct.gotmpl index 17c6ad755..6c93a7abc 100644 --- a/template/starcoder2-instruct.gotmpl +++ b/template/starcoder2-instruct.gotmpl @@ -1,17 +1,3 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}### Instruction -{{ .Content }} - -{{ else if eq .Role "assistant" }}### Response -{{ .Content }}<|endoftext|> - -{{ end }} -{{- end }}### Response -{{ else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}### Instruction @@ -20,4 +6,3 @@ {{ end }}### Response {{ .Response }}<|endoftext|> -{{ end -}} \ No newline at end of file diff --git a/template/template_test.go b/template/template_test.go index b020eb67a..9cfa0beaa 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -116,7 +116,14 @@ func TestTemplate(t *testing.T) { t.Fatal(err) } - if diff := cmp.Diff(actual.Bytes(), expect); diff != "" { + bts := actual.Bytes() + + if slices.Contains([]string{"chatqa.gotmpl", "llama2-chat.gotmpl", "mistral-instruct.gotmpl", "openchat.gotmpl", "vicuna.gotmpl"}, match) && bts[len(bts)-1] == ' ' { + t.Log("removing trailing space from output") + bts = bts[:len(bts)-1] + } + + if diff := cmp.Diff(bts, expect); diff != "" { t.Errorf("mismatch (-got +want):\n%s", diff) } }) @@ -203,11 +210,18 @@ func TestExecuteWithMessages(t *testing.T) { { "mistral", []template{ - {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, - {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", `{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }} -{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} + {"no response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] `}, + {"response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, + {"messages", `{{- $system := aggregate $.Messages "system" -}} +{{- range $index, $_ := .Messages }} +{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} +{{- $system = "" }} + +{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, }, @@ -223,12 +237,18 @@ func TestExecuteWithMessages(t *testing.T) { { "mistral system", []template{ - {"no response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] `}, - {"response", `[INST] {{ if .System }}{{ .System }}{{ "\n\n" }}{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - 
{"messages", ` + {"no response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] `}, + {"response", `[INST] {{ if .System }}{{ .System }} + +{{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, + {"messages", `{{- $system := aggregate $.Messages "system" -}} {{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if and (eq $index 0) $.System }}{{ $.System }}{{ "\n\n" }} -{{- end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} +{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} +{{- $system = "" }} + +{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} {{- end }} {{- end }}`}, }, @@ -256,12 +276,9 @@ Hello friend![/INST] Hello human![INST] What is your name?[/INST] `, {{ .Response }}<|im_end|> `}, {"messages", ` -{{- range $index, $_ := .Messages }} -{{- if and (eq .Role "user") (eq $index 0) $.System }}<|im_start|>system -{{ $.System }}<|im_end|>{{ "\n" }} -{{- end }}<|im_start|>{{ .Role }} -{{ .Content }}<|im_end|>{{ "\n" }} -{{- end }}<|im_start|>assistant +{{- range $index, $_ := .Messages }}<|im_start|>{{ .Role }} +{{ .Content }}<|im_end|> +{{ end }}<|im_start|>assistant `}, }, Values{ @@ -294,9 +311,11 @@ What is your name?<|im_end|> `}, {"messages", ` {{- range .Messages }} -{{- if eq .Role "user" }}Question: {{ .Content }}{{ "\n\n" }} -{{- else if eq .Role "assistant" }}Answer: {{ .Content }}{{ "\n\n" }} -{{- end }} +{{- if eq .Role "user" }}Question: {{ .Content }} + +{{ else if eq .Role "assistant" }}Answer: {{ .Content }} + +{{ end }} {{- end }}Answer: `}, }, Values{ diff --git a/template/testdata/llama2-chat.gotmpl/system-user-assistant-user b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user index fc2679bf0..9db81cb44 100644 --- a/template/testdata/llama2-chat.gotmpl/system-user-assistant-user +++ b/template/testdata/llama2-chat.gotmpl/system-user-assistant-user @@ -2,4 +2,6 @@ You are a helpful assistant. <> -Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST] \ No newline at end of file +Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] <><> + +I'd like to show off how chat templating works! [/INST] \ No newline at end of file diff --git a/template/testdata/llama2-chat.gotmpl/user-assistant-user b/template/testdata/llama2-chat.gotmpl/user-assistant-user index 42b4c5294..ca58954f5 100644 --- a/template/testdata/llama2-chat.gotmpl/user-assistant-user +++ b/template/testdata/llama2-chat.gotmpl/user-assistant-user @@ -1,3 +1,5 @@ [INST] <><> -Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works! [/INST] \ No newline at end of file +Hello, how are you? [/INST] I'm doing great. How can I help you today?[INST] <><> + +I'd like to show off how chat templating works! [/INST] \ No newline at end of file diff --git a/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user index b6b4bf93e..2f1edaec9 100644 --- a/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user +++ b/template/testdata/mistral-instruct.gotmpl/system-user-assistant-user @@ -1,2 +1,3 @@ -[INST] Hello, how are you?[/INST] I'm doing great. How can I help you today?[INST] You are a helpful assistant. -I'd like to show off how chat templating works![/INST] \ No newline at end of file +[INST] You are a helpful assistant. 
+ +Hello, how are you?[/INST] I'm doing great. How can I help you today?[INST] I'd like to show off how chat templating works![/INST] \ No newline at end of file diff --git a/template/vicuna.gotmpl b/template/vicuna.gotmpl index 01465b997..515b2fe94 100644 --- a/template/vicuna.gotmpl +++ b/template/vicuna.gotmpl @@ -1,15 +1,4 @@ -{{- if .Messages }} -{{- if .System }}{{ .System }} - -{{ end }} -{{- range .Messages }} -{{- if eq .Role "user" }}USER: {{ .Content }} -{{ else if eq .Role "assistant" }}ASSISTANT: {{ .Content }} -{{ end }} -{{- end }}ASSISTANT: -{{- else -}} {{ if .System }}{{ .System }} {{ end }}{{ if .Prompt }}USER: {{ .Prompt }} {{ end }}ASSISTANT: {{ .Response }} -{{ end -}} \ No newline at end of file diff --git a/template/zephyr.gotmpl b/template/zephyr.gotmpl index 3ca1d1a1c..1f889f267 100644 --- a/template/zephyr.gotmpl +++ b/template/zephyr.gotmpl @@ -1,15 +1,6 @@ -{{- if .Messages }} -{{- if .System }}<|system|> -{{ .System }} -{{ end }} -{{- range .Messages }}<|{{ .Role }}|> -{{ .Content }} -{{ end }}<|assistant|> -{{ else -}} {{ if .System }}<|system|> {{ .System }} {{ end }}{{ if .Prompt }}<|user|> {{ .Prompt }} {{ end }}<|assistant|> {{ .Response }} -{{ end -}} \ No newline at end of file From c4cf8ad55966cc61c73f119ab9cbfaf57264fc81 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 11 Jul 2024 16:42:57 -0700 Subject: [PATCH 101/106] llm: avoid loading model if system memory is too small (#5637) * llm: avoid loading model if system memory is too small * update log * Instrument swap free space On linux and windows, expose how much swap space is available so we can take that into consideration when scheduling models * use `systemSwapFreeMemory` in check --------- Co-authored-by: Daniel Hiltgen --- gpu/gpu.go | 3 +++ gpu/gpu_darwin.go | 1 + gpu/gpu_linux.go | 17 +++++++++-------- gpu/gpu_windows.go | 2 +- gpu/types.go | 1 + llm/server.go | 11 +++++++---- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/gpu/gpu.go b/gpu/gpu.go index 58144991d..6e25cb46d 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -360,14 +360,17 @@ func GetGPUInfo() GpuInfoList { "before", "total", format.HumanBytes2(cpus[0].TotalMemory), "free", format.HumanBytes2(cpus[0].FreeMemory), + "free_swap", format.HumanBytes2(cpus[0].FreeSwap), ), slog.Group( "now", "total", format.HumanBytes2(mem.TotalMemory), "free", format.HumanBytes2(mem.FreeMemory), + "free_swap", format.HumanBytes2(mem.FreeSwap), ), ) cpus[0].FreeMemory = mem.FreeMemory + cpus[0].FreeSwap = mem.FreeSwap } var memInfo C.mem_info_t diff --git a/gpu/gpu_darwin.go b/gpu/gpu_darwin.go index 39d8fcf89..cb066e581 100644 --- a/gpu/gpu_darwin.go +++ b/gpu/gpu_darwin.go @@ -57,6 +57,7 @@ func GetCPUMem() (memInfo, error) { return memInfo{ TotalMemory: uint64(C.getPhysicalMemory()), FreeMemory: uint64(C.getFreeMemory()), + // FreeSwap omitted as Darwin uses dynamic paging }, nil } diff --git a/gpu/gpu_linux.go b/gpu/gpu_linux.go index a099bf822..0d08ce8da 100644 --- a/gpu/gpu_linux.go +++ b/gpu/gpu_linux.go @@ -50,7 +50,7 @@ var OneapiMgmtName = "libze_intel_gpu.so" func GetCPUMem() (memInfo, error) { var mem memInfo - var total, available, free, buffers, cached uint64 + var total, available, free, buffers, cached, freeSwap uint64 f, err := os.Open("/proc/meminfo") if err != nil { return mem, err @@ -70,20 +70,21 @@ func GetCPUMem() (memInfo, error) { _, err = fmt.Sscanf(line, "Buffers:%d", &buffers) case strings.HasPrefix(line, "Cached:"): _, err = fmt.Sscanf(line, "Cached:%d", &cached) + case strings.HasPrefix(line, 
"SwapFree:"): + _, err = fmt.Sscanf(line, "SwapFree:%d", &freeSwap) default: continue } if err != nil { return mem, err } - - if total > 0 && available > 0 { - mem.TotalMemory = total * format.KibiByte - mem.FreeMemory = available * format.KibiByte - return mem, nil - } } mem.TotalMemory = total * format.KibiByte - mem.FreeMemory = (free + buffers + cached) * format.KibiByte + mem.FreeSwap = freeSwap * format.KibiByte + if available > 0 { + mem.FreeMemory = available * format.KibiByte + } else { + mem.FreeMemory = (free + buffers + cached) * format.KibiByte + } return mem, nil } diff --git a/gpu/gpu_windows.go b/gpu/gpu_windows.go index f8c2e76fe..cd0629da4 100644 --- a/gpu/gpu_windows.go +++ b/gpu/gpu_windows.go @@ -51,5 +51,5 @@ func GetCPUMem() (memInfo, error) { if r1 == 0 { return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err) } - return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys}, nil + return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys, FreeSwap: memStatus.AvailPageFile}, nil } diff --git a/gpu/types.go b/gpu/types.go index 7a7749b8e..8d22b06bf 100644 --- a/gpu/types.go +++ b/gpu/types.go @@ -10,6 +10,7 @@ import ( type memInfo struct { TotalMemory uint64 `json:"total_memory,omitempty"` FreeMemory uint64 `json:"free_memory,omitempty"` + FreeSwap uint64 `json:"free_swap,omitempty"` } // Beginning of an `ollama info` command diff --git a/llm/server.go b/llm/server.go index 07c58cfff..8f37aa23a 100644 --- a/llm/server.go +++ b/llm/server.go @@ -88,6 +88,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr var estimate MemoryEstimate var systemTotalMemory uint64 var systemFreeMemory uint64 + var systemSwapFreeMemory uint64 systemMemInfo, err := gpu.GetCPUMem() if err != nil { @@ -95,7 +96,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr } else { systemTotalMemory = systemMemInfo.TotalMemory systemFreeMemory = systemMemInfo.FreeMemory - slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", systemFreeMemory) + systemSwapFreeMemory = systemMemInfo.FreeSwap + slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory)) } // If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info @@ -125,9 +127,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr // On linux, over-allocating CPU memory will almost always result in an error if runtime.GOOS == "linux" { systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize - if systemMemoryRequired > systemTotalMemory { - slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "system", format.HumanBytes2(systemTotalMemory)) - return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(systemTotalMemory)) + available := min(systemTotalMemory, systemFreeMemory+systemSwapFreeMemory) + if systemMemoryRequired > available { + slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", available, "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory)) + return nil, fmt.Errorf("model requires more system memory (%s) than is available 
(%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available)) } } From 5056bb9c010f06316b0ff280b879b9c36a7c995c Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Thu, 11 Jul 2024 16:06:57 -0700 Subject: [PATCH 102/106] rename aggregate to contents --- template/template.go | 11 ++++++----- template/template_test.go | 37 +++++++++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 7 deletions(-) diff --git a/template/template.go b/template/template.go index 8d5ac51b8..21e1614d0 100644 --- a/template/template.go +++ b/template/template.go @@ -103,15 +103,16 @@ var response = parse.ActionNode{ } var funcs = template.FuncMap{ - "aggregate": func(v []*api.Message, role string) string { - var aggregated []string + // contents returns the contents of messages with an optional role filter + "contents": func(v []*api.Message, role ...string) string { + var parts []string for _, m := range v { - if m.Role == role { - aggregated = append(aggregated, m.Content) + if len(role) == 0 || role[0] == "" || m.Role == role[0] { + parts = append(parts, m.Content) } } - return strings.Join(aggregated, "\n\n") + return strings.Join(parts, "\n\n") }, } diff --git a/template/template_test.go b/template/template_test.go index 9cfa0beaa..5e5f42570 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -216,7 +216,7 @@ func TestExecuteWithMessages(t *testing.T) { {"response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", `{{- $system := aggregate $.Messages "system" -}} + {"messages", `{{- $system := contents .Messages "system" -}} {{- range $index, $_ := .Messages }} {{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} {{- $system = "" }} @@ -243,7 +243,7 @@ func TestExecuteWithMessages(t *testing.T) { {"response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", `{{- $system := aggregate $.Messages "system" -}} + {"messages", `{{- $system := contents .Messages "system" -}} {{- range $index, $_ := .Messages }} {{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} {{- $system = "" }} @@ -363,3 +363,36 @@ Answer: `, }) } } + +func TestFuncs(t *testing.T) { + t.Run("contents", func(t *testing.T) { + cases := map[string]string{ + "": "A\n\nB\n\nC\n\nD\n\nE\n\nF", + "system": "A\n\nF", + "user": "B\n\nE", + "assistant": "C\n\nD", + } + + s := []*api.Message{ + {Role: "system", Content: "A"}, + {Role: "user", Content: "B"}, + {Role: "assistant", Content: "C"}, + {Role: "assistant", Content: "D"}, + {Role: "user", Content: "E"}, + {Role: "system", Content: "F"}, + } + + fn, ok := funcs["contents"].(func([]*api.Message, ...string) string) + if !ok { + t.Fatal("contents is not a function") + } + + for k, v := range cases { + t.Run(k, func(t *testing.T) { + if diff := cmp.Diff(fn(s, k), v); diff != "" { + t.Errorf("mismatch (-got +want):\n%s", diff) + } + }) + } + }) +} From 10e768826c7d5a8f7d7fab13832299a466a01f87 Mon Sep 17 00:00:00 2001 From: Josh <76125168+joshyan1@users.noreply.github.com> Date: Thu, 11 Jul 2024 17:24:29 -0700 Subject: [PATCH 103/106] fix: quant err message (#5616) --- llm/llm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/llm.go b/llm/llm.go index f2a5e557a..d24507cce 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -33,7 +33,7 @@ func Quantize(infile, outfile string, ftype fileType) error { params.ftype = ftype.Value() if rc := C.llama_model_quantize(cinfile, coutfile, ¶ms); rc != 0 { - return 
fmt.Errorf("llama_model_quantize: %d", rc) + return fmt.Errorf("failed to quantize model. This model architecture may not be supported, or you may need to upgrade Ollama to the latest version") } return nil From 179737feb7311fc57c507a93378a3ac15da3a346 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 11 Jul 2024 22:53:46 -0700 Subject: [PATCH 104/106] Clean up old files when installing on Windows (#5645) * app: always clean up install dir; force close applications * remove wildcard * revert `CloseApplications` * whitespace * update `LOCALAPPDATA` var --- app/ollama.iss | 3 +++ 1 file changed, 3 insertions(+) diff --git a/app/ollama.iss b/app/ollama.iss index e6502abd3..fef4a7b25 100644 --- a/app/ollama.iss +++ b/app/ollama.iss @@ -127,6 +127,9 @@ Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\models" Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\history" ; NOTE: if the user has a custom OLLAMA_MODELS it will be preserved +[InstallDelete] +Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama" + [Messages] WizardReady=Ollama Windows Preview ReadyLabel1=%nLet's get you up and running with your own large language models. From 36c87c433b7d880ef8b3a2b05ef93b0cd1675520 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 12 Jul 2024 11:48:06 -0700 Subject: [PATCH 105/106] template: preprocess message and collect system --- template/template.go | 37 +++++++++++---------------- template/template_test.go | 53 ++++++--------------------------------- 2 files changed, 23 insertions(+), 67 deletions(-) diff --git a/template/template.go b/template/template.go index 21e1614d0..9b3516665 100644 --- a/template/template.go +++ b/template/template.go @@ -102,22 +102,8 @@ var response = parse.ActionNode{ }, } -var funcs = template.FuncMap{ - // contents returns the contents of messages with an optional role filter - "contents": func(v []*api.Message, role ...string) string { - var parts []string - for _, m := range v { - if len(role) == 0 || role[0] == "" || m.Role == role[0] { - parts = append(parts, m.Content) - } - } - - return strings.Join(parts, "\n\n") - }, -} - func Parse(s string) (*Template, error) { - tmpl := template.New("").Option("missingkey=zero").Funcs(funcs) + tmpl := template.New("").Option("missingkey=zero") tmpl, err := tmpl.Parse(s) if err != nil { @@ -163,15 +149,16 @@ type Values struct { } func (t *Template) Execute(w io.Writer, v Values) error { - collated := collate(v.Messages) + system, collated := collate(v.Messages) if !v.forceLegacy && slices.Contains(t.Vars(), "messages") { return t.Template.Execute(w, map[string]any{ + "System": system, "Messages": collated, }) } var b bytes.Buffer - var system, prompt, response string + var prompt, response string for i, m := range collated { switch m.Role { case "system": @@ -223,11 +210,13 @@ func (t *Template) Execute(w io.Writer, v Values) error { } // collate messages based on role. consecutive messages of the same role are merged -// into a single message. collate also pulls out and merges messages with Role == "system" -// which are templated separately. As a side effect, it mangles message content adding image -// tags ([img-%d]) as needed -func collate(msgs []api.Message) (collated []*api.Message) { +// into a single message. collate also collects and returns all system messages. 
+// collate mutates message content adding image tags ([img-%d]) as needed +func collate(msgs []api.Message) (string, []*api.Message) { var n int + + var system []string + var collated []*api.Message for i := range msgs { msg := msgs[i] for range msg.Images { @@ -240,6 +229,10 @@ func collate(msgs []api.Message) (collated []*api.Message) { n++ } + if msg.Role == "system" { + system = append(system, msg.Content) + } + if len(collated) > 0 && collated[len(collated)-1].Role == msg.Role { collated[len(collated)-1].Content += "\n\n" + msg.Content } else { @@ -247,7 +240,7 @@ func collate(msgs []api.Message) (collated []*api.Message) { } } - return + return strings.Join(system, "\n\n"), collated } func parseNode(n parse.Node) []string { diff --git a/template/template_test.go b/template/template_test.go index 5e5f42570..c678f1b12 100644 --- a/template/template_test.go +++ b/template/template_test.go @@ -216,13 +216,11 @@ func TestExecuteWithMessages(t *testing.T) { {"response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", `{{- $system := contents .Messages "system" -}} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} -{{- $system = "" }} + {"messages", `[INST] {{ if .System }}{{ .System }} -{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} -{{- end }} +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}[INST] {{ end }} {{- end }}`}, }, Values{ @@ -243,13 +241,11 @@ func TestExecuteWithMessages(t *testing.T) { {"response", `[INST] {{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}[/INST] {{ .Response }}`}, - {"messages", `{{- $system := contents .Messages "system" -}} -{{- range $index, $_ := .Messages }} -{{- if eq .Role "user" }}[INST] {{ if $system }}{{ $system }} -{{- $system = "" }} + {"messages", `[INST] {{ if .System }}{{ .System }} -{{ end }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }} -{{- end }} +{{ end }} +{{- range .Messages }} +{{- if eq .Role "user" }}{{ .Content }}[/INST] {{ else if eq .Role "assistant" }}{{ .Content }}[INST] {{ end }} {{- end }}`}, }, Values{ @@ -363,36 +359,3 @@ Answer: `, }) } } - -func TestFuncs(t *testing.T) { - t.Run("contents", func(t *testing.T) { - cases := map[string]string{ - "": "A\n\nB\n\nC\n\nD\n\nE\n\nF", - "system": "A\n\nF", - "user": "B\n\nE", - "assistant": "C\n\nD", - } - - s := []*api.Message{ - {Role: "system", Content: "A"}, - {Role: "user", Content: "B"}, - {Role: "assistant", Content: "C"}, - {Role: "assistant", Content: "D"}, - {Role: "user", Content: "E"}, - {Role: "system", Content: "F"}, - } - - fn, ok := funcs["contents"].(func([]*api.Message, ...string) string) - if !ok { - t.Fatal("contents is not a function") - } - - for k, v := range cases { - t.Run(k, func(t *testing.T) { - if diff := cmp.Diff(fn(s, k), v); diff != "" { - t.Errorf("mismatch (-got +want):\n%s", diff) - } - }) - } - }) -} From 33627331a370755ff5033c0fcd71d1c9210c9d96 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 12 Jul 2024 12:29:23 -0700 Subject: [PATCH 106/106] app: also clean up tempdir runners on install (#5646) --- app/ollama.iss | 1 + 1 file changed, 1 insertion(+) diff --git a/app/ollama.iss b/app/ollama.iss index fef4a7b25..6bedb9ff7 100644 --- a/app/ollama.iss +++ b/app/ollama.iss @@ -128,6 +128,7 @@ Type: filesandordirs; Name: "{%USERPROFILE}\.ollama\history" ; NOTE: if the user has a custom OLLAMA_MODELS 
it will be preserved [InstallDelete] +Type: filesandordirs; Name: "{%TEMP}\ollama*" Type: filesandordirs; Name: "{%LOCALAPPDATA}\Programs\Ollama" [Messages]
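
Not part of the patch series above — a minimal, self-contained Go sketch of the Linux memory check added in "llm: avoid loading model if system memory is too small": the loadable budget is min(total RAM, free RAM + free swap), and a model whose CPU-resident portion exceeds that budget is rejected before loading. The memInfo fields mirror gpu/types.go; canLoad and the sample values are hypothetical and only illustrate the rule.

package main

import "fmt"

type memInfo struct {
	TotalMemory uint64
	FreeMemory  uint64
	FreeSwap    uint64
}

// canLoad reports whether `required` bytes of CPU-resident model data fit in
// min(total RAM, free RAM + free swap), mirroring the check in llm/server.go.
func canLoad(required uint64, mem memInfo) (bool, uint64) {
	available := mem.FreeMemory + mem.FreeSwap
	if mem.TotalMemory < available {
		available = mem.TotalMemory
	}
	return required <= available, available
}

func main() {
	// Hypothetical numbers: 16 GiB RAM, 3 GiB free, 2 GiB free swap.
	mem := memInfo{TotalMemory: 16 << 30, FreeMemory: 3 << 30, FreeSwap: 2 << 30}
	ok, available := canLoad(8<<30, mem)
	fmt.Printf("required=8GiB available=%dGiB ok=%v\n", available>>30, ok)
}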
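
Similarly, a hedged sketch of the message preprocessing from "template: preprocess message and collect system": consecutive messages with the same role are merged, and all system content is joined into a single string handed to the template. collateSketch and the message type are illustrative stand-ins; the image-tag handling of the real collate is omitted.

package main

import (
	"fmt"
	"strings"
)

type message struct {
	Role    string
	Content string
}

// collateSketch merges consecutive same-role messages and joins all system
// message content with blank lines, like the patched template.collate.
func collateSketch(msgs []message) (string, []message) {
	var system []string
	var collated []message
	for _, m := range msgs {
		if m.Role == "system" {
			system = append(system, m.Content)
		}
		if n := len(collated); n > 0 && collated[n-1].Role == m.Role {
			collated[n-1].Content += "\n\n" + m.Content
		} else {
			collated = append(collated, m)
		}
	}
	return strings.Join(system, "\n\n"), collated
}

func main() {
	system, msgs := collateSketch([]message{
		{Role: "system", Content: "You are concise."},
		{Role: "user", Content: "Hi."},
		{Role: "user", Content: "What's 2+2?"},
		{Role: "assistant", Content: "4"},
	})
	fmt.Printf("system: %q\n", system)
	for _, m := range msgs {
		fmt.Printf("%s: %q\n", m.Role, m.Content)
	}
}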