From 97c9e11768292c8f2732e2f4c9cde72a604c936b Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 28 Jun 2024 09:57:10 -0700 Subject: [PATCH 01/33] Switch use_mmap to a pointer type This uses nil as undefined for a cleaner implementation. --- api/types.go | 105 ++++++++++++++++------------------------------ api/types_test.go | 40 ++++++++++-------- llm/server.go | 11 ++--- 3 files changed, 63 insertions(+), 93 deletions(-) diff --git a/api/types.go b/api/types.go index 95ed5d37e..3b67d57a3 100644 --- a/api/types.go +++ b/api/types.go @@ -159,49 +159,18 @@ type Options struct { // Runner options which must be set when the model is loaded into memory type Runner struct { - UseNUMA bool `json:"numa,omitempty"` - NumCtx int `json:"num_ctx,omitempty"` - NumBatch int `json:"num_batch,omitempty"` - NumGPU int `json:"num_gpu,omitempty"` - MainGPU int `json:"main_gpu,omitempty"` - LowVRAM bool `json:"low_vram,omitempty"` - F16KV bool `json:"f16_kv,omitempty"` - LogitsAll bool `json:"logits_all,omitempty"` - VocabOnly bool `json:"vocab_only,omitempty"` - UseMMap TriState `json:"use_mmap,omitempty"` - UseMLock bool `json:"use_mlock,omitempty"` - NumThread int `json:"num_thread,omitempty"` -} - -type TriState int - -const ( - TriStateUndefined TriState = -1 - TriStateFalse TriState = 0 - TriStateTrue TriState = 1 -) - -func (b *TriState) UnmarshalJSON(data []byte) error { - var v bool - if err := json.Unmarshal(data, &v); err != nil { - return err - } - if v { - *b = TriStateTrue - } - *b = TriStateFalse - return nil -} - -func (b *TriState) MarshalJSON() ([]byte, error) { - if *b == TriStateUndefined { - return nil, nil - } - var v bool - if *b == TriStateTrue { - v = true - } - return json.Marshal(v) + UseNUMA bool `json:"numa,omitempty"` + NumCtx int `json:"num_ctx,omitempty"` + NumBatch int `json:"num_batch,omitempty"` + NumGPU int `json:"num_gpu,omitempty"` + MainGPU int `json:"main_gpu,omitempty"` + LowVRAM bool `json:"low_vram,omitempty"` + F16KV bool `json:"f16_kv,omitempty"` + LogitsAll bool `json:"logits_all,omitempty"` + VocabOnly bool `json:"vocab_only,omitempty"` + UseMMap *bool `json:"use_mmap,omitempty"` + UseMLock bool `json:"use_mlock,omitempty"` + NumThread int `json:"num_thread,omitempty"` } // EmbeddingRequest is the request passed to [Client.Embeddings]. 
@@ -437,19 +406,6 @@ func (opts *Options) FromMap(m map[string]interface{}) error { continue } - if reflect.PointerTo(field.Type()) == reflect.TypeOf((*TriState)(nil)) { - val, ok := val.(bool) - if !ok { - return fmt.Errorf("option %q must be of type boolean", key) - } - if val { - field.SetInt(int64(TriStateTrue)) - } else { - field.SetInt(int64(TriStateFalse)) - } - continue - } - switch field.Kind() { case reflect.Int: switch t := val.(type) { @@ -496,6 +452,17 @@ func (opts *Options) FromMap(m map[string]interface{}) error { slice[i] = str } field.Set(reflect.ValueOf(slice)) + case reflect.Pointer: + var b bool + if field.Type() == reflect.TypeOf(&b) { + val, ok := val.(bool) + if !ok { + return fmt.Errorf("option %q must be of type boolean", key) + } + field.Set(reflect.ValueOf(&val)) + } else { + return fmt.Errorf("unknown type loading config params: %v %v", field.Kind(), field.Type()) + } default: return fmt.Errorf("unknown type loading config params: %v", field.Kind()) } @@ -538,7 +505,7 @@ func DefaultOptions() Options { LowVRAM: false, F16KV: true, UseMLock: false, - UseMMap: TriStateUndefined, + UseMMap: nil, UseNUMA: false, }, } @@ -608,19 +575,6 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) { } else { field := valueOpts.FieldByName(opt.Name) if field.IsValid() && field.CanSet() { - if reflect.PointerTo(field.Type()) == reflect.TypeOf((*TriState)(nil)) { - boolVal, err := strconv.ParseBool(vals[0]) - if err != nil { - return nil, fmt.Errorf("invalid bool value %s", vals) - } - if boolVal { - out[key] = TriStateTrue - } else { - out[key] = TriStateFalse - } - continue - } - switch field.Kind() { case reflect.Float32: floatVal, err := strconv.ParseFloat(vals[0], 32) @@ -648,6 +602,17 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) { case reflect.Slice: // TODO: only string slices are supported right now out[key] = vals + case reflect.Pointer: + var b bool + if field.Type() == reflect.TypeOf(&b) { + boolVal, err := strconv.ParseBool(vals[0]) + if err != nil { + return nil, fmt.Errorf("invalid bool value %s", vals) + } + out[key] = &boolVal + } else { + return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key) + } default: return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key) } diff --git a/api/types_test.go b/api/types_test.go index 8b6c60c62..c60ed90e0 100644 --- a/api/types_test.go +++ b/api/types_test.go @@ -108,25 +108,27 @@ func TestDurationMarshalUnmarshal(t *testing.T) { } func TestUseMmapParsingFromJSON(t *testing.T) { + tr := true + fa := false tests := []struct { name string req string - exp TriState + exp *bool }{ { name: "Undefined", req: `{ }`, - exp: TriStateUndefined, + exp: nil, }, { name: "True", req: `{ "use_mmap": true }`, - exp: TriStateTrue, + exp: &tr, }, { name: "False", req: `{ "use_mmap": false }`, - exp: TriStateFalse, + exp: &fa, }, } @@ -144,50 +146,52 @@ func TestUseMmapParsingFromJSON(t *testing.T) { } func TestUseMmapFormatParams(t *testing.T) { + tr := true + fa := false tests := []struct { name string req map[string][]string - exp TriState + exp *bool err error }{ { name: "True", req: map[string][]string{ - "use_mmap": []string{"true"}, + "use_mmap": {"true"}, }, - exp: TriStateTrue, + exp: &tr, err: nil, }, { name: "False", req: map[string][]string{ - "use_mmap": []string{"false"}, + "use_mmap": {"false"}, }, - exp: TriStateFalse, + exp: &fa, err: nil, }, { name: "Numeric True", req: map[string][]string{ - "use_mmap": []string{"1"}, + "use_mmap": {"1"}, }, 
- exp: TriStateTrue, + exp: &tr, err: nil, }, { name: "Numeric False", req: map[string][]string{ - "use_mmap": []string{"0"}, + "use_mmap": {"0"}, }, - exp: TriStateFalse, + exp: &fa, err: nil, }, { name: "invalid string", req: map[string][]string{ - "use_mmap": []string{"foo"}, + "use_mmap": {"foo"}, }, - exp: TriStateUndefined, + exp: nil, err: fmt.Errorf("invalid bool value [foo]"), }, } @@ -195,11 +199,11 @@ func TestUseMmapFormatParams(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { resp, err := FormatParams(test.req) - require.Equal(t, err, test.err) + require.Equal(t, test.err, err) respVal, ok := resp["use_mmap"] - if test.exp != TriStateUndefined { + if test.exp != nil { assert.True(t, ok, "resp: %v", resp) - assert.Equal(t, test.exp, respVal) + assert.Equal(t, *test.exp, *respVal.(*bool)) } }) } diff --git a/llm/server.go b/llm/server.go index 61346069e..821f6efdc 100644 --- a/llm/server.go +++ b/llm/server.go @@ -208,7 +208,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr if g.Library == "metal" && uint64(opts.NumGPU) > 0 && uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 { - opts.UseMMap = api.TriStateFalse + opts.UseMMap = new(bool) + *opts.UseMMap = false } } @@ -219,10 +220,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr // Windows CUDA should not use mmap for best performance // Linux with a model larger than free space, mmap leads to thrashing // For CPU loads we want the memory to be allocated, not FS cache - if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) || - (runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) || - (gpus[0].Library == "cpu" && opts.UseMMap == api.TriStateUndefined) || - opts.UseMMap == api.TriStateFalse { + if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) || + (runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) || + (gpus[0].Library == "cpu" && opts.UseMMap == nil) || + (opts.UseMMap != nil && !*opts.UseMMap) { params = append(params, "--no-mmap") } From ef757da2c90ad52f35c95688095dfd84655cceb7 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 10:30:07 -0700 Subject: [PATCH 02/33] Better nvidia GPU discovery logging Refine the way we log GPU discovery to improve the non-debug output, and report more actionable log messages when possible to help users troubleshoot on their own. --- docs/troubleshooting.md | 14 +++++++++----- gpu/gpu.go | 23 +++++++++++++++++++++-- gpu/gpu_info_nvcuda.c | 31 ++++++++++++++++--------------- gpu/gpu_info_nvcuda.h | 6 +++++- 4 files changed, 51 insertions(+), 23 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index de29b344c..bbb771831 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -70,14 +70,18 @@ curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. 
For example OLLAMA_TMPDIR=/usr/share/ollama/ -## Container fails to run on NVIDIA GPU +## NVIDIA GPU Discovery -Make sure you've set up the container runtime first as described in [docker.md](./docker.md) +When Ollama starts up, it takes inventory of the GPUs present in the system to determine compatibility and how much VRAM is available. Sometimes this discovery can fail to find your GPUs. In general, running the latest driver will yield the best results. -Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem +### Linux NVIDIA Troubleshooting -- Is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU. -- Is the uvm driver not loaded? `sudo nvidia-modprobe -u` +If you are using a container to run Ollama, make sure you've set up the container runtime first as described in [docker.md](./docker.md) + +Sometimes the Ollama can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem + +- If you are using a container, is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama wont be able to see your NVIDIA GPU. +- Is the uvm driver loaded? `sudo nvidia-modprobe -u` - Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm` - Try rebooting - Make sure you're running the latest nvidia drivers diff --git a/gpu/gpu.go b/gpu/gpu.go index 583bb79c6..29a3c1037 100644 --- a/gpu/gpu.go +++ b/gpu/gpu.go @@ -202,7 +202,7 @@ func GetGPUInfo() GpuInfoList { }() if !bootstrapped { - slog.Debug("Detecting GPUs") + slog.Info("looking for compatible GPUs") needRefresh = false cpuCapability = GetCPUCapability() var memInfo C.mem_info_t @@ -320,6 +320,9 @@ func GetGPUInfo() GpuInfoList { rocmGPUs = AMDGetGPUInfo() bootstrapped = true + if len(cudaGPUs) == 0 && len(rocmGPUs) == 0 && len(oneapiGPUs) == 0 { + slog.Info("no compatible GPUs were discovered") + } } // For detected GPUs, load library if not loaded @@ -514,7 +517,23 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) { defer C.free(unsafe.Pointer(lib)) C.nvcuda_init(lib, &resp) if resp.err != nil { - slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err)) + // Decide what log level based on the type of error message to help users understand why + msg := C.GoString(resp.err) + switch resp.cudaErr { + case C.CUDA_ERROR_INSUFFICIENT_DRIVER, C.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH: + slog.Warn("version mismatch between driver and cuda driver library - reboot or upgrade may be required", "library", libPath, "error", msg) + case C.CUDA_ERROR_NO_DEVICE: + slog.Info("no nvidia devices detected", "library", libPath) + case C.CUDA_ERROR_UNKNOWN: + slog.Warn("unknown error initializing cuda driver library", "library", libPath, "error", msg) + slog.Warn("see https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for more information") + default: + if strings.Contains(msg, "wrong ELF class") { + slog.Debug("skipping 32bit 
library", "library", libPath) + } else { + slog.Info("unable to load cuda driver library", "library", libPath, "error", msg) + } + } C.free(unsafe.Pointer(resp.err)) } else { return int(resp.num_devices), &resp.ch, libPath diff --git a/gpu/gpu_info_nvcuda.c b/gpu/gpu_info_nvcuda.c index abe140844..a1a38bfc2 100644 --- a/gpu/gpu_info_nvcuda.c +++ b/gpu/gpu_info_nvcuda.c @@ -7,6 +7,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { CUresult ret; resp->err = NULL; resp->num_devices = 0; + resp->cudaErr = CUDA_SUCCESS; const int buflen = 256; char buf[buflen + 1]; int i; @@ -38,6 +39,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { nvcuda_lib_path, msg); free(msg); resp->err = strdup(buf); + resp->cudaErr = -1; return; } @@ -52,6 +54,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { msg); free(msg); resp->err = strdup(buf); + resp->cudaErr = -1; return; } } @@ -61,12 +64,9 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { LOG(resp->ch.verbose, "cuInit err: %d\n", ret); UNLOAD_LIBRARY(resp->ch.handle); resp->ch.handle = NULL; - if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) { - resp->err = strdup("your nvidia driver is too old or missing. If you have a CUDA GPU please upgrade to run ollama"); - return; - } - snprintf(buf, buflen, "nvcuda init failure: %d", ret); + snprintf(buf, buflen, "cuda driver library init failure: %d", ret); resp->err = strdup(buf); + resp->cudaErr = ret; return; } @@ -91,6 +91,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) { resp->ch.handle = NULL; snprintf(buf, buflen, "unable to get device count: %d", ret); resp->err = strdup(buf); + resp->cudaErr = ret; return; } } @@ -106,13 +107,13 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) { CUuuid uuid = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; if (h.handle == NULL) { - resp->err = strdup("nvcuda handle isn't initialized"); + resp->err = strdup("cuda driver library handle isn't initialized"); return; } ret = (*h.cuDeviceGet)(&device, i); if (ret != CUDA_SUCCESS) { - snprintf(buf, buflen, "nvcuda device failed to initialize"); + snprintf(buf, buflen, "cuda driver library device failed to initialize"); resp->err = strdup(buf); return; } @@ -168,14 +169,14 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) { // To get memory we have to set (and release) a context ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device); if (ret != CUDA_SUCCESS) { - snprintf(buf, buflen, "nvcuda failed to get device context %d", ret); + snprintf(buf, buflen, "cuda driver library failed to get device context %d", ret); resp->err = strdup(buf); return; } ret = (*h.cuMemGetInfo_v2)(&memInfo.free, &memInfo.total); if (ret != CUDA_SUCCESS) { - snprintf(buf, buflen, "nvcuda device memory info lookup failure %d", ret); + snprintf(buf, buflen, "cuda driver library device memory info lookup failure %d", ret); resp->err = strdup(buf); // Best effort on failure... 
(*h.cuCtxDestroy)(ctx); @@ -193,7 +194,7 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) { ret = (*h.cuCtxDestroy)(ctx); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda failed to release device context %d", ret); + LOG(1, "cuda driver library failed to release device context %d", ret); } } @@ -206,7 +207,7 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) ret = (*h.cuDeviceGet)(&device, i); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda device failed to initialize"); + LOG(1, "cuda driver library device failed to initialize"); return; } @@ -214,13 +215,13 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) // To get memory we have to set (and release) a context ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda failed to get device context %d", ret); + LOG(1, "cuda driver library failed to get device context %d", ret); return; } ret = (*h.cuMemGetInfo_v2)(free, total); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda device memory info lookup failure %d", ret); + LOG(1, "cuda driver library device memory info lookup failure %d", ret); // Best effort on failure... (*h.cuCtxDestroy)(ctx); return; @@ -228,12 +229,12 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total) ret = (*h.cuCtxDestroy)(ctx); if (ret != CUDA_SUCCESS) { - LOG(1, "nvcuda failed to release device context %d", ret); + LOG(1, "cuda driver library failed to release device context %d", ret); } } void nvcuda_release(nvcuda_handle_t h) { - LOG(h.verbose, "releasing nvcuda library\n"); + LOG(h.verbose, "releasing cuda driver library\n"); UNLOAD_LIBRARY(h.handle); // TODO and other context release logic? h.handle = NULL; diff --git a/gpu/gpu_info_nvcuda.h b/gpu/gpu_info_nvcuda.h index f9654f641..ef2fe8a30 100644 --- a/gpu/gpu_info_nvcuda.h +++ b/gpu/gpu_info_nvcuda.h @@ -7,9 +7,12 @@ typedef enum cudaError_enum { CUDA_SUCCESS = 0, CUDA_ERROR_INVALID_VALUE = 1, - CUDA_ERROR_MEMORY_ALLOCATION = 2, + CUDA_ERROR_OUT_OF_MEMORY = 2, CUDA_ERROR_NOT_INITIALIZED = 3, CUDA_ERROR_INSUFFICIENT_DRIVER = 35, + CUDA_ERROR_NO_DEVICE = 100, + CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803, + CUDA_ERROR_UNKNOWN = 999, // Other values omitted for now... } CUresult; @@ -64,6 +67,7 @@ typedef struct nvcuda_init_resp { char *err; // If err is non-null handle is invalid nvcuda_handle_t ch; int num_devices; + CUresult cudaErr; } nvcuda_init_resp_t; void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp); From 6298f49816c2264f9bb77206ad1b015aa357e381 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 12:37:40 -0700 Subject: [PATCH 03/33] Fix clip model loading with unicode paths On windows, if the model dir contained unicode characters clip models would fail to load. This fixes the file name handling in clip.cpp to support utf16 on windows. 
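The underlying issue is that the narrow (char *) path constructor of std::ifstream on Windows interprets the name in the ANSI code page, so UTF-8 paths containing non-ASCII characters don't resolve to the right file. The patch below handles this by converting the path to UTF-16 with MultiByteToWideChar before opening the stream. A minimal standalone sketch of that pattern (illustration only — the helper name open_model_file is invented here; the actual change is applied inline in clip_model_load and uses malloc'd buffers):

    #include <fstream>
    #include <string>

    #if defined(_WIN32)
    #define WIN32_LEAN_AND_MEAN
    #include <windows.h>
    #endif

    // Open a file whose name is UTF-8 encoded. The narrow-path std::ifstream
    // constructor on Windows interprets the name in the ANSI code page, so
    // convert it to UTF-16 first; on other platforms use the path as-is.
    static std::ifstream open_model_file(const char * fname) {
    #if defined(_WIN32)
        int wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0);
        if (wlen == 0) {
            return std::ifstream();  // conversion failed; caller sees a closed stream
        }
        std::wstring wpath(wlen, L'\0');
        MultiByteToWideChar(CP_UTF8, 0, fname, -1, &wpath[0], wlen);
        return std::ifstream(wpath.c_str(), std::ios::binary);  // MSVC wide-path overload
    #else
        return std::ifstream(fname, std::ios::binary);
    #endif
    }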
--- llm/patches/08-clip-unicode.diff | 42 ++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 llm/patches/08-clip-unicode.diff diff --git a/llm/patches/08-clip-unicode.diff b/llm/patches/08-clip-unicode.diff new file mode 100644 index 000000000..53e5ee115 --- /dev/null +++ b/llm/patches/08-clip-unicode.diff @@ -0,0 +1,42 @@ +diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp +index 95fbe3d0..5a02a6ec 100644 +--- a/examples/llava/clip.cpp ++++ b/examples/llava/clip.cpp +@@ -32,6 +33,14 @@ + #include + #include + ++#if defined(_WIN32) ++#define WIN32_LEAN_AND_MEAN ++#ifndef NOMINMAX ++ #define NOMINMAX ++#endif ++#include ++#endif ++ + //#define CLIP_DEBUG_FUNCTIONS + + // RGB uint8 image +@@ -1055,7 +1064,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { + return nullptr; + } + ++#ifdef _WIN32 ++ int wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0); ++ if (!wlen) { ++ return NULL; ++ } ++ wchar_t * wbuf = (wchar_t *) malloc(wlen * sizeof(wchar_t)); ++ wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, wbuf, wlen); ++ if (!wlen) { ++ free(wbuf); ++ return NULL; ++ } ++ auto fin = std::ifstream(wbuf, std::ios::binary); ++ free(wbuf); ++#else + auto fin = std::ifstream(fname, std::ios::binary); ++#endif + if (!fin) { + LOG_TEE("cannot open model file for loading tensors\n"); + clip_free(new_clip); From 0e982bc1f47cfc7c36f49f925419f9039304925e Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 13:10:14 -0700 Subject: [PATCH 04/33] Fix corner cases on tmp cleaner on mac When ollama is running a long time, tmp cleaners can remove the runners. This tightens up a few corner cases on arm macs where we failed with "server cpu not listed in available servers map[]" --- llm/payload.go | 44 +++++++++++++++++++++++--------------------- llm/server.go | 15 ++++++++++++++- 2 files changed, 37 insertions(+), 22 deletions(-) diff --git a/llm/payload.go b/llm/payload.go index 9296db336..b402e1f24 100644 --- a/llm/payload.go +++ b/llm/payload.go @@ -38,7 +38,7 @@ func Init() error { } var variants []string - for v := range availableServers() { + for v := range getAvailableServers() { variants = append(variants, v) } slog.Info(fmt.Sprintf("Dynamic LLM libraries %v", variants)) @@ -50,7 +50,7 @@ func Init() error { // binary names may contain an optional variant separated by '_' // For example, "ollama_rocm_v6" and "ollama_rocm_v5" or "ollama_cpu" and "ollama_cpu_avx2" // Any library without a variant is the lowest common denominator -func availableServers() map[string]string { +func getAvailableServers() map[string]string { payloadsDir, err := gpu.PayloadsDir() if err != nil { slog.Error("payload lookup error", "error", err) @@ -80,7 +80,7 @@ func availableServers() map[string]string { // TODO - switch to metadata based mapping func serversForGpu(info gpu.GpuInfo) []string { // glob workDir for files that start with ollama_ - availableServers := availableServers() + availableServers := getAvailableServers() requested := info.Library if info.Variant != gpu.CPUCapabilityNone { requested += "_" + info.Variant.String() @@ -115,27 +115,29 @@ func serversForGpu(info gpu.GpuInfo) []string { servers = append(servers, alt...) 
} - // Load up the best CPU variant if not primary requested - if info.Library != "cpu" { - variant := gpu.GetCPUCapability() - // If no variant, then we fall back to default - // If we have a variant, try that if we find an exact match - // Attempting to run the wrong CPU instructions will panic the - // process - if variant != gpu.CPUCapabilityNone { - for cmp := range availableServers { - if cmp == "cpu_"+variant.String() { - servers = append(servers, cmp) - break + if !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") { + // Load up the best CPU variant if not primary requested + if info.Library != "cpu" { + variant := gpu.GetCPUCapability() + // If no variant, then we fall back to default + // If we have a variant, try that if we find an exact match + // Attempting to run the wrong CPU instructions will panic the + // process + if variant != gpu.CPUCapabilityNone { + for cmp := range availableServers { + if cmp == "cpu_"+variant.String() { + servers = append(servers, cmp) + break + } } + } else { + servers = append(servers, "cpu") } - } else { - servers = append(servers, "cpu") } - } - if len(servers) == 0 { - servers = []string{"cpu"} + if len(servers) == 0 { + servers = []string{"cpu"} + } } return servers @@ -147,7 +149,7 @@ func serverForCpu() string { return "metal" } variant := gpu.GetCPUCapability() - availableServers := availableServers() + availableServers := getAvailableServers() if variant != gpu.CPUCapabilityNone { for cmp := range availableServers { if cmp == "cpu_"+variant.String() { diff --git a/llm/server.go b/llm/server.go index 8b63cfbd5..4eb30e671 100644 --- a/llm/server.go +++ b/llm/server.go @@ -131,7 +131,20 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr return nil, errors.New("ollama supports only one lora adapter, but multiple were provided") } - availableServers := availableServers() + availableServers := getAvailableServers() + if len(availableServers) == 0 { + if runtime.GOOS != "windows" { + slog.Warn("llama server binary disappeared, reinitializing payloads") + err = Init() + if err != nil { + slog.Warn("failed to reinitialize payloads", "error", err) + return nil, err + } + availableServers = getAvailableServers() + } else { + return nil, finalErr + } + } var servers []string if cpuRunner != "" { servers = []string{cpuRunner} From 3b5a4a77f3a191e368af3412e5de9b38b4f80771 Mon Sep 17 00:00:00 2001 From: royjhan <65097070+royjhan@users.noreply.github.com> Date: Wed, 3 Jul 2024 13:46:23 -0700 Subject: [PATCH 05/33] Return Correct Prompt Eval Count Regardless of Cache Prompt (#5371) * openai compatibility * Revert "openai compatibility" This reverts commit d3f98a811e00fc497d889c8c45b0cfec5b64690c. 
* remove erroneous subtraction of prompt cache --- llm/ext_server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 3bc012521..099705998 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1732,7 +1732,7 @@ struct llama_server_context slot.n_past -= 1; } - slot.n_prompt_tokens_processed = slot.n_prompt_tokens - slot.n_past; + slot.n_prompt_tokens_processed = slot.n_prompt_tokens; if (slot.ga_n != 1) { From 3c75113e37cc2b5d9ad8cb5c21841437aab482cc Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Wed, 3 Jul 2024 14:47:42 -0700 Subject: [PATCH 06/33] Prevent loading models larger than total memory Users may not realize that the shiny new model they're trying to load fits on their disk but won't fit into system+GPU memory. Today we crash, but with this fix, we'll give them a better error message before even trying to load it. --- server/sched.go | 26 ++++++++++++++++++++++++++ server/sched_test.go | 12 ++++++++++++ 2 files changed, 38 insertions(+) diff --git a/server/sched.go b/server/sched.go index 71b535ae2..362430986 100644 --- a/server/sched.go +++ b/server/sched.go @@ -139,6 +139,11 @@ func (s *Scheduler) processPending(ctx context.Context) { } for { + cpus := s.getCpuFn() + var systemMem gpu.GpuInfo + if len(cpus) > 0 { + systemMem = cpus[0] + } var runnerToExpire *runnerRef s.loadedMu.Lock() runner := s.loaded[pending.model.ModelPath] @@ -192,6 +197,27 @@ func (s *Scheduler) processPending(ctx context.Context) { break } + // Block attempting to load a model larger than system memory + GPU memory + estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts) + maxSize := systemMem.FreeMemory + for _, gpu := range gpus { + if gpu.Library == "cpu" { + continue + } + if loadedCount == 0 { + // If no other models are loaded, set the limit based on what's available + maxSize += gpu.FreeMemory + } else { + // Other models could be unloaded, favor total memory for limit + maxSize += gpu.TotalMemory + } + } + if estimate.TotalSize > maxSize { + slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize)) + pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) + break + } + // Evaluate if the model will fit in the available system memory, or if we should unload a model first if len(gpus) == 1 && gpus[0].Library == "cpu" { // simplifying assumption of defaultParallel when in CPU mode diff --git a/server/sched_test.go b/server/sched_test.go index be0830a34..83075f749 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -199,6 +199,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario1a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario1a.req.errCh) + case err := <-scenario1a.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -212,6 +214,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario1a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario1b.req.errCh) + case err := <-scenario1b.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -230,6 +234,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario2a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario2a.req.errCh) + case err := <-scenario2a.req.errCh: +
t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -246,6 +252,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario3a.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3a.req.errCh) + case err := <-scenario3a.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -262,6 +270,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario3b.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3b.req.errCh) + case err := <-scenario3b.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } @@ -278,6 +288,8 @@ func TestRequests(t *testing.T) { require.Equal(t, resp.llama, scenario3c.srv) require.Empty(t, s.pendingReqCh) require.Empty(t, scenario3c.req.errCh) + case err := <-scenario3c.req.errCh: + t.Fatal(err.Error()) case <-ctx.Done(): t.Fatal("timeout") } From 955f2a4e035044866277e26abe74343117250f1a Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Tue, 2 Jul 2024 15:12:43 -0700 Subject: [PATCH 07/33] Only set default keep_alive on initial model load This change fixes the handling of keep_alive so that if client request omits the setting, we only set this on initial load. Once the model is loaded, if new requests leave this unset, we'll keep whatever keep_alive was there. --- envconfig/config.go | 31 ++++++++++++++++++++-- envconfig/config_test.go | 17 ++++++++++++ server/routes.go | 57 +++------------------------------------- server/sched.go | 14 +++++++--- server/sched_test.go | 22 ++++++++-------- 5 files changed, 70 insertions(+), 71 deletions(-) diff --git a/envconfig/config.go b/envconfig/config.go index c02c4878e..105b9af6e 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -4,12 +4,14 @@ import ( "errors" "fmt" "log/slog" + "math" "net" "os" "path/filepath" "runtime" "strconv" "strings" + "time" ) type OllamaHost struct { @@ -34,7 +36,7 @@ var ( // Set via OLLAMA_HOST in the environment Host *OllamaHost // Set via OLLAMA_KEEP_ALIVE in the environment - KeepAlive string + KeepAlive time.Duration // Set via OLLAMA_LLM_LIBRARY in the environment LLMLibrary string // Set via OLLAMA_MAX_LOADED_MODELS in the environment @@ -132,6 +134,7 @@ func init() { NumParallel = 0 // Autoselect MaxRunners = 0 // Autoselect MaxQueuedRequests = 512 + KeepAlive = 5 * time.Minute LoadConfig() } @@ -266,7 +269,10 @@ func LoadConfig() { } } - KeepAlive = clean("OLLAMA_KEEP_ALIVE") + ka := clean("OLLAMA_KEEP_ALIVE") + if ka != "" { + loadKeepAlive(ka) + } var err error ModelsDir, err = getModelsDir() @@ -344,3 +350,24 @@ func getOllamaHost() (*OllamaHost, error) { Port: port, }, nil } + +func loadKeepAlive(ka string) { + v, err := strconv.Atoi(ka) + if err != nil { + d, err := time.ParseDuration(ka) + if err == nil { + if d < 0 { + KeepAlive = time.Duration(math.MaxInt64) + } else { + KeepAlive = d + } + } + } else { + d := time.Duration(v) * time.Second + if d < 0 { + KeepAlive = time.Duration(math.MaxInt64) + } else { + KeepAlive = d + } + } +} diff --git a/envconfig/config_test.go b/envconfig/config_test.go index 7d923d629..a5d73fd7c 100644 --- a/envconfig/config_test.go +++ b/envconfig/config_test.go @@ -2,8 +2,10 @@ package envconfig import ( "fmt" + "math" "net" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,6 +25,21 @@ func TestConfig(t *testing.T) { t.Setenv("OLLAMA_FLASH_ATTENTION", "1") LoadConfig() require.True(t, FlashAttention) + t.Setenv("OLLAMA_KEEP_ALIVE", "") + LoadConfig() + require.Equal(t, 5*time.Minute, 
KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "3") + LoadConfig() + require.Equal(t, 3*time.Second, KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "1h") + LoadConfig() + require.Equal(t, 1*time.Hour, KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "-1s") + LoadConfig() + require.Equal(t, time.Duration(math.MaxInt64), KeepAlive) + t.Setenv("OLLAMA_KEEP_ALIVE", "-1") + LoadConfig() + require.Equal(t, time.Duration(math.MaxInt64), KeepAlive) } func TestClientFromEnvironment(t *testing.T) { diff --git a/server/routes.go b/server/routes.go index b14a146c1..ac6b713a7 100644 --- a/server/routes.go +++ b/server/routes.go @@ -9,7 +9,6 @@ import ( "io" "io/fs" "log/slog" - "math" "net" "net/http" "net/netip" @@ -17,7 +16,6 @@ import ( "os/signal" "path/filepath" "slices" - "strconv" "strings" "syscall" "time" @@ -56,8 +54,6 @@ func init() { gin.SetMode(mode) } -var defaultSessionDuration = 5 * time.Minute - func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options, error) { opts := api.DefaultOptions() if err := opts.FromMap(model.Options); err != nil { @@ -133,14 +129,7 @@ func (s *Server) GenerateHandler(c *gin.Context) { return } - var sessionDuration time.Duration - if req.KeepAlive == nil { - sessionDuration = getDefaultSessionDuration() - } else { - sessionDuration = req.KeepAlive.Duration - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration) + rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) var runner *runnerRef select { case runner = <-rCh: @@ -320,32 +309,6 @@ func (s *Server) GenerateHandler(c *gin.Context) { streamResponse(c, ch) } -func getDefaultSessionDuration() time.Duration { - if envconfig.KeepAlive != "" { - v, err := strconv.Atoi(envconfig.KeepAlive) - if err != nil { - d, err := time.ParseDuration(envconfig.KeepAlive) - if err != nil { - return defaultSessionDuration - } - - if d < 0 { - return time.Duration(math.MaxInt64) - } - - return d - } - - d := time.Duration(v) * time.Second - if d < 0 { - return time.Duration(math.MaxInt64) - } - return d - } - - return defaultSessionDuration -} - func (s *Server) EmbeddingsHandler(c *gin.Context) { var req api.EmbeddingRequest err := c.ShouldBindJSON(&req) @@ -380,14 +343,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) { return } - var sessionDuration time.Duration - if req.KeepAlive == nil { - sessionDuration = getDefaultSessionDuration() - } else { - sessionDuration = req.KeepAlive.Duration - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration) + rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) var runner *runnerRef select { case runner = <-rCh: @@ -1318,14 +1274,7 @@ func (s *Server) ChatHandler(c *gin.Context) { return } - var sessionDuration time.Duration - if req.KeepAlive == nil { - sessionDuration = getDefaultSessionDuration() - } else { - sessionDuration = req.KeepAlive.Duration - } - - rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration) + rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive) var runner *runnerRef select { case runner = <-rCh: diff --git a/server/sched.go b/server/sched.go index 71b535ae2..dc492cfb3 100644 --- a/server/sched.go +++ b/server/sched.go @@ -24,7 +24,7 @@ type LlmRequest struct { model *Model opts api.Options origNumCtx int // Track the initial ctx request - sessionDuration time.Duration + sessionDuration *api.Duration successCh chan *runnerRef errCh chan error schedAttempts uint @@ -75,7 +75,7 @@ 
func InitScheduler(ctx context.Context) *Scheduler { } // context must be canceled to decrement ref count and release the runner -func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration time.Duration) (chan *runnerRef, chan error) { +func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration *api.Duration) (chan *runnerRef, chan error) { if opts.NumCtx < 4 { opts.NumCtx = 4 } @@ -389,7 +389,9 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *Llm runner.expireTimer.Stop() runner.expireTimer = nil } - runner.sessionDuration = pending.sessionDuration + if pending.sessionDuration != nil { + runner.sessionDuration = pending.sessionDuration.Duration + } pending.successCh <- runner go func() { <-pending.ctx.Done() @@ -402,6 +404,10 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, if numParallel < 1 { numParallel = 1 } + sessionDuration := envconfig.KeepAlive + if req.sessionDuration != nil { + sessionDuration = req.sessionDuration.Duration + } llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts, numParallel) if err != nil { // some older models are not compatible with newer versions of llama.cpp @@ -419,7 +425,7 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, modelPath: req.model.ModelPath, llama: llama, Options: &req.opts, - sessionDuration: req.sessionDuration, + sessionDuration: sessionDuration, gpus: gpus, estimatedVRAM: llama.EstimatedVRAM(), estimatedTotal: llama.EstimatedTotal(), diff --git a/server/sched_test.go b/server/sched_test.go index be0830a34..d957927e6 100644 --- a/server/sched_test.go +++ b/server/sched_test.go @@ -44,7 +44,7 @@ func TestLoad(t *testing.T) { opts: api.DefaultOptions(), successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), - sessionDuration: 2, + sessionDuration: &api.Duration{Duration: 2 * time.Second}, } // Fail to load model first s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) { @@ -142,7 +142,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV ctx: scenario.ctx, model: model, opts: api.DefaultOptions(), - sessionDuration: 5 * time.Millisecond, + sessionDuration: &api.Duration{Duration: 5 * time.Millisecond}, successCh: make(chan *runnerRef, 1), errCh: make(chan error, 1), } @@ -156,18 +156,18 @@ func TestRequests(t *testing.T) { // Same model, same request scenario1a := newScenario(t, ctx, "ollama-model-1", 10) - scenario1a.req.sessionDuration = 5 * time.Millisecond + scenario1a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond} scenario1b := newScenario(t, ctx, "ollama-model-1", 11) scenario1b.req.model = scenario1a.req.model scenario1b.ggml = scenario1a.ggml - scenario1b.req.sessionDuration = 0 + scenario1b.req.sessionDuration = &api.Duration{Duration: 0} // simple reload of same model scenario2a := newScenario(t, ctx, "ollama-model-1", 20) tmpModel := *scenario1a.req.model scenario2a.req.model = &tmpModel scenario2a.ggml = scenario1a.ggml - scenario2a.req.sessionDuration = 5 * time.Millisecond + scenario2a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond} // Multiple loaded models scenario3a := newScenario(t, ctx, "ollama-model-3a", 1*format.GigaByte) @@ -318,11 +318,11 @@ func TestGetRunner(t *testing.T) { defer 
done() scenario1a := newScenario(t, ctx, "ollama-model-1a", 10) - scenario1a.req.sessionDuration = 0 + scenario1a.req.sessionDuration = &api.Duration{Duration: 0} scenario1b := newScenario(t, ctx, "ollama-model-1b", 10) - scenario1b.req.sessionDuration = 0 + scenario1b.req.sessionDuration = &api.Duration{Duration: 0} scenario1c := newScenario(t, ctx, "ollama-model-1c", 10) - scenario1c.req.sessionDuration = 0 + scenario1c.req.sessionDuration = &api.Duration{Duration: 0} envconfig.MaxQueuedRequests = 1 s := InitScheduler(ctx) s.getGpuFn = func() gpu.GpuInfoList { @@ -402,7 +402,7 @@ func TestPrematureExpired(t *testing.T) { case <-ctx.Done(): t.Fatal("timeout") } - time.Sleep(scenario1a.req.sessionDuration) + time.Sleep(scenario1a.req.sessionDuration.Duration) scenario1a.ctxDone() time.Sleep(20 * time.Millisecond) require.LessOrEqual(t, len(s.finishedReqCh), 1) @@ -423,7 +423,7 @@ func TestUseLoadedRunner(t *testing.T) { ctx: ctx, opts: api.DefaultOptions(), successCh: make(chan *runnerRef, 1), - sessionDuration: 2, + sessionDuration: &api.Duration{Duration: 2}, } finished := make(chan *LlmRequest) llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}} @@ -614,7 +614,7 @@ func TestAlreadyCanceled(t *testing.T) { dctx, done2 := context.WithCancel(ctx) done2() scenario1a := newScenario(t, dctx, "ollama-model-1", 10) - scenario1a.req.sessionDuration = 0 + scenario1a.req.sessionDuration = &api.Duration{Duration: 0} s := InitScheduler(ctx) slog.Info("scenario1a") s.pendingReqCh <- scenario1a.req From 0d16eb310ed26e5a438f482dbffe7687e106346e Mon Sep 17 00:00:00 2001 From: Anatoli Babenia Date: Thu, 4 Jul 2024 01:36:11 +0300 Subject: [PATCH 08/33] fix: use `envconfig.ModelsDir` directly (#4821) * Co-authored-by: Anatoli Babenia Co-authored-by: Maas Lalani --- envconfig/config.go | 4 ++-- server/modelpath.go | 21 +++------------------ 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/envconfig/config.go b/envconfig/config.go index 105b9af6e..62d661ebc 100644 --- a/envconfig/config.go +++ b/envconfig/config.go @@ -43,10 +43,10 @@ var ( MaxRunners int // Set via OLLAMA_MAX_QUEUE in the environment MaxQueuedRequests int - // Set via OLLAMA_MODELS in the environment - ModelsDir string // Set via OLLAMA_MAX_VRAM in the environment MaxVRAM uint64 + // Set via OLLAMA_MODELS in the environment + ModelsDir string // Set via OLLAMA_NOHISTORY in the environment NoHistory bool // Set via OLLAMA_NOPRUNE in the environment diff --git a/server/modelpath.go b/server/modelpath.go index 64f59c29a..3fdb4238f 100644 --- a/server/modelpath.go +++ b/server/modelpath.go @@ -103,18 +103,9 @@ func (mp ModelPath) GetShortTagname() string { return fmt.Sprintf("%s/%s/%s:%s", mp.Registry, mp.Namespace, mp.Repository, mp.Tag) } -// modelsDir returns the value of the OLLAMA_MODELS environment variable or the user's home directory if OLLAMA_MODELS is not set. -// The models directory is where Ollama stores its model files and manifests. -func modelsDir() (string, error) { - return envconfig.ModelsDir, nil -} - // GetManifestPath returns the path to the manifest file for the given model path, it is up to the caller to create the directory if it does not exist. 
func (mp ModelPath) GetManifestPath() (string, error) { - dir, err := modelsDir() - if err != nil { - return "", err - } + dir := envconfig.ModelsDir return filepath.Join(dir, "manifests", mp.Registry, mp.Namespace, mp.Repository, mp.Tag), nil } @@ -127,10 +118,7 @@ func (mp ModelPath) BaseURL() *url.URL { } func GetManifestPath() (string, error) { - dir, err := modelsDir() - if err != nil { - return "", err - } + dir := envconfig.ModelsDir path := filepath.Join(dir, "manifests") if err := os.MkdirAll(path, 0o755); err != nil { @@ -141,10 +129,7 @@ func GetManifestPath() (string, error) { } func GetBlobsPath(digest string) (string, error) { - dir, err := modelsDir() - if err != nil { - return "", err - } + dir := envconfig.ModelsDir // only accept actual sha256 digests pattern := "^sha256[:-][0-9a-fA-F]{64}$" From 4d71c559b21ec9207a328b824ce534bdbaf59f2d Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Wed, 3 Jul 2024 20:04:30 -0400 Subject: [PATCH 09/33] fix error detection by limiting model loading error parsing (#5472) --- llm/status.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/status.go b/llm/status.go index 0f56b7f99..d9f361155 100644 --- a/llm/status.go +++ b/llm/status.go @@ -25,7 +25,7 @@ var errorPrefixes = []string{ "CUDA error", "cudaMalloc failed", "\"ERR\"", - "architecture", + "error loading model", } func (w *StatusWriter) Write(b []byte) (int, error) { From 52abc8acb702cad0b58cee92721e64687f5a6c85 Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Mon, 13 May 2024 15:08:29 -0700 Subject: [PATCH 10/33] Document older win10 terminal problems We haven't found a workaround, so for now recommend updating. --- docs/troubleshooting.md | 5 +++++ docs/windows.md | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index bbb771831..484c4b6ce 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -89,3 +89,8 @@ Sometimes the Ollama can have difficulties initializing the GPU. When you check If none of those resolve the problem, gather additional information and file an issue: - Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs - Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia` + + +## Windows Terminal Errors + +Older versions of Windows 10 (e.g., 21H1) are known to have a bug where the standard terminal program does not display control characters correctly. This can result in long strings of characters like `←[?25h←[?25l` being displayed, sometimes erroring with `The parameter is incorrect`. To resolve this problem, please update to Windows 10 22H2 or newer.
diff --git a/docs/windows.md b/docs/windows.md index abc0eb300..69c2aa6d1 100644 --- a/docs/windows.md +++ b/docs/windows.md @@ -19,7 +19,7 @@ Logs will often be helpful in diagnosing the problem (see ## System Requirements -* Windows 10 or newer, Home or Pro +* Windows 10 22H2 or newer, Home or Pro * NVIDIA 452.39 or newer Drivers if you have an NVIDIA card * AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card From e9188e971a998faff7aabd867ebc0ef1dc7f672b Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 11:20:57 -0400 Subject: [PATCH 11/33] Fix assert on small embedding inputs (#5491) * Fix assert on small embedding inputs * Update llm/patches/09-pooling.diff --- llm/patches/09-pooling.diff | 60 +++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 llm/patches/09-pooling.diff diff --git a/llm/patches/09-pooling.diff b/llm/patches/09-pooling.diff new file mode 100644 index 000000000..348fbfdc4 --- /dev/null +++ b/llm/patches/09-pooling.diff @@ -0,0 +1,60 @@ +diff --git a/llama.cpp b/llama.cpp +index 61948751..61fe7b57 100644 +--- a/llama.cpp ++++ b/llama.cpp +@@ -7591,14 +7591,14 @@ struct llm_build_context { + } + + struct ggml_tensor * build_inp_mean() { +- lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens); ++ lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, cparams.n_seq_max); + cb(lctx.inp_mean, "inp_mean", -1); + ggml_set_input(lctx.inp_mean); + return lctx.inp_mean; + } + + struct ggml_tensor * build_inp_cls() { +- lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens); ++ lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, cparams.n_seq_max); + cb(lctx.inp_cls, "inp_cls", -1); + ggml_set_input(lctx.inp_cls); + return lctx.inp_cls; +@@ -12062,19 +12062,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer)); + + float * data = (float *) lctx.inp_mean->data; +- memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean)); ++ memset(lctx.inp_mean->data, 0, n_tokens * cparams.n_seq_max * ggml_element_size(lctx.inp_mean)); + + std::vector sum(n_tokens, 0); + for (int i = 0; i < n_tokens; ++i) { + const llama_seq_id seq_id = batch.seq_id[i][0]; +- +- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); +- + sum[seq_id] += 1; + } + +- std::vector div(n_tokens, 0.0f); +- for (int i = 0; i < n_tokens; ++i) { ++ std::vector div(cparams.n_seq_max, 0.0f); ++ for (uint32_t i = 0; i < cparams.n_seq_max; ++i) { + const uint64_t s = sum[i]; + if (s > 0) { + div[i] = 1.0f/float(s); +@@ -12094,14 +12091,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + + uint32_t * data = (uint32_t *) lctx.inp_cls->data; +- memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); ++ memset(lctx.inp_cls->data, 0, cparams.n_seq_max * ggml_element_size(lctx.inp_cls)); + + for (int i = 0; i < n_tokens; ++i) { + const llama_seq_id seq_id = batch.seq_id[i][0]; + const llama_pos pos = batch.pos[i]; +- +- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS"); +- + if (pos == 0) { + data[seq_id] = i; + } From d89454de805c6d9507796cf2a262986db43ed849 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 12:32:47 -0400 Subject: [PATCH 12/33] Use slot 
with cached prompt instead of least recently used (#5492) * Use common prefix to select slot * actually report `longest` --- llm/ext_server/server.cpp | 40 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 099705998..00a15b4a3 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1382,12 +1382,50 @@ struct llama_server_context } } + std::string common_prefix(const std::string& str1, const std::string& str2) { + auto mismatch_pair = std::mismatch(str1.begin(), str1.end(), str2.begin()); + return std::string(str1.begin(), mismatch_pair.first); + } + + // Find the slot that has the greatest common prefix + server_slot *prefix_slot(const json &prompt) { + if (!prompt.is_string()) { + return nullptr; + } + + std::string prompt_str = prompt.get(); + server_slot *slot = nullptr; + size_t longest = 0; + + for (server_slot &s : slots) { + if (s.available() && s.prompt.is_string()) { + std::string s_prompt = s.prompt.get(); + std::string prefix = common_prefix(s_prompt, prompt_str); + + if (prefix.size() > longest) { + slot = &s; + longest = prefix.size(); + } + } + } + + if (!slot) { + return get_slot(-1); + } + + LOG_INFO("slot with common prefix found", {{ + "slot_id", slot->id, + "characters", longest + }}); + return slot; + } + void process_single_task(task_server& task) { switch (task.type) { case TASK_TYPE_COMPLETION: { - server_slot *slot = get_slot(json_value(task.data, "slot_id", -1)); + server_slot *slot = prefix_slot(task.data["prompt"]); if (slot == nullptr) { // if no slot is available, we defer this task for processing later From 8f8e736b131510c8707bed5886b343906cb74a24 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 13:25:58 -0400 Subject: [PATCH 13/33] update llama.cpp submodule to `d7fd29f` (#5475) --- docs/development.md | 2 +- llm/ext_server/CMakeLists.txt | 26 +- llm/generate/gen_darwin.sh | 16 +- llm/generate/gen_linux.sh | 36 +-- llm/generate/gen_windows.ps1 | 44 ++-- llm/llama.cpp | 2 +- llm/llm.go | 14 +- llm/patches/01-load-progress.diff | 14 +- llm/patches/03-load_exception.diff | 24 +- llm/patches/04-metal.diff | 6 +- llm/patches/05-default-pretokenizer.diff | 18 +- llm/patches/06-qwen2.diff | 6 +- llm/patches/07-embeddings.diff | 45 ++++ llm/patches/07-gemma.diff | 305 ----------------------- llm/patches/09-pooling.diff | 14 +- 15 files changed, 150 insertions(+), 422 deletions(-) create mode 100644 llm/patches/07-embeddings.diff delete mode 100644 llm/patches/07-gemma.diff diff --git a/docs/development.md b/docs/development.md index 2a6886a43..cd6c41af5 100644 --- a/docs/development.md +++ b/docs/development.md @@ -104,7 +104,7 @@ like to use. For example, to compile an optimized binary for an Intel i9-9880H, you might use: ``` -OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./... +OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on" go generate ./... go build . 
``` diff --git a/llm/ext_server/CMakeLists.txt b/llm/ext_server/CMakeLists.txt index db7d52dcc..9de50739c 100644 --- a/llm/ext_server/CMakeLists.txt +++ b/llm/ext_server/CMakeLists.txt @@ -1,14 +1,14 @@ - -set(TARGET ollama_llama_server) -option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) -install(TARGETS ${TARGET} RUNTIME) -target_compile_definitions(${TARGET} PRIVATE - SERVER_VERBOSE=$ -) -target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT}) -if (WIN32) - TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) -endif() + +set(TARGET ollama_llama_server) +option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) +install(TARGETS ${TARGET} RUNTIME) +target_compile_definitions(${TARGET} PRIVATE + SERVER_VERBOSE=$ +) +target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) +if (WIN32) + TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) +endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) \ No newline at end of file diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh index 721a9ae80..02577545a 100755 --- a/llm/generate/gen_darwin.sh +++ b/llm/generate/gen_darwin.sh @@ -18,16 +18,16 @@ sign() { fi } -COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DLLAMA_METAL_EMBED_LIBRARY=on -DLLAMA_OPENMP=off" +COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off" case "${GOARCH}" in "amd64") - COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_NATIVE=off" + COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DGGML_METAL=off -DGGML_NATIVE=off" # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_BLAS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build @@ -37,7 +37,7 @@ case "${GOARCH}" in # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta) # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_BLAS=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/cpu" echo "Building LCD CPU" build @@ -49,7 +49,7 @@ case "${GOARCH}" in # Approximately 400% faster than LCD on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_BLAS=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + 
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/cpu_avx" echo "Building AVX CPU" build @@ -61,7 +61,7 @@ case "${GOARCH}" in # Approximately 10% faster than AVX on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=on -DLLAMA_BLAS=off -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=on -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2" echo "Building AVX2 CPU" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation" @@ -75,14 +75,14 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_BLAS=off -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then init_vars - CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DLLAMA_ACCELERATE=on -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=on ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}/metal" EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders" build diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 28ce1f21d..c36862520 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -51,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then export CUDACXX=$(command -v nvcc) fi fi -COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off" +COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off" source $(dirname $0)/gen_common.sh init_vars git_module_setup @@ -64,7 +64,7 @@ if [ -z "${OLLAMA_SKIP_STATIC_GENERATE}" -o "${OLLAMA_CPU_TARGET}" = "static" ]; # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off ${CMAKE_DEFS}" + CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DGGML_NATIVE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}_static" echo "Building static library" build @@ -84,22 +84,22 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then compress else # Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512 - # -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer - # 
-DLLAMA_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX) - # -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen - # -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver + # -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer + # -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX) + # -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen + # -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver # Note: the following seem to yield slower results than AVX2 - ymmv - # -DLLAMA_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT) - # -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake - # -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake + # -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT) + # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake + # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake - COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off" + COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off" if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then # # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta) # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu" echo "Building LCD CPU" build @@ -116,7 +116,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # Approximately 400% faster than LCD on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu_avx" echo "Building AVX CPU" build @@ -129,7 +129,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # Approximately 10% faster than AVX on same CPU # init_vars - CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu_avx2" echo "Building AVX2 CPU" build @@ -170,15 +170,15 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then # # CUDA compute < 6.0 lacks proper FP16 support on ARM. # Disabling has minimal performance effect while maintaining compatibility. 
- ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off" + ARM64_DEFS="-DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_CUDA_F16=off" fi # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\"" - CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" + CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}" echo "Building custom CUDA GPU" else - CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}" + CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DGGML_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat" fi CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}" @@ -216,7 +216,7 @@ if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then init_vars source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI CC=icx - CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL=ON -DLLAMA_SYCL_F16=OFF" + CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DGGML_SYCL_F16=OFF" BUILD_DIR="../build/linux/${ARCH}/oneapi" EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb" DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it @@ -254,7 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. 
|| true) fi init_vars - CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)" + CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)" # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\"" diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index e217a0382..5c6943502 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -39,8 +39,8 @@ function init_vars { } $script:cmakeDefs = @( "-DBUILD_SHARED_LIBS=on", - "-DLLAMA_NATIVE=off", - "-DLLAMA_OPENMP=off" + "-DGGML_NATIVE=off", + "-DGGML_OPENMP=off" ) $script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on") $script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower() @@ -182,9 +182,9 @@ function cleanup { } -# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer -# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen -# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver +# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer +# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen +# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver function build_static() { @@ -204,13 +204,13 @@ function build_static() { "-DCMAKE_C_COMPILER=gcc.exe", "-DCMAKE_CXX_COMPILER=g++.exe", "-DBUILD_SHARED_LIBS=off", - "-DLLAMA_NATIVE=off", - "-DLLAMA_AVX=off", - "-DLLAMA_AVX2=off", - "-DLLAMA_AVX512=off", - "-DLLAMA_F16C=off", - "-DLLAMA_FMA=off", - "-DLLAMA_OPENMP=off") + "-DGGML_NATIVE=off", + "-DGGML_AVX=off", + "-DGGML_AVX2=off", + "-DGGML_AVX512=off", + "-DGGML_F16C=off", + "-DGGML_FMA=off", + "-DGGML_OPENMP=off") $script:buildDir="../build/windows/${script:ARCH}_static" write-host "Building static library" build @@ -224,7 +224,7 @@ function build_cpu($gen_arch) { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) { # remaining llama.cpp builds use MSVC init_vars - $script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DLLAMA_AVX=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs + $script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs $script:buildDir="../build/windows/${script:ARCH}/cpu" $script:distDir="$script:DIST_BASE\cpu" write-host "Building LCD CPU" @@ -239,7 +239,7 @@ function build_cpu($gen_arch) { function build_cpu_avx() { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) { init_vars - $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs + $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs $script:buildDir="../build/windows/${script:ARCH}/cpu_avx" $script:distDir="$script:DIST_BASE\cpu_avx" 
write-host "Building AVX CPU" @@ -254,7 +254,7 @@ function build_cpu_avx() { function build_cpu_avx2() { if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) { init_vars - $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=on", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=on", "-DLLAMA_F16C=on") + $script:cmakeDefs + $script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=on", "-DGGML_AVX512=off", "-DGGML_FMA=on", "-DGGML_F16C=on") + $script:cmakeDefs $script:buildDir="../build/windows/${script:ARCH}/cpu_avx2" $script:distDir="$script:DIST_BASE\cpu_avx2" write-host "Building AVX2 CPU" @@ -279,9 +279,9 @@ function build_cuda() { $script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT" $script:cmakeDefs += @( "-A", "x64", - "-DLLAMA_CUDA=ON", - "-DLLAMA_AVX=on", - "-DLLAMA_AVX2=off", + "-DGGML_CUDA=ON", + "-DGGML_AVX=on", + "-DGGML_AVX2=off", "-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR", "-DCMAKE_CUDA_FLAGS=-t8", "-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}" @@ -319,7 +319,7 @@ function build_oneapi() { $script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT" $script:cmakeDefs += @( "-G", "MinGW Makefiles", - "-DLLAMA_SYCL=ON", + "-DGGML_SYCL=ON", "-DCMAKE_C_COMPILER=icx", "-DCMAKE_CXX_COMPILER=icx", "-DCMAKE_BUILD_TYPE=Release" @@ -365,10 +365,10 @@ function build_rocm() { "-G", "Ninja", "-DCMAKE_C_COMPILER=clang.exe", "-DCMAKE_CXX_COMPILER=clang++.exe", - "-DLLAMA_HIPBLAS=on", + "-DGGML_HIPBLAS=on", "-DHIP_PLATFORM=amd", - "-DLLAMA_AVX=on", - "-DLLAMA_AVX2=off", + "-DGGML_AVX=on", + "-DGGML_AVX2=off", "-DCMAKE_POSITION_INDEPENDENT_CODE=on", "-DAMDGPU_TARGETS=$(amdGPUs)", "-DGPU_TARGETS=$(amdGPUs)" diff --git a/llm/llama.cpp b/llm/llama.cpp index 7c26775ad..d7fd29fff 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit 7c26775adb579e92b59c82e8084c07a1d0f75e9c +Subproject commit d7fd29fff16456ce9c3a23fd2d09a66256b05aff diff --git a/llm/llm.go b/llm/llm.go index 2a0c4b91a..157176246 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,12 +1,12 @@ package llm -// #cgo CFLAGS: -Illama.cpp -// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/libllama.a -lstdc++ -// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/libllama.a -lstdc++ -// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/libllama.a -static -lstdc++ -// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/libllama.a -static -lstdc++ -// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/libllama.a -lstdc++ -// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/libllama.a -lstdc++ +// #cgo CFLAGS: -Illama.cpp/include -Illama.cpp/ggml/include +// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/src/libllama.a ${SRCDIR}/build/darwin/arm64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal +// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/src/libllama.a ${SRCDIR}/build/darwin/x86_64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal +// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/src/libllama.a ${SRCDIR}/build/windows/amd64_static/ggml/src/libggml.a -static -lstdc++ +// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/src/libllama.a ${SRCDIR}/build/windows/arm64_static/ggml/src/libggml.a -static -lstdc++ +// #cgo linux,amd64 LDFLAGS: 
${SRCDIR}/build/linux/x86_64_static/src/libllama.a ${SRCDIR}/build/linux/x86_64_static/ggml/src/libggml.a -lstdc++ +// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml. -lstdc++ // #include // #include "llama.h" import "C" diff --git a/llm/patches/01-load-progress.diff b/llm/patches/01-load-progress.diff index be5286091..a053c1c2c 100644 --- a/llm/patches/01-load-progress.diff +++ b/llm/patches/01-load-progress.diff @@ -1,8 +1,8 @@ diff --git a/common/common.cpp b/common/common.cpp -index 73ff0e85..6adb1a92 100644 +index 2c05a4d4..927f0e3d 100644 --- a/common/common.cpp +++ b/common/common.cpp -@@ -2447,6 +2447,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & +@@ -2093,6 +2093,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & mparams.use_mmap = params.use_mmap; mparams.use_mlock = params.use_mlock; mparams.check_tensors = params.check_tensors; @@ -12,10 +12,10 @@ index 73ff0e85..6adb1a92 100644 mparams.kv_overrides = NULL; } else { diff --git a/common/common.h b/common/common.h -index 58ed72f4..0bb2605e 100644 +index 65c0ef81..ebca2c77 100644 --- a/common/common.h +++ b/common/common.h -@@ -180,6 +180,13 @@ struct gpt_params { +@@ -184,6 +184,13 @@ struct gpt_params { std::string mmproj = ""; // path to multimodal projector std::vector image; // path to image file(s) @@ -26,6 +26,6 @@ index 58ed72f4..0bb2605e 100644 + // context pointer passed to the progress callback + void * progress_callback_user_data; + - // server params - int32_t port = 8080; // server listens on this network port - int32_t timeout_read = 600; // http read timeout in seconds + // embedding + bool embedding = false; // get only sentence embedding + int32_t embd_normalize = 2; // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm) diff --git a/llm/patches/03-load_exception.diff b/llm/patches/03-load_exception.diff index eb245c2a9..026661963 100644 --- a/llm/patches/03-load_exception.diff +++ b/llm/patches/03-load_exception.diff @@ -1,17 +1,8 @@ -From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001 -From: Michael Yang -Date: Thu, 23 May 2024 11:18:45 -0700 -Subject: [PATCH] throw exception on load errors - ---- - llama.cpp | 25 ++++++++++++++++--------- - 1 file changed, 16 insertions(+), 9 deletions(-) - -diff --git a/llama.cpp b/llama.cpp -index 15c66077..8ba90b6a 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam +diff --git a/src/llama.cpp b/src/llama.cpp +index 73f52435..58a00fb1 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -7241,7 +7241,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam } } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); @@ -20,7 +11,7 @@ index 15c66077..8ba90b6a 100644 } return 0; -@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file( +@@ -17564,16 +17564,23 @@ struct llama_model * llama_load_model_from_file( } model->rpc_servers.push_back(servers); } @@ -52,6 +43,3 @@ index 15c66077..8ba90b6a 100644 } return model; --- -2.45.1 - diff --git a/llm/patches/04-metal.diff b/llm/patches/04-metal.diff index f8fa7db76..e63732e70 100644 --- a/llm/patches/04-metal.diff +++ b/llm/patches/04-metal.diff @@ -1,7 +1,7 @@ -diff --git a/ggml-metal.m b/ggml-metal.m +diff --git 
a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m index 0207b787..b5e9884b 100644 ---- a/ggml-metal.m -+++ b/ggml-metal.m +--- a/ggml/src/ggml-metal.m ++++ b/ggml/src/ggml-metal.m @@ -1396,27 +1396,23 @@ static enum ggml_status ggml_metal_graph_compute( // to the matrix-vector kernel int ne11_mm_min = 1; diff --git a/llm/patches/05-default-pretokenizer.diff b/llm/patches/05-default-pretokenizer.diff index 2a2e7306e..f4eaced72 100644 --- a/llm/patches/05-default-pretokenizer.diff +++ b/llm/patches/05-default-pretokenizer.diff @@ -1,8 +1,8 @@ -diff --git a/llama.cpp b/llama.cpp -index 61948751..4b72a293 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -4824,16 +4824,7 @@ static void llm_load_vocab( +diff --git a/src/llama.cpp b/src/llama.cpp +index 73f52435..2b81b4bd 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -5092,16 +5092,7 @@ static void llm_load_vocab( // for now, only BPE models have pre-tokenizers if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { @@ -20,13 +20,13 @@ index 61948751..4b72a293 100644 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } else if ( tokenizer_pre == "llama3" || -@@ -4888,7 +4879,8 @@ static void llm_load_vocab( - tokenizer_pre == "poro-chat") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO; +@@ -5164,7 +5155,8 @@ static void llm_load_vocab( + tokenizer_pre == "jais") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS; } else { - throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); + LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__); + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } - } else { + } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; diff --git a/llm/patches/06-qwen2.diff b/llm/patches/06-qwen2.diff index d7b0c1555..1c7109f6f 100644 --- a/llm/patches/06-qwen2.diff +++ b/llm/patches/06-qwen2.diff @@ -1,7 +1,7 @@ -diff --git a/llama.cpp b/llama.cpp +diff --git a/src/llama.cpp b/src/llama.cpp index 40d2ec2c..f34eb79a 100644 ---- a/llama.cpp -+++ b/llama.cpp +--- a/src/llama.cpp ++++ b/src/llama.cpp @@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv( struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); cb(kq, "kq", il); diff --git a/llm/patches/07-embeddings.diff b/llm/patches/07-embeddings.diff new file mode 100644 index 000000000..a84e3b06c --- /dev/null +++ b/llm/patches/07-embeddings.diff @@ -0,0 +1,45 @@ +diff --git a/src/llama.cpp b/src/llama.cpp +index 1fe2b9f7..a43312a7 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -13689,7 +13689,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) { + const auto n_embd = hparams.n_embd; + + // TODO: use a per-batch flag for logits presence instead +- const bool has_logits = !cparams.embeddings; ++ const bool has_logits = cparams.causal_attn; + const bool has_embd = lctx.is_encoding || (cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE)); + + const size_t logits_size = has_logits ? 
n_vocab*n_outputs_max : 0; +@@ -13959,17 +13959,25 @@ static int llama_decode_internal( + // no output + res = nullptr; + embd = nullptr; +- } else if (cparams.embeddings) { +- res = nullptr; // do not extract logits for embedding case +- embd = gf->nodes[gf->n_nodes - 1]; +- if (strcmp(embd->name, "result_embd_pooled") != 0) { +- embd = gf->nodes[gf->n_nodes - 2]; ++ } ++ ++ if (cparams.embeddings) { ++ for (int i = gf->n_nodes - 1; i >= 0; --i) { ++ embd = gf->nodes[i]; ++ if (strcmp(embd->name, "result_embd_pooled") == 0) { ++ break; ++ } + } + GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor"); +- } else { ++ } else { + embd = nullptr; // do not extract embeddings when not needed + GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor"); + } ++ ++ if (!cparams.causal_attn) { ++ res = nullptr; // do not extract logits when not needed ++ } ++ + // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); + + ggml_backend_sched_alloc_graph(lctx.sched, gf); diff --git a/llm/patches/07-gemma.diff b/llm/patches/07-gemma.diff deleted file mode 100644 index 86eac3d17..000000000 --- a/llm/patches/07-gemma.diff +++ /dev/null @@ -1,305 +0,0 @@ -From 5cadb45f39d001ffbad95b690d6cf0abcb4a6d96 Mon Sep 17 00:00:00 2001 -From: Ollama maintainers -Date: Wed, 26 Jun 2024 16:18:09 -0700 -Subject: [PATCH] Architecture support - ---- - llama.cpp | 194 +++++++++++++++++++++++++++++++++++++++++++++++++++++- - 1 file changed, 193 insertions(+), 1 deletion(-) - -diff --git a/llama.cpp b/llama.cpp -index 61948751..3b4196f5 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -217,6 +217,7 @@ enum llm_arch { - LLM_ARCH_INTERNLM2, - LLM_ARCH_MINICPM, - LLM_ARCH_GEMMA, -+ LLM_ARCH_GEMMA2, - LLM_ARCH_STARCODER2, - LLM_ARCH_MAMBA, - LLM_ARCH_XVERSE, -@@ -255,6 +256,7 @@ static const std::map LLM_ARCH_NAMES = { - { LLM_ARCH_INTERNLM2, "internlm2" }, - { LLM_ARCH_MINICPM, "minicpm" }, - { LLM_ARCH_GEMMA, "gemma" }, -+ { LLM_ARCH_GEMMA2, "gemma2" }, - { LLM_ARCH_STARCODER2, "starcoder2" }, - { LLM_ARCH_MAMBA, "mamba" }, - { LLM_ARCH_XVERSE, "xverse" }, -@@ -464,10 +466,12 @@ enum llm_tensor { - LLM_TENSOR_ATTN_NORM, - LLM_TENSOR_ATTN_NORM_2, - LLM_TENSOR_ATTN_OUT_NORM, -+ LLM_TENSOR_ATTN_POST_NORM, - LLM_TENSOR_ATTN_ROT_EMBD, - LLM_TENSOR_FFN_GATE_INP, - LLM_TENSOR_FFN_GATE_INP_SHEXP, - LLM_TENSOR_FFN_NORM, -+ LLM_TENSOR_FFN_POST_NORM, - LLM_TENSOR_FFN_GATE, - LLM_TENSOR_FFN_DOWN, - LLM_TENSOR_FFN_UP, -@@ -960,6 +964,24 @@ static const std::map> LLM_TENSOR_NA - { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, - }, - }, -+ { -+ LLM_ARCH_GEMMA2, -+ { -+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, -+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, -+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, -+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, -+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, -+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, -+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, -+ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, -+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, -+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, -+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, -+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, -+ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, -+ }, -+ }, - { - LLM_ARCH_STARCODER2, - { -@@ -1941,6 +1963,8 @@ enum e_model { - MODEL_8x22B, - MODEL_16x12B, - MODEL_10B_128x3_66B, -+ MODEL_9B, -+ MODEL_27B, - }; - - static const size_t kiB = 1024; -@@ -2114,6 +2138,7 @@ struct 
llama_layer { - struct ggml_tensor * attn_out_norm_b; - struct ggml_tensor * attn_q_a_norm; - struct ggml_tensor * attn_kv_a_norm; -+ struct ggml_tensor * attn_post_norm; - - // attention - struct ggml_tensor * wq; -@@ -2136,6 +2161,7 @@ struct llama_layer { - // normalization - struct ggml_tensor * ffn_norm; - struct ggml_tensor * ffn_norm_b; -+ struct ggml_tensor * ffn_post_norm; - struct ggml_tensor * layer_out_norm; - struct ggml_tensor * layer_out_norm_b; - struct ggml_tensor * ffn_norm_exps; -@@ -4529,6 +4555,16 @@ static void llm_load_hparams( - } - } break; - case LLM_ARCH_GEMMA: -+ { -+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); -+ -+ switch (hparams.n_layer) { -+ case 18: model.type = e_model::MODEL_9B; break; -+ case 28: model.type = e_model::MODEL_27B; break; -+ default: model.type = e_model::MODEL_UNKNOWN; -+ } -+ } break; -+ case LLM_ARCH_GEMMA2: - { - ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - -@@ -6305,6 +6341,40 @@ static bool llm_load_tensors( - layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); - } - } break; -+ case LLM_ARCH_GEMMA2: -+ { -+ model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); -+ -+ // output -+ model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); -+ model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading -+ -+ const int64_t n_ff = hparams.n_ff; -+ const int64_t n_embd_head_k = hparams.n_embd_head_k; -+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); -+ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); -+ -+ for (uint32_t i = 0; i < n_layer; ++i) { -+ ggml_context * ctx_layer = ctx_for_layer(i); -+ ggml_context * ctx_split = ctx_for_layer_split(i); -+ -+ auto & layer = model.layers[i]; -+ -+ layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); -+ -+ layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * hparams.n_head}); -+ layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}); -+ layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}); -+ layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * hparams.n_head, n_embd}); -+ layer.attn_post_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}); -+ -+ layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); -+ layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); -+ layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); -+ layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); -+ layer.ffn_post_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}); -+ } -+ } break; - case LLM_ARCH_STARCODER2: - { - model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); -@@ -10614,6 +10684,123 @@ struct llm_build_context { - return gf; - } - -+ struct ggml_cgraph * build_gemma2() { -+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); -+ -+ const int64_t 
n_embd_head_k = hparams.n_embd_head_k; -+ -+ struct ggml_tensor * cur; -+ struct ggml_tensor * inpL; -+ -+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb); -+ -+ inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd)); -+ cb(inpL, "inp_scaled", -1); -+ -+ // inp_pos - contains the positions -+ struct ggml_tensor * inp_pos = build_inp_pos(); -+ -+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads) -+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask(); -+ -+ for (int il = 0; il < n_layer; ++il) { -+ // norm -+ cur = llm_build_norm(ctx0, inpL, hparams, -+ model.layers[il].attn_norm, NULL, -+ LLM_NORM_RMS, cb, il); -+ cb(cur, "attn_norm", il); -+ -+ // self-attention -+ { -+ // compute Q and K and RoPE them -+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur); -+ cb(Qcur, "Qcur", il); -+ -+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur); -+ cb(Kcur, "Kcur", il); -+ -+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur); -+ cb(Vcur, "Vcur", il); -+ -+ Qcur = ggml_rope_ext( -+ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr, -+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale, -+ ext_factor, attn_factor, beta_fast, beta_slow); -+ cb(Qcur, "Qcur", il); -+ -+ Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); -+ cb(Qcur, "Qcur_scaled", il); -+ -+ Kcur = ggml_rope_ext( -+ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr, -+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale, -+ ext_factor, attn_factor, beta_fast, beta_slow); -+ cb(Kcur, "Kcur", il); -+ -+ cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf, -+ model.layers[il].wo, NULL, -+ Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il); -+ } -+ -+ if (il == n_layer - 1) { -+ // skip computing output for unused tokens -+ struct ggml_tensor * inp_out_ids = build_inp_out_ids(); -+ cur = ggml_get_rows(ctx0, cur, inp_out_ids); -+ inpL = ggml_get_rows(ctx0, inpL, inp_out_ids); -+ } -+ -+ cur = llm_build_norm(ctx0, cur, hparams, -+ model.layers[il].attn_post_norm, NULL, -+ LLM_NORM_RMS, cb, il); -+ cb(cur, "attn_post_norm", il); -+ -+ struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL); -+ cb(sa_out, "sa_out", il); -+ -+ cur = llm_build_norm(ctx0, sa_out, hparams, -+ model.layers[il].ffn_norm, NULL, -+ LLM_NORM_RMS, cb, il); -+ cb(cur, "ffn_norm", il); -+ -+ // feed-forward network -+ { -+ cur = llm_build_ffn(ctx0, cur, -+ model.layers[il].ffn_up, NULL, -+ model.layers[il].ffn_gate, NULL, -+ model.layers[il].ffn_down, NULL, -+ NULL, -+ LLM_FFN_GELU, LLM_FFN_PAR, cb, il); -+ cb(cur, "ffn_out", il); -+ } -+ -+ cur = llm_build_norm(ctx0, cur, hparams, -+ model.layers[il].ffn_post_norm, NULL, -+ LLM_NORM_RMS, cb, -1); -+ cb(cur, "ffn_post_norm", -1); -+ -+ cur = ggml_add(ctx0, cur, sa_out); -+ cb(cur, "l_out", il); -+ -+ // input for next layer -+ inpL = cur; -+ } -+ -+ cur = inpL; -+ -+ cur = llm_build_norm(ctx0, cur, hparams, -+ model.output_norm, NULL, -+ LLM_NORM_RMS, cb, -1); -+ cb(cur, "result_norm", -1); -+ -+ // lm_head -+ cur = ggml_mul_mat(ctx0, model.output, cur); -+ cb(cur, "result_output", -1); -+ -+ ggml_build_forward_expand(gf, cur); -+ -+ return gf; -+ } -+ - struct ggml_cgraph * build_starcoder2() { - struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false); - -@@ -11847,6 +12034,10 @@ static struct ggml_cgraph * llama_build_graph( - { - result = llm.build_gemma(); - } break; -+ 
case LLM_ARCH_GEMMA2: -+ { -+ result = llm.build_gemma2(); -+ } break; - case LLM_ARCH_STARCODER2: - { - result = llm.build_starcoder2(); -@@ -16671,6 +16862,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { - case LLM_ARCH_PHI2: - case LLM_ARCH_PHI3: - case LLM_ARCH_GEMMA: -+ case LLM_ARCH_GEMMA2: - case LLM_ARCH_STARCODER2: - case LLM_ARCH_GPTNEOX: - return LLAMA_ROPE_TYPE_NEOX; -@@ -18551,7 +18743,7 @@ static int32_t llama_chat_apply_template_internal( - if (add_ass) { - ss << "assistant\n"; - } -- } else if (tmpl == "gemma" || tmpl.find("") != std::string::npos) { -+ } else if (tmpl == "gemma" || tmpl == "gemma2" || tmpl.find("") != std::string::npos) { - // google/gemma-7b-it - std::string system_prompt = ""; - for (auto message : chat) { --- -2.45.2 - diff --git a/llm/patches/09-pooling.diff b/llm/patches/09-pooling.diff index 348fbfdc4..2e4fe11ee 100644 --- a/llm/patches/09-pooling.diff +++ b/llm/patches/09-pooling.diff @@ -1,8 +1,8 @@ -diff --git a/llama.cpp b/llama.cpp -index 61948751..61fe7b57 100644 ---- a/llama.cpp -+++ b/llama.cpp -@@ -7591,14 +7591,14 @@ struct llm_build_context { +diff --git a/src/llama.cpp b/src/llama.cpp +index 721b8f4e..cfe7ac40 100644 +--- a/src/llama.cpp ++++ b/src/llama.cpp +@@ -8420,14 +8420,14 @@ struct llm_build_context { } struct ggml_tensor * build_inp_mean() { @@ -19,7 +19,7 @@ index 61948751..61fe7b57 100644 cb(lctx.inp_cls, "inp_cls", -1); ggml_set_input(lctx.inp_cls); return lctx.inp_cls; -@@ -12062,19 +12062,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { +@@ -13847,19 +13847,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer)); float * data = (float *) lctx.inp_mean->data; @@ -42,7 +42,7 @@ index 61948751..61fe7b57 100644 const uint64_t s = sum[i]; if (s > 0) { div[i] = 1.0f/float(s); -@@ -12094,14 +12091,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { +@@ -13879,14 +13876,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) { GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); uint32_t * data = (uint32_t *) lctx.inp_cls->data; From 78fb33dd07ecbbd78de2293bc542187afa6b671b Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 15:18:36 -0400 Subject: [PATCH 14/33] fix typo in cgo directives in `llm.go` (#5501) --- llm/llm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/llm.go b/llm/llm.go index 157176246..fb6d4b5c7 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -6,7 +6,7 @@ package llm // #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/src/libllama.a ${SRCDIR}/build/windows/amd64_static/ggml/src/libggml.a -static -lstdc++ // #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/src/libllama.a ${SRCDIR}/build/windows/arm64_static/ggml/src/libggml.a -static -lstdc++ // #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/src/libllama.a ${SRCDIR}/build/linux/x86_64_static/ggml/src/libggml.a -lstdc++ -// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml. 
-lstdc++ +// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml.a -lstdc++ // #include // #include "llama.h" import "C" From 631cfd9e62362d6aea72da96fa67c52a1fd4e990 Mon Sep 17 00:00:00 2001 From: Blake Mizerany Date: Fri, 5 Jul 2024 13:42:30 -0700 Subject: [PATCH 15/33] types/model: remove knowledge of digest (#5500) This was leading to ambiguity and confusion in ollama.com, and is not used anywhere in ollama at the moment. Once manifests are addressable by digest, we can add this back in, and in a way that is more tailored to the concept of addressing a manifest by digest. --- types/model/name.go | 22 +++++++--------------- types/model/name_test.go | 34 +++++++--------------------------- 2 files changed, 14 insertions(+), 42 deletions(-) diff --git a/types/model/name.go b/types/model/name.go index e645a844c..5e475687e 100644 --- a/types/model/name.go +++ b/types/model/name.go @@ -91,7 +91,6 @@ type Name struct { Namespace string Model string Tag string - RawDigest string } // ParseName parses and assembles a Name from a name string. The @@ -143,11 +142,6 @@ func ParseNameBare(s string) Name { var n Name var promised bool - s, n.RawDigest, promised = cutLast(s, "@") - if promised && n.RawDigest == "" { - n.RawDigest = MissingPart - } - // "/" is an illegal tag character, so we can use it to split the host if strings.LastIndex(s, ":") > strings.LastIndex(s, "/") { s, n.Tag, _ = cutPromised(s, ":") @@ -222,10 +216,6 @@ func (n Name) String() string { b.WriteByte(':') b.WriteString(n.Tag) } - if n.RawDigest != "" { - b.WriteByte('@') - b.WriteString(n.RawDigest) - } return b.String() } @@ -250,16 +240,18 @@ func (n Name) DisplayShortest() string { return sb.String() } -func IsValidNamespace(namespace string) bool { - return isValidPart(kindNamespace, namespace) +// IsValidNamespace reports whether the provided string is a valid +// namespace. +func IsValidNamespace(s string) bool { + return isValidPart(kindNamespace, s) } // IsValid reports whether all parts of the name are present and valid. The // digest is a special case, and is checked for validity only if present. +// +// Note: The digest check has been removed as is planned to be added back in +// at a later time. 
func (n Name) IsValid() bool { - if n.RawDigest != "" && !isValidPart(kindDigest, n.RawDigest) { - return false - } return n.IsFullyQualified() } diff --git a/types/model/name_test.go b/types/model/name_test.go index 008dd586c..794d14d79 100644 --- a/types/model/name_test.go +++ b/types/model/name_test.go @@ -122,21 +122,6 @@ func TestParseNameParts(t *testing.T) { }, wantFilepath: filepath.Join(part350, part80, part80, part80), }, - { - in: "@digest", - want: Name{ - RawDigest: "digest", - }, - wantValidDigest: false, - }, - { - in: "model@sha256:123", - want: Name{ - Model: "model", - RawDigest: "sha256:123", - }, - wantValidDigest: true, - }, } for _, tt := range cases { @@ -160,22 +145,18 @@ var testCases = map[string]bool{ // name -> valid "_why/_the/_lucky:_stiff": true, // minimal - "h/n/m:t@d": true, + "h/n/m:t": true, "host/namespace/model:tag": true, "host/namespace/model": false, "namespace/model": false, "model": false, - "@sha256-1000000000000000000000000000000000000000000000000000000000000000": false, - "model@sha256-1000000000000000000000000000000000000000000000000000000000000000": false, - "model@sha256:1000000000000000000000000000000000000000000000000000000000000000": false, // long (but valid) part80 + "/" + part80 + "/" + part80 + ":" + part80: true, part350 + "/" + part80 + "/" + part80 + ":" + part80: true, - "h/nn/mm:t@sha256-1000000000000000000000000000000000000000000000000000000000000000": true, // bare minimum part sizes - "h/nn/mm:t@sha256:1000000000000000000000000000000000000000000000000000000000000000": true, // bare minimum part sizes + "h/nn/mm:t": true, // bare minimum part sizes // unqualified "m": false, @@ -196,11 +177,10 @@ var testCases = map[string]bool{ // name -> valid "@": false, // not starting with alphanum - "-hh/nn/mm:tt@dd": false, - "hh/-nn/mm:tt@dd": false, - "hh/nn/-mm:tt@dd": false, - "hh/nn/mm:-tt@dd": false, - "hh/nn/mm:tt@-dd": false, + "-hh/nn/mm:tt": false, + "hh/-nn/mm:tt": false, + "hh/nn/-mm:tt": false, + "hh/nn/mm:-tt": false, // hosts "host:https/namespace/model:tag": true, @@ -334,7 +314,7 @@ func FuzzName(f *testing.F) { f.Fuzz(func(t *testing.T, s string) { n := ParseNameBare(s) if n.IsValid() { - parts := [...]string{n.Host, n.Namespace, n.Model, n.Tag, n.RawDigest} + parts := [...]string{n.Host, n.Namespace, n.Model, n.Tag} for _, part := range parts { if part == ".." { t.Errorf("unexpected .. as valid part") From 9d30f9f8b3836e8d617eadf63a71d8363ff56c7e Mon Sep 17 00:00:00 2001 From: Daniel Hiltgen Date: Fri, 5 Jul 2024 12:25:53 -0700 Subject: [PATCH 16/33] Always go build in CI generate steps With the recent cgo changes, bugs can sneak through if we don't make sure to `go build` all the permutations --- .github/workflows/test.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 29adf56f3..13d1c957c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -58,6 +58,7 @@ jobs: runs-on: ${{ matrix.os }} env: GOARCH: ${{ matrix.arch }} + CGO_ENABLED: '1' steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v5 @@ -79,6 +80,7 @@ jobs: - run: go generate -x ./... if: ${{ ! startsWith(matrix.os, 'windows-') }} name: 'Unix Go Generate' + - run: go build . 
- uses: actions/upload-artifact@v4 with: name: ${{ matrix.os }}-${{ matrix.arch }}-libraries From 4fd5f3526a116d05cd74cfcc7217d4e6326e1bea Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 19:07:01 -0400 Subject: [PATCH 17/33] fix cmake build (#5505) --- llm/ext_server/CMakeLists.txt | 29 ++++++++++++++++------------- llm/generate/gen_common.sh | 1 + 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/llm/ext_server/CMakeLists.txt b/llm/ext_server/CMakeLists.txt index 9de50739c..c300244f9 100644 --- a/llm/ext_server/CMakeLists.txt +++ b/llm/ext_server/CMakeLists.txt @@ -1,14 +1,17 @@ - -set(TARGET ollama_llama_server) -option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) -install(TARGETS ${TARGET} RUNTIME) -target_compile_definitions(${TARGET} PRIVATE - SERVER_VERBOSE=$ -) -target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) -if (WIN32) - TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) -endif() + +set(TARGET ollama_llama_server) +option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) +target_compile_definitions(${TARGET} PRIVATE + SERVER_VERBOSE=$ +) +target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) +install(TARGETS ollama_llama_server ggml llama + RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin" + LIBRARY DESTINATION "${CMAKE_BINARY_DIR}/bin" + COMPONENT ollama_llama_server) +if (WIN32) + TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) +endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) \ No newline at end of file diff --git a/llm/generate/gen_common.sh b/llm/generate/gen_common.sh index da1b06882..23feaf99d 100644 --- a/llm/generate/gen_common.sh +++ b/llm/generate/gen_common.sh @@ -81,6 +81,7 @@ apply_patches() { build() { cmake -S ${LLAMACPP_DIR} -B ${BUILD_DIR} ${CMAKE_DEFS} cmake --build ${BUILD_DIR} ${CMAKE_TARGETS} -j8 + cmake --install ${BUILD_DIR} --component ollama_llama_server } compress() { From 5304b765b2bf934070e06412f6617b97a56ae3d2 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 19:34:21 -0400 Subject: [PATCH 18/33] llm: put back old include dir (#5507) * llm: put back old include dir * llm: update link paths for old submodule commits --- llm/llm.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/llm/llm.go b/llm/llm.go index fb6d4b5c7..98fe7f09a 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,12 +1,13 @@ package llm -// #cgo CFLAGS: -Illama.cpp/include -Illama.cpp/ggml/include -// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/src/libllama.a ${SRCDIR}/build/darwin/arm64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal -// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/src/libllama.a ${SRCDIR}/build/darwin/x86_64_static/ggml/src/libggml.a -lstdc++ -framework Accelerate -framework Metal -// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/src/libllama.a ${SRCDIR}/build/windows/amd64_static/ggml/src/libggml.a -static -lstdc++ -// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/src/libllama.a ${SRCDIR}/build/windows/arm64_static/ggml/src/libggml.a -static -lstdc++ -// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/src/libllama.a 
${SRCDIR}/build/linux/x86_64_static/ggml/src/libggml.a -lstdc++ -// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/src/libllama.a ${SRCDIR}/build/linux/arm64_static/ggml/src/libggml.a -lstdc++ +// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include +// #cgo LDFLAGS: -lllama -lggml -lstdc++ +// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal +// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src +// #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src +// #cgo windows,arm64 LDFLAGS: -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src +// #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src +// #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src // #include // #include "llama.h" import "C" From 2cc854f8cb5b9670fc53134f8104569c60d535be Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 21:48:31 -0400 Subject: [PATCH 19/33] llm: fix missing dylibs by restoring old build behavior on Linux and macOS (#5511) * Revert "fix cmake build (#5505)" This reverts commit 4fd5f3526a116d05cd74cfcc7217d4e6326e1bea. * llm: fix missing dylibs by restoring old build behavior * crlf -> lf --- llm/ext_server/CMakeLists.txt | 28 ++++++++++++---------------- llm/generate/gen_common.sh | 1 - llm/generate/gen_darwin.sh | 6 +++--- llm/generate/gen_linux.sh | 2 +- 4 files changed, 16 insertions(+), 21 deletions(-) diff --git a/llm/ext_server/CMakeLists.txt b/llm/ext_server/CMakeLists.txt index c300244f9..b63f3c0e5 100644 --- a/llm/ext_server/CMakeLists.txt +++ b/llm/ext_server/CMakeLists.txt @@ -1,17 +1,13 @@ - -set(TARGET ollama_llama_server) -option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) -include_directories(${CMAKE_CURRENT_SOURCE_DIR}) -add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) -target_compile_definitions(${TARGET} PRIVATE - SERVER_VERBOSE=$ -) -target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) -install(TARGETS ollama_llama_server ggml llama - RUNTIME DESTINATION "${CMAKE_BINARY_DIR}/bin" - LIBRARY DESTINATION "${CMAKE_BINARY_DIR}/bin" - COMPONENT ollama_llama_server) -if (WIN32) - TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) -endif() +set(TARGET ollama_llama_server) +option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON) +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) +add_executable(${TARGET} server.cpp utils.hpp json.hpp httplib.h) +install(TARGETS ${TARGET} RUNTIME) +target_compile_definitions(${TARGET} PRIVATE + SERVER_VERBOSE=$ +) +target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT}) +if (WIN32) + TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32) +endif() target_compile_features(${TARGET} PRIVATE cxx_std_11) \ No newline at end of file diff --git a/llm/generate/gen_common.sh b/llm/generate/gen_common.sh index 23feaf99d..da1b06882 100644 --- a/llm/generate/gen_common.sh +++ 
b/llm/generate/gen_common.sh @@ -81,7 +81,6 @@ apply_patches() { build() { cmake -S ${LLAMACPP_DIR} -B ${BUILD_DIR} ${CMAKE_DEFS} cmake --build ${BUILD_DIR} ${CMAKE_TARGETS} -j8 - cmake --install ${BUILD_DIR} --component ollama_llama_server } compress() { diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh index 02577545a..8b4779f95 100755 --- a/llm/generate/gen_darwin.sh +++ b/llm/generate/gen_darwin.sh @@ -18,7 +18,7 @@ sign() { fi } -COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off" +COMMON_DARWIN_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off" case "${GOARCH}" in "amd64") @@ -27,7 +27,7 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build @@ -75,7 +75,7 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" + CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index c36862520..2bea1c4e6 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -51,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then export CUDACXX=$(command -v nvcc) fi fi -COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off" +COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off" source $(dirname $0)/gen_common.sh init_vars git_module_setup From e0348d3fe8042b7e378a7cbcee95d17d20a14017 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Fri, 5 Jul 2024 22:42:42 -0400 Subject: [PATCH 20/33] llm: add `COMMON_DARWIN_DEFS` to arm static build (#5513) --- llm/generate/gen_darwin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/generate/gen_darwin.sh b/llm/generate/gen_darwin.sh index 8b4779f95..6c0b62cb7 100755 --- a/llm/generate/gen_darwin.sh +++ b/llm/generate/gen_darwin.sh @@ -75,7 +75,7 @@ case "${GOARCH}" in # Static build for linking into the Go binary init_vars CMAKE_TARGETS="--target llama --target ggml" - CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" + CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin 
-DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}" BUILD_DIR="../build/darwin/${ARCH}_static" echo "Building static library" build From 9ae146993e9ec834b95d038df1eecac68a744f18 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 03:27:05 -0400 Subject: [PATCH 21/33] llm: add `GGML_STATIC` flag to windows static lib --- llm/generate/gen_windows.ps1 | 1 + 1 file changed, 1 insertion(+) diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 5c6943502..123c44cc1 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -204,6 +204,7 @@ function build_static() { "-DCMAKE_C_COMPILER=gcc.exe", "-DCMAKE_CXX_COMPILER=g++.exe", "-DBUILD_SHARED_LIBS=off", + "-DGGML_STATIC=on", "-DGGML_NATIVE=off", "-DGGML_AVX=off", "-DGGML_AVX2=off", From f1a379aa566f7a9fefb2a64ac35faf34d9c00812 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 12:54:02 -0400 Subject: [PATCH 22/33] llm: statically link pthread and stdc++ dependencies in windows build --- llm/generate/gen_windows.ps1 | 1 - llm/llm.go | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/generate/gen_windows.ps1 b/llm/generate/gen_windows.ps1 index 123c44cc1..5c6943502 100644 --- a/llm/generate/gen_windows.ps1 +++ b/llm/generate/gen_windows.ps1 @@ -204,7 +204,6 @@ function build_static() { "-DCMAKE_C_COMPILER=gcc.exe", "-DCMAKE_CXX_COMPILER=g++.exe", "-DBUILD_SHARED_LIBS=off", - "-DGGML_STATIC=on", "-DGGML_NATIVE=off", "-DGGML_AVX=off", "-DGGML_AVX2=off", diff --git a/llm/llm.go b/llm/llm.go index 98fe7f09a..3cd162e0c 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,7 +1,8 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include -// #cgo LDFLAGS: -lllama -lggml -lstdc++ +// #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread +// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From 5796bfc4013f4ebe26cdbf13554332a25c405027 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 14:06:20 -0400 Subject: [PATCH 23/33] llm: only statically link libstdc++ --- .github/workflows/release.yaml | 4 ++++ llm/llm.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 61ca3c433..1042c6845 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -304,6 +304,10 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" + - name: remove unwanted mingw dll.a files + run: | + Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libpthread.dll.a" + Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libwinpthread.dll.a" - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/llm/llm.go b/llm/llm.go index 3cd162e0c..ac6a52490 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,8 +1,8 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include +// #cgo windows LDFLAGS: -static-libstdc++ // #cgo LDFLAGS: 
-lllama -lggml -lstdc++ -lpthread -// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From 6cea0360276e5fc7e2fecbe0cadf89cc72615279 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 15:10:48 -0400 Subject: [PATCH 24/33] Revert "llm: only statically link libstdc++" This reverts commit 5796bfc4013f4ebe26cdbf13554332a25c405027. --- .github/workflows/release.yaml | 4 ---- llm/llm.go | 2 +- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 1042c6845..61ca3c433 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -304,10 +304,6 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" - - name: remove unwanted mingw dll.a files - run: | - Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libpthread.dll.a" - Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libwinpthread.dll.a" - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/llm/llm.go b/llm/llm.go index ac6a52490..3cd162e0c 100644 --- a/llm/llm.go +++ b/llm/llm.go @@ -1,8 +1,8 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include -// #cgo windows LDFLAGS: -static-libstdc++ // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread +// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From a08f20d910194edff79d45315330a088fda3f136 Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 15:21:15 -0400 Subject: [PATCH 25/33] release: remove unwanted mingw dll.a files --- .github/workflows/release.yaml | 5 +++++ llm/llm.go | 1 - 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 61ca3c433..d1faf9f5b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -85,6 +85,11 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" + - name: remove unwanted mingw dll.a files + run: | + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force - uses: actions/setup-go@v5 with: go-version-file: go.mod diff --git a/llm/llm.go b/llm/llm.go index 3cd162e0c..88c0258d6 100644 --- a/llm/llm.go 
+++ b/llm/llm.go @@ -2,7 +2,6 @@ package llm // #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread -// #cgo windows LDFLAGS: -static-libstdc++ -static-libgcc -static // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src // #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src From c12f1c5b99c9d9f9388f464aa77063987fdb8f0f Mon Sep 17 00:00:00 2001 From: jmorganca Date: Sat, 6 Jul 2024 16:12:29 -0400 Subject: [PATCH 26/33] release: move mingw library cleanup to correct job --- .github/workflows/release.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d1faf9f5b..0005c69d3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -85,11 +85,6 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" - - name: remove unwanted mingw dll.a files - run: | - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force - Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force - uses: actions/setup-go@v5 with: go-version-file: go.mod @@ -309,6 +304,11 @@ jobs: write-host "Installing plugin" & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet write-host "plugin installed" + - name: remove unwanted mingw dll.a files + run: | + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libwinpthread.dll.a" -File | Remove-Item -Force + Get-ChildItem -Path "C:\mingw64" -Recurse -Filter "libstdc++.dll.a" -File | Remove-Item -Force - uses: actions/setup-go@v5 with: go-version-file: go.mod From 4607c706413f1354d0e762d25a9a0a933edc14ec Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sat, 6 Jul 2024 18:58:16 -0400 Subject: [PATCH 27/33] llm: add `-DBUILD_SHARED_LIBS=off` to common cpu cmake flags (#5520) --- llm/generate/gen_linux.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llm/generate/gen_linux.sh b/llm/generate/gen_linux.sh index 2bea1c4e6..d3e2d13ba 100755 --- a/llm/generate/gen_linux.sh +++ b/llm/generate/gen_linux.sh @@ -77,7 +77,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then if [ -n "${OLLAMA_CUSTOM_CPU_DEFS}" ]; then init_vars echo "OLLAMA_CUSTOM_CPU_DEFS=\"${OLLAMA_CUSTOM_CPU_DEFS}\"" - CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}" + CMAKE_DEFS="${OLLAMA_CUSTOM_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on ${CMAKE_DEFS}" BUILD_DIR="../build/linux/${ARCH}/cpu" echo "Building custom CPU" build @@ -93,7 +93,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake - COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off" + COMMON_CPU_DEFS="-DBUILD_SHARED_LIBS=off 
-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
         if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
             #
             # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)

From f8241bfba384cf8c888847dc44b73d7f43a42d82 Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Sat, 6 Jul 2024 19:35:04 -0400
Subject: [PATCH 28/33] gpu: report system free memory instead of 0 (#5521)

---
 gpu/gpu_darwin.go     |  2 +-
 gpu/gpu_info_darwin.h |  1 +
 gpu/gpu_info_darwin.m | 26 ++++++++++++++++++++++++--
 3 files changed, 26 insertions(+), 3 deletions(-)

diff --git a/gpu/gpu_darwin.go b/gpu/gpu_darwin.go
index f26d23c12..39d8fcf89 100644
--- a/gpu/gpu_darwin.go
+++ b/gpu/gpu_darwin.go
@@ -56,7 +56,7 @@ func GetCPUInfo() GpuInfoList {
 func GetCPUMem() (memInfo, error) {
 	return memInfo{
 		TotalMemory: uint64(C.getPhysicalMemory()),
-		FreeMemory:  0,
+		FreeMemory:  uint64(C.getFreeMemory()),
 	}, nil
 }
 
diff --git a/gpu/gpu_info_darwin.h b/gpu/gpu_info_darwin.h
index 3edca237c..415e7922d 100644
--- a/gpu/gpu_info_darwin.h
+++ b/gpu/gpu_info_darwin.h
@@ -2,3 +2,4 @@
 #include <stdint.h>
 uint64_t getRecommendedMaxVRAM();
 uint64_t getPhysicalMemory();
+uint64_t getFreeMemory();
diff --git a/gpu/gpu_info_darwin.m b/gpu/gpu_info_darwin.m
index a145ac076..5ca139e0b 100644
--- a/gpu/gpu_info_darwin.m
+++ b/gpu/gpu_info_darwin.m
@@ -1,4 +1,5 @@
-// go:build darwin
+#import <Foundation/Foundation.h>
+#import <mach/mach.h>
 #include "gpu_info_darwin.h"
 
 uint64_t getRecommendedMaxVRAM() {
@@ -8,6 +9,27 @@ uint64_t getRecommendedMaxVRAM() {
   return result;
 }
 
+// getPhysicalMemory returns the total physical memory in bytes
 uint64_t getPhysicalMemory() {
-  return [[NSProcessInfo processInfo] physicalMemory];
+  return [NSProcessInfo processInfo].physicalMemory;
+}
+
+// getFreeMemory returns the total free memory in bytes, including inactive
+// memory that can be reclaimed by the system.
+uint64_t getFreeMemory() { + mach_port_t host_port = mach_host_self(); + mach_msg_type_number_t host_size = sizeof(vm_statistics64_data_t) / sizeof(integer_t); + vm_size_t pagesize; + vm_statistics64_data_t vm_stat; + + host_page_size(host_port, &pagesize); + if (host_statistics64(host_port, HOST_VM_INFO64, (host_info64_t)&vm_stat, &host_size) != KERN_SUCCESS) { + return 0; + } + + uint64_t free_memory = (uint64_t)vm_stat.free_count * pagesize; + free_memory += (uint64_t)vm_stat.speculative_count * pagesize; + free_memory += (uint64_t)vm_stat.inactive_count * pagesize; + + return free_memory; } From 0ee87615c74c69d8fbc3cad8f3ea5a2364b1a876 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sat, 6 Jul 2024 22:01:52 -0400 Subject: [PATCH 29/33] sched: don't error if paging to disk on Windows and macOS (#5523) --- server/sched.go | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/server/sched.go b/server/sched.go index 8c054c6b4..9dff2ae07 100644 --- a/server/sched.go +++ b/server/sched.go @@ -197,25 +197,36 @@ func (s *Scheduler) processPending(ctx context.Context) { break } - // Block attempting to load a model larger than system memory + GPU memory estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts) maxSize := systemMem.FreeMemory - for _, gpu := range gpus { - if gpu.Library == "cpu" { - continue - } - if loadedCount == 0 { - // If no other models are loaded, set the limit based on what's available - maxSize += gpu.FreeMemory - } else { - // Other models could be unloaded, favor total memory for limit - maxSize += gpu.TotalMemory + + // Add available GPU memory to the total pool + // macOS hardware has unified memory so don't double count + if runtime.GOOS != "darwin" { + for _, gpu := range gpus { + if gpu.Library == "cpu" { + continue + } + if loadedCount == 0 { + // If no other models are loaded, set the limit based on what's available + maxSize += gpu.FreeMemory + } else { + // Other models could be unloaded, favor total memory for limit + maxSize += gpu.TotalMemory + } } } + + // Block attempting to load a model larger than system memory + GPU memory if estimate.TotalSize > maxSize { slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize)) - pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) - break + + // Linux will crash if over-allocating memory - return an error to the user. 
+ // TODO (jmorganca): add reasonable upper limits for darwin and windows as well + if runtime.GOOS == "linux" { + pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize)) + break + } } // Evaluate if the model will fit in the available system memory, or if we should unload a model first From 0e09c380fcae8b81db3c3447d70d721cfad00dbd Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 12:38:04 -0400 Subject: [PATCH 30/33] llm: print caching notices in debug only (#5533) --- llm/ext_server/server.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 00a15b4a3..7ae58e382 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1413,7 +1413,7 @@ struct llama_server_context return get_slot(-1); } - LOG_INFO("slot with common prefix found", {{ + LOG_DEBUG("slot with common prefix found", {{ "slot_id", slot->id, "characters", longest }}); From 571dc61955ced560a45e9d32b1cd2a52d9803c8c Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 13:03:09 -0400 Subject: [PATCH 31/33] Update llama.cpp submodule to `a8db2a9c` (#5530) --- llm/llama.cpp | 2 +- llm/patches/05-default-pretokenizer.diff | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/llm/llama.cpp b/llm/llama.cpp index d7fd29fff..a8db2a9ce 160000 --- a/llm/llama.cpp +++ b/llm/llama.cpp @@ -1 +1 @@ -Subproject commit d7fd29fff16456ce9c3a23fd2d09a66256b05aff +Subproject commit a8db2a9ce64cd4417f6a312ab61858f17f0f8584 diff --git a/llm/patches/05-default-pretokenizer.diff b/llm/patches/05-default-pretokenizer.diff index f4eaced72..341a6f590 100644 --- a/llm/patches/05-default-pretokenizer.diff +++ b/llm/patches/05-default-pretokenizer.diff @@ -1,11 +1,11 @@ diff --git a/src/llama.cpp b/src/llama.cpp -index 73f52435..2b81b4bd 100644 +index 2b9ace28..172640e2 100644 --- a/src/llama.cpp +++ b/src/llama.cpp -@@ -5092,16 +5092,7 @@ static void llm_load_vocab( - - // for now, only BPE models have pre-tokenizers +@@ -5357,16 +5357,7 @@ static void llm_load_vocab( if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = true; - if (tokenizer_pre.empty()) { - LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__); - LLAMA_LOG_WARN("%s: \n", __func__); @@ -20,7 +20,7 @@ index 73f52435..2b81b4bd 100644 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; } else if ( tokenizer_pre == "llama3" || -@@ -5164,7 +5155,8 @@ static void llm_load_vocab( +@@ -5439,7 +5430,8 @@ static void llm_load_vocab( tokenizer_pre == "jais") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS; } else { From d8def1ff9432ef60d1067e5e6dde0d700dd95021 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 13:41:51 -0400 Subject: [PATCH 32/33] llm: allow gemma 2 to context shift (#5534) --- llm/ext_server/server.cpp | 29 +---------------------------- 1 file changed, 1 insertion(+), 28 deletions(-) diff --git a/llm/ext_server/server.cpp b/llm/ext_server/server.cpp index 7ae58e382..0ef3956ec 100644 --- a/llm/ext_server/server.cpp +++ b/llm/ext_server/server.cpp @@ -1688,22 +1688,8 @@ struct llama_server_context } slot.params.n_keep = std::min(slot.n_ctx - 4, slot.params.n_keep); - char buf[256]; - llama_model_meta_val_str(model, "general.architecture", buf, 256); - bool gemma2 = strcmp(buf, "gemma2") == 0; - - int32_t truncate_at = slot.n_ctx; - - // truncate 
at 2/3 of the context length for gemma2 models - // as they do not support context shifts (from the sliding window implementation). - // this way, prompts that almost fit the context length can still generate a full - // response without a sudden stop from hitting the context limit - if (gemma2) { - truncate_at = 2 * slot.n_ctx / 3; - } - // if input prompt is too big, truncate it, if group attention self-extend is disabled - if (slot.ga_n == 1 && slot.n_prompt_tokens >= truncate_at) + if (slot.ga_n == 1 && slot.n_prompt_tokens >= slot.n_ctx) { const int n_left = slot.n_ctx - slot.params.n_keep; const int n_shift = n_left / 2; @@ -1731,19 +1717,6 @@ struct llama_server_context GGML_ASSERT(slot.n_prompt_tokens < slot.n_ctx); } - // Models with sliding window attention do not work with context shifts, so - // limit their prediction to the context length - if (gemma2) { - int32_t limit = slot.n_ctx - slot.n_prompt_tokens; - slot.n_predict = limit; - slot.params.n_predict = limit; - LOG_INFO("model does not support sliding window, limiting generation", { - {"n_ctx", slot.n_ctx}, - {"n_prompt_tokens", slot.n_prompt_tokens}, - {"n_predict", slot.n_predict} - }); - } - if (!slot.params.cache_prompt) { llama_sampling_reset(slot.ctx_sampling); From 53da2c69654769c0c086af695722e1d9b9ee6ecc Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Sun, 7 Jul 2024 14:32:05 -0400 Subject: [PATCH 33/33] llm: remove ambiguous comment when putting upper limit on predictions to avoid infinite generation (#5535) --- llm/server.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/llm/server.go b/llm/server.go index 206f9e391..54fad92ce 100644 --- a/llm/server.go +++ b/llm/server.go @@ -699,10 +699,9 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu } defer s.sem.Release(1) - // only allow maximum 10 "context shifts" to avoid infinite generation + // put an upper limit on num_predict to avoid the model running on forever if req.Options.NumPredict < 0 || req.Options.NumPredict > 10*s.options.NumCtx { req.Options.NumPredict = 10 * s.options.NumCtx - slog.Debug("setting token limit to 10x num_ctx", "num_ctx", s.options.NumCtx, "num_predict", req.Options.NumPredict) } request := map[string]any{