From f8453e9d4a15f5f54b610993e8647d252cb65626 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 13 Feb 2025 22:37:59 -0800 Subject: [PATCH] llm: attempt to evaluate symlinks, but do not fail (#9089) provides a better approach to #9088 that will attempt to evaluate symlinks (important for macOS where 'ollama' is often a symlink), but use the result of os.Executable() as a fallback in scenarios where filepath.EvalSymlinks fails due to permission errors or other issues --- discover/path.go | 4 ++++ llm/server.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/discover/path.go b/discover/path.go index 23aa8110d..8a20d8c21 100644 --- a/discover/path.go +++ b/discover/path.go @@ -19,6 +19,10 @@ var LibOllamaPath string = func() string { return "" } + if eval, err := filepath.EvalSymlinks(exe); err == nil { + exe = eval + } + var libPath string switch runtime.GOOS { case "windows": diff --git a/llm/server.go b/llm/server.go index 7f5240d98..8dce5ee8f 100644 --- a/llm/server.go +++ b/llm/server.go @@ -320,6 +320,10 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, ggml *GGML, adapter return nil, fmt.Errorf("unable to lookup executable path: %w", err) } + if eval, err := filepath.EvalSymlinks(exe); err == nil { + exe = eval + } + // TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access s := &llmServer{ port: port,