diff --git a/cmd/cmd.go b/cmd/cmd.go
index 159de9a6a..c22a08f43 100644
--- a/cmd/cmd.go
+++ b/cmd/cmd.go
@@ -34,7 +34,6 @@ import (
 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/format"
-	"github.com/ollama/ollama/llama"
 	"github.com/ollama/ollama/parser"
 	"github.com/ollama/ollama/progress"
 	"github.com/ollama/ollama/runner"
@@ -1281,7 +1280,6 @@ func NewCLI() *cobra.Command {
 
 	runnerCmd := &cobra.Command{
 		Use:    "runner",
-		Short:  llama.PrintSystemInfo(),
 		Hidden: true,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			return runner.Execute(os.Args[1:])
diff --git a/llama/llama.go b/llama/llama.go
index 0c4fca430..bb5028bd9 100644
--- a/llama/llama.go
+++ b/llama/llama.go
@@ -21,18 +21,6 @@ package llama
 
 extern bool llamaProgressCallback(float progress, void *user_data);
 extern void llamaLog(int level, char* text, void* user_data);
-
-typedef enum {COMP_UNKNOWN,COMP_GCC,COMP_CLANG} COMPILER;
-COMPILER inline get_compiler() {
-#if defined(__clang__)
-	return COMP_CLANG;
-#elif defined(__GNUC__)
-	return COMP_GCC;
-#else
-	return UNKNOWN_COMPILER;
-#endif
-}
-
 */
 import "C"
 
@@ -72,19 +60,6 @@ func BackendInit() {
 	C.llama_backend_init()
 }
 
-func PrintSystemInfo() string {
-	var compiler string
-	switch C.get_compiler() {
-	case C.COMP_UNKNOWN:
-		compiler = "cgo(unknown_compiler)"
-	case C.COMP_GCC:
-		compiler = "cgo(gcc)"
-	case C.COMP_CLANG:
-		compiler = "cgo(clang)"
-	}
-	return C.GoString(C.llama_print_system_info()) + compiler
-}
-
 func GetModelArch(modelPath string) (string, error) {
 	mp := C.CString(modelPath)
 	defer C.free(unsafe.Pointer(mp))
diff --git a/ml/backend.go b/ml/backend.go
index 83b7a8c9c..3ef8a1ac2 100644
--- a/ml/backend.go
+++ b/ml/backend.go
@@ -24,7 +24,6 @@ type Backend interface {
 	Config() Config
 	Get(name string) Tensor
 	NewContext() Context
-	SystemInfo() string
 }
 
 // BackendCacheConfig should be implemented by backends that need special output
diff --git a/ml/backend/ggml/ggml.go b/ml/backend/ggml/ggml.go
index f4948fcad..2d8ddf99f 100644
--- a/ml/backend/ggml/ggml.go
+++ b/ml/backend/ggml/ggml.go
@@ -1,27 +1,11 @@
 package ggml
 
-/*
-#cgo CPPFLAGS: -I${SRCDIR}/ggml/include
-#include <stdlib.h>
-#include <stdint.h>
-#include "ggml.h"
-#include "ggml-cpu.h"
-#include "ggml-backend.h"
-static struct ggml_backend_feature * getBackendFeatures(void *fp, ggml_backend_reg_t reg) {return ((ggml_backend_get_features_t)(fp))(reg);}
-static struct ggml_backend_feature * getNextBackendFeatures(struct ggml_backend_feature * feature) { return &feature[1];}
-
-typedef enum {COMP_UNKNOWN,COMP_GCC,COMP_CLANG} COMPILER;
-COMPILER inline get_compiler() {
-#if defined(__clang__)
-	return COMP_CLANG;
-#elif defined(__GNUC__)
-	return COMP_GCC;
-#else
-	return UNKNOWN_COMPILER;
-#endif
-}
-
-*/
+// #cgo CPPFLAGS: -I${SRCDIR}/ggml/include
+// #include <stdlib.h>
+// #include <stdint.h>
+// #include "ggml.h"
+// #include "ggml-cpu.h"
+// #include "ggml-backend.h"
 import "C"
 
 import (
@@ -729,34 +713,3 @@ func (t *Tensor) ScaledDotProductAttention(ctx ml.Context, key, value, mask ml.T
 		return kqv.Permute(ctx, 0, 2, 1, 3).Contiguous(ctx)
 	}
 }
-
-func (b *Backend) SystemInfo() string {
-	var compiler string
-	switch C.get_compiler() {
-	case C.COMP_UNKNOWN:
-		compiler = "cgo(unknown_compiler)"
-	case C.COMP_GCC:
-		compiler = "cgo(gcc)"
-	case C.COMP_CLANG:
-		compiler = "cgo(clang)"
-	}
-
-	var s string
-	for i := range C.ggml_backend_reg_count() {
-		reg := C.ggml_backend_reg_get(i)
-		fName := C.CString("ggml_backend_get_features")
-		defer C.free(unsafe.Pointer(fName))
-		get_features_fn := C.ggml_backend_reg_get_proc_address(reg, fName)
-		if get_features_fn != nil {
-			s += C.GoString(C.ggml_backend_reg_name(reg))
-			s += " : "
-			for features := C.getBackendFeatures(get_features_fn, reg); features.name != nil; features = C.getNextBackendFeatures(features) {
-				s += C.GoString(features.name)
-				s += " = "
-				s += C.GoString(features.value)
-				s += " | "
-			}
-		}
-	}
-	return s + compiler
-}
diff --git a/ml/backend/ggml/ggml/src/ggml.go b/ml/backend/ggml/ggml/src/ggml.go
index 85c693eba..afc1e1edd 100644
--- a/ml/backend/ggml/ggml/src/ggml.go
+++ b/ml/backend/ggml/ggml/src/ggml.go
@@ -7,6 +7,20 @@ package ggml
 // #include <stdlib.h>
 // #include "ggml-backend.h"
 // extern void sink(int level, char *text, void *user_data);
+// static struct ggml_backend_feature * first_feature(ggml_backend_get_features_t fp, ggml_backend_reg_t reg) { return fp(reg); }
+// static struct ggml_backend_feature * next_feature(struct ggml_backend_feature * feature) { return &feature[1]; }
+/*
+typedef enum { COMPILER_CLANG, COMPILER_GNUC, COMPILER_UNKNOWN } COMPILER;
+static COMPILER compiler_name(void) {
+#if defined(__clang__)
+	return COMPILER_CLANG;
+#elif defined(__GNUC__)
+	return COMPILER_GNUC;
+#else
+	return COMPILER_UNKNOWN;
+#endif
+}
+*/
 import "C"
 
 import (
@@ -16,6 +30,7 @@
 	"os"
 	"path/filepath"
 	"runtime"
+	"strconv"
 	"strings"
 	"sync"
 	"unsafe"
@@ -90,4 +105,43 @@ var OnceLoad = sync.OnceFunc(func() {
 			visited[abspath] = struct{}{}
 		}
 	}
+
+	slog.Info("system", "", system{})
 })
+
+type system struct{}
+
+func (system) LogValue() slog.Value {
+	var attrs []slog.Attr
+	names := make(map[string]int)
+	for i := range C.ggml_backend_dev_count() {
+		r := C.ggml_backend_dev_backend_reg(C.ggml_backend_dev_get(i))
+
+		func() {
+			fName := C.CString("ggml_backend_get_features")
+			defer C.free(unsafe.Pointer(fName))
+
+			if fn := C.ggml_backend_reg_get_proc_address(r, fName); fn != nil {
+				var features []any
+				for f := C.first_feature(C.ggml_backend_get_features_t(fn), r); f.name != nil; f = C.next_feature(f) {
+					features = append(features, C.GoString(f.name), C.GoString(f.value))
+				}
+
+				name := C.GoString(C.ggml_backend_reg_name(r))
+				attrs = append(attrs, slog.Group(name+"."+strconv.Itoa(names[name]), features...))
+				names[name] += 1
+			}
+		}()
+	}
+
+	switch C.compiler_name() {
+	case C.COMPILER_CLANG:
+		attrs = append(attrs, slog.String("compiler", "cgo(clang)"))
+	case C.COMPILER_GNUC:
+		attrs = append(attrs, slog.String("compiler", "cgo(gcc)"))
+	default:
+		attrs = append(attrs, slog.String("compiler", "cgo(unknown)"))
+	}
+
+	return slog.GroupValue(attrs...)
+}
diff --git a/runner/llamarunner/runner.go b/runner/llamarunner/runner.go
index 82880c980..8662afc1e 100644
--- a/runner/llamarunner/runner.go
+++ b/runner/llamarunner/runner.go
@@ -931,7 +931,6 @@ func Execute(args []string) error {
 
 	slog.Info("starting go runner")
 	llama.BackendInit()
-	slog.Info("system", "info", llama.PrintSystemInfo(), "threads", *threads)
 
 	server := &Server{
 		batchSize: *batchSize,
diff --git a/runner/ollamarunner/runner.go b/runner/ollamarunner/runner.go
index 5705931ad..1a4bbf19e 100644
--- a/runner/ollamarunner/runner.go
+++ b/runner/ollamarunner/runner.go
@@ -786,8 +786,6 @@ func (s *Server) loadModel(
 		panic(err)
 	}
 
-	slog.Info("system", "info", s.model.Backend().SystemInfo(), "threads", params.NumThreads)
-
 	// TODO(jessegross): LoRA loading
 	if lpath.String() != "" {
 		panic("loras are not yet implemented")
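
Note: the new system type relies on log/slog's LogValuer interface, so building the attribute group (backend feature enumeration plus compiler name) is deferred until the "system" record is actually handled, replacing the eagerly built PrintSystemInfo/SystemInfo strings. The following is a minimal, self-contained, cgo-free sketch of that pattern only; the backends variable, its feature strings, and the hard-coded "cgo(clang)" value are hypothetical stand-ins for what the real code reads from ggml via ggml_backend_get_features and compiler_name.

package main

import (
	"log/slog"
	"os"
	"strconv"
)

// system mirrors the pattern added in ml/backend/ggml/ggml/src/ggml.go: a
// zero-size type whose LogValue is only computed when the record is handled.
type system struct{}

// Hypothetical stand-in data for the per-backend feature lists that the real
// code obtains from ggml via cgo (ggml_backend_get_features).
var backends = []struct {
	name     string
	features []any // alternating key/value pairs, as slog.Group expects
}{
	{"CPU", []any{"AVX", "1", "AVX2", "1", "LLAMAFILE", "1"}},
	{"CPU", []any{"AVX", "1", "AVX2", "0"}},
}

func (system) LogValue() slog.Value {
	var attrs []slog.Attr

	// Suffix repeated backend names with a counter (CPU.0, CPU.1, ...) so
	// multiple registrations of the same backend stay distinguishable.
	names := make(map[string]int)
	for _, b := range backends {
		attrs = append(attrs, slog.Group(b.name+"."+strconv.Itoa(names[b.name]), b.features...))
		names[b.name]++
	}

	attrs = append(attrs, slog.String("compiler", "cgo(clang)"))
	return slog.GroupValue(attrs...)
}

func main() {
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, nil)))

	// Same call shape as in OnceLoad: the empty key makes the handler inline
	// the resolved group attributes directly into the "system" record.
	slog.Info("system", "", system{})
}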