Compare commits: parth/samp ... pdevine/lo (19 commits)
Commit SHAs: 73a1e99f8a, 543240fb5f, 4bed739259, 80c7ce381b, ccfd41c4f0, 3e102b7dad, ec46f3286c, 5e2e0b46b1, 45a13b1dec, 5c0b663969, 30d7a59ba8, 4aeb67ef4c, 3ba91634c1, 1b7433b71e, a70820daa0, 6b45b1d6b4, 85ab552028, b3af953a55, ad4e0bf3be
@@ -54,6 +54,10 @@ Here are some example models that can be downloaded:
 | Model              | Parameters | Size  | Download                         |
 | ------------------ | ---------- | ----- | -------------------------------- |
+| Gemma 3            | 1B         | 815MB | `ollama run gemma3:1b`           |
+| Gemma 3            | 4B         | 3.3GB | `ollama run gemma3`              |
+| Gemma 3            | 12B        | 8.1GB | `ollama run gemma3:12b`          |
+| Gemma 3            | 27B        | 17GB  | `ollama run gemma3:27b`          |
 | QwQ                | 32B        | 20GB  | `ollama run qwq`                 |
 | DeepSeek-R1        | 7B         | 4.7GB | `ollama run deepseek-r1`         |
 | DeepSeek-R1        | 671B       | 404GB | `ollama run deepseek-r1:671b`    |
@@ -66,9 +70,6 @@ Here are some example models that can be downloaded:
 | Llama 3.1          | 405B       | 231GB | `ollama run llama3.1:405b`       |
 | Phi 4              | 14B        | 9.1GB | `ollama run phi4`                |
 | Phi 4 Mini         | 3.8B       | 2.5GB | `ollama run phi4-mini`           |
-| Gemma 2            | 2B         | 1.6GB | `ollama run gemma2:2b`           |
-| Gemma 2            | 9B         | 5.5GB | `ollama run gemma2`              |
-| Gemma 2            | 27B        | 16GB  | `ollama run gemma2:27b`          |
 | Mistral            | 7B         | 4.1GB | `ollama run mistral`             |
 | Moondream 2        | 1.4B       | 829MB | `ollama run moondream`           |
 | Neural Chat        | 7B         | 4.1GB | `ollama run neural-chat`         |
@@ -349,6 +349,7 @@ type ShowResponse struct {
 	Messages      []Message      `json:"messages,omitempty"`
 	ModelInfo     map[string]any `json:"model_info,omitempty"`
 	ProjectorInfo map[string]any `json:"projector_info,omitempty"`
+	Tensors       []Tensor       `json:"tensors,omitempty"`
 	ModifiedAt    time.Time      `json:"modified_at,omitempty"`
 }
@@ -467,6 +468,13 @@ type ModelDetails struct {
 	QuantizationLevel string `json:"quantization_level"`
 }
 
+// Tensor describes the metadata for a given tensor.
+type Tensor struct {
+	Name  string   `json:"name"`
+	Type  string   `json:"type"`
+	Shape []uint64 `json:"shape"`
+}
+
 func (m *Metrics) Summary() {
 	if m.TotalDuration > 0 {
 		fmt.Fprintf(os.Stderr, "total duration: %v\n", m.TotalDuration)
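As a quick illustration, a client can read the new field through the Go API package. This is a minimal sketch assuming a locally running server; the model name is only an example:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	// Verbose asks the server to include full metadata and tensor listings.
	resp, err := client.Show(context.Background(), &api.ShowRequest{Name: "gemma3", Verbose: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range resp.Tensors {
		fmt.Println(t.Name, t.Type, t.Shape)
	}
}
```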
cmd/cmd.go (50 lines changed)
@@ -18,6 +18,7 @@ import (
 	"os/signal"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"sync/atomic"
@@ -568,8 +569,9 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 	parameters, errParams := cmd.Flags().GetBool("parameters")
 	system, errSystem := cmd.Flags().GetBool("system")
 	template, errTemplate := cmd.Flags().GetBool("template")
+	verbose, errVerbose := cmd.Flags().GetBool("verbose")
 
-	for _, boolErr := range []error{errLicense, errModelfile, errParams, errSystem, errTemplate} {
+	for _, boolErr := range []error{errLicense, errModelfile, errParams, errSystem, errTemplate, errVerbose} {
 		if boolErr != nil {
 			return errors.New("error retrieving flags")
 		}
@@ -607,7 +609,7 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 		return errors.New("only one of '--license', '--modelfile', '--parameters', '--system', or '--template' can be specified")
 	}
 
-	req := api.ShowRequest{Name: args[0]}
+	req := api.ShowRequest{Name: args[0], Verbose: verbose}
 	resp, err := client.Show(cmd.Context(), &req)
 	if err != nil {
 		return err
@@ -630,10 +632,10 @@ func ShowHandler(cmd *cobra.Command, args []string) error {
 		return nil
 	}
 
-	return showInfo(resp, os.Stdout)
+	return showInfo(resp, verbose, os.Stdout)
 }
 
-func showInfo(resp *api.ShowResponse, w io.Writer) error {
+func showInfo(resp *api.ShowResponse, verbose bool, w io.Writer) error {
 	tableRender := func(header string, rows func() [][]string) {
 		fmt.Fprintln(w, " ", header)
 		table := tablewriter.NewWriter(w)
@@ -690,6 +692,45 @@ func showInfo(resp *api.ShowResponse, verbose bool, w io.Writer) error {
 		})
 	}
 
+	if resp.ModelInfo != nil && verbose {
+		tableRender("Metadata", func() (rows [][]string) {
+			keys := make([]string, 0, len(resp.ModelInfo))
+			for k := range resp.ModelInfo {
+				keys = append(keys, k)
+			}
+			sort.Strings(keys)
+
+			for _, k := range keys {
+				var v string
+				switch vData := resp.ModelInfo[k].(type) {
+				case string:
+					v = vData
+				case float64:
+					v = fmt.Sprintf("%g", vData)
+				case []any:
+					n := 3
+					if len(vData) < n {
+						n = len(vData)
+					}
+					v = fmt.Sprintf("%v", vData[:n])
+				default:
+					v = fmt.Sprintf("%T", vData)
+				}
+				rows = append(rows, []string{"", k, v})
+			}
+			return
+		})
+	}
+
+	if len(resp.Tensors) > 0 && verbose {
+		tableRender("Tensors", func() (rows [][]string) {
+			for _, t := range resp.Tensors {
+				rows = append(rows, []string{"", t.Name, t.Type, fmt.Sprint(t.Shape)})
+			}
+			return
+		})
+	}
+
 	head := func(s string, n int) (rows [][]string) {
 		scanner := bufio.NewScanner(strings.NewReader(s))
 		for scanner.Scan() && (len(rows) < n || n < 0) {
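The `float64` branch above explains an output quirk worth knowing: Go's `%g` verb switches to exponent notation for large values, so a parameter count of 8,000,000,000 renders as `8e+09`. A tiny demonstration:

```go
package main

import "fmt"

func main() {
	// %g picks the shortest representation, using exponent form for
	// large magnitudes.
	fmt.Printf("%g\n", float64(8_000_000_000)) // 8e+09
	fmt.Printf("%g\n", float64(11434))         // 11434
}
```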
@@ -1196,6 +1237,7 @@ func NewCLI() *cobra.Command {
 	showCmd.Flags().Bool("parameters", false, "Show parameters of a model")
 	showCmd.Flags().Bool("template", false, "Show template of a model")
 	showCmd.Flags().Bool("system", false, "Show system message of a model")
+	showCmd.Flags().BoolP("verbose", "v", false, "Show detailed model information")
 
 	runCmd := &cobra.Command{
 		Use:   "run MODEL [PROMPT]",
@@ -27,7 +27,7 @@ func TestShowInfo(t *testing.T) {
 			ParameterSize:     "7B",
 			QuantizationLevel: "FP16",
 		},
-	}, &b); err != nil {
+	}, false, &b); err != nil {
 		t.Fatal(err)
 	}
@@ -57,7 +57,7 @@ func TestShowInfo(t *testing.T) {
 			ParameterSize:     "7B",
 			QuantizationLevel: "FP16",
 		},
-	}, &b); err != nil {
+	}, false, &b); err != nil {
 		t.Fatal(err)
 	}
@@ -68,6 +68,56 @@ func TestShowInfo(t *testing.T) {
     embedding length    0
     quantization        FP16
 
+`
+		if diff := cmp.Diff(expect, b.String()); diff != "" {
+			t.Errorf("unexpected output (-want +got):\n%s", diff)
+		}
+	})
+
+	t.Run("verbose model", func(t *testing.T) {
+		var b bytes.Buffer
+		if err := showInfo(&api.ShowResponse{
+			Details: api.ModelDetails{
+				Family:            "test",
+				ParameterSize:     "8B",
+				QuantizationLevel: "FP16",
+			},
+			Parameters: `
+stop up`,
+			ModelInfo: map[string]any{
+				"general.architecture":    "test",
+				"general.parameter_count": float64(8_000_000_000),
+				"test.context_length":     float64(1000),
+				"test.embedding_length":   float64(11434),
+			},
+			Tensors: []api.Tensor{
+				{Name: "blk.0.attn_k.weight", Type: "BF16", Shape: []uint64{42, 3117}},
+				{Name: "blk.0.attn_q.weight", Type: "FP16", Shape: []uint64{3117, 42}},
+			},
+		}, true, &b); err != nil {
+			t.Fatal(err)
+		}
+
+		expect := `  Model
+    architecture        test
+    parameters          8B
+    context length      1000
+    embedding length    11434
+    quantization        FP16
+
+  Parameters
+    stop    up
+
+  Metadata
+    general.architecture       test
+    general.parameter_count    8e+09
+    test.context_length        1000
+    test.embedding_length      11434
+
+  Tensors
+    blk.0.attn_k.weight    BF16    [42 3117]
+    blk.0.attn_q.weight    FP16    [3117 42]
+
 `
 	if diff := cmp.Diff(expect, b.String()); diff != "" {
 		t.Errorf("unexpected output (-want +got):\n%s", diff)
@@ -89,7 +139,7 @@ func TestShowInfo(t *testing.T) {
 			stop you
 			stop up
 			temperature 99`,
-	}, &b); err != nil {
+	}, false, &b); err != nil {
 		t.Fatal(err)
 	}
@@ -126,7 +176,7 @@ func TestShowInfo(t *testing.T) {
 			"clip.vision.embedding_length": float64(0),
 			"clip.vision.projection_dim":   float64(0),
 		},
-	}, &b); err != nil {
+	}, false, &b); err != nil {
 		t.Fatal(err)
 	}
@@ -159,7 +209,7 @@ func TestShowInfo(t *testing.T) {
 Ahoy, matey!
 Weigh anchor!
 `,
-	}, &b); err != nil {
+	}, false, &b); err != nil {
 		t.Fatal(err)
 	}
@@ -188,7 +238,7 @@ Weigh anchor!
 			QuantizationLevel: "FP16",
 		},
 		License: license,
-	}, &b); err != nil {
+	}, false, &b); err != nil {
 		t.Fatal(err)
 	}
@@ -195,6 +195,10 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
 			opts.Messages = []api.Message{}
 			fmt.Printf("Loading model '%s'\n", opts.Model)
 			if err := loadOrUnloadModel(cmd, &opts); err != nil {
+				if strings.Contains(err.Error(), "not found") {
+					fmt.Printf("error: %v\n", err)
+					continue
+				}
 				return err
 			}
 			continue
@@ -343,7 +347,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
 
 		switch args[1] {
 		case "info":
-			_ = showInfo(resp, os.Stderr)
+			_ = showInfo(resp, false, os.Stderr)
 		case "license":
 			if resp.License == "" {
 				fmt.Println("No license was specified for this model.")
@@ -87,7 +87,7 @@ func (p *gemma3Model) KV(t *Tokenizer) ggml.KV {
 		kv["gemma3.embedding_length"] = p.HiddenSize
 		kv["gemma3.feed_forward_length"] = p.IntermediateSize
 	default:
-		kv["gemma3.context_length"] = cmp.Or(p.MaxPositionEmbeddings, 8192)
+		kv["gemma3.context_length"] = cmp.Or(p.MaxPositionEmbeddings, 131072)
 		kv["gemma3.embedding_length"] = p.TextModel.HiddenSize
 		kv["gemma3.feed_forward_length"] = p.TextModel.IntermediateSize
 		kv["gemma3.attention.sliding_window"] = p.TextModel.SlidingWindow
@@ -327,6 +327,10 @@ func (t Tensor) Size() uint64 {
 	return t.parameters() * t.typeSize() / t.blockSize()
 }
 
+func (t Tensor) Type() string {
+	return fileType(t.Kind).String()
+}
+
 type container interface {
 	Name() string
 	Decode(io.ReadSeeker) (model, error)
logging/log.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+package logging
+
+import (
+	"context"
+	"log/slog"
+	"os"
+)
+
+const LevelTrace slog.Level = slog.LevelDebug - 4
+
+type Logger struct {
+	logger *slog.Logger
+}
+
+func NewLogger() *Logger {
+	handler := slog.NewTextHandler(os.Stdout, nil)
+	return &Logger{
+		logger: slog.New(handler),
+	}
+}
+
+func (l *Logger) Trace(msg string, args ...any) {
+	l.logger.Log(context.Background(), LevelTrace, msg, args...)
+}
+
+func (l *Logger) Debug(msg string, args ...any) {
+	l.logger.Debug(msg, args...)
+}
+
+func (l *Logger) Info(msg string, args ...any) {
+	l.logger.Info(msg, args...)
+}
+
+func (l *Logger) Warn(msg string, args ...any) {
+	l.logger.Warn(msg, args...)
+}
+
+func (l *Logger) Error(msg string, args ...any) {
+	l.logger.Error(msg, args...)
+}
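A short usage sketch for the new package (illustrative only; note that `NewLogger` installs a default `slog.TextHandler`, so whether `Trace` and `Debug` lines actually print depends on that handler's minimum level):

```go
package main

import "github.com/ollama/ollama/logging"

func main() {
	log := logging.NewLogger()
	log.Info("loading vocabulary", "tokens", 256000)
	// LevelTrace sits below slog.LevelDebug, intended for very chatty
	// internals such as per-merge tokenizer steps.
	log.Trace("merge step", "pair", "ab")
}
```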
ml/backend/ggml/ggml/src/ollama-debug.c (vendored, 5 lines changed)
@@ -1,4 +1,5 @@
 #include <string.h>
+#include <inttypes.h>
 
 #include "ollama-debug.h"
 
@@ -24,7 +25,7 @@ static void print_tensor(const void *tensor, void (*cb)(const void *, int),
     fprintf(stderr, "[");
     for (int i = 0; i < dims[0]; i++) {
         if (i >= nitems && i < dims[0] - nitems) {
-            fprintf(stderr, "... (%lld more), ", dims[0] - 2 * nitems);
+            fprintf(stderr, "... (%" PRIi64 " more), ", dims[0] - 2 * nitems);
             int skip = dims[0] - 2 * nitems;
             if (ndims > 1) {
                 stride += mul(dims + 1, ndims - 1) * skip;
@@ -67,7 +68,7 @@ static void print_tensor_i32(const void *tensor, int i) {
 }
 
 static void ollama_debug_tensor(const struct ggml_tensor *tensor, bool verbose, const char *prefix, int indent) {
-    fprintf(stderr, "%s%s %s (%s): [%lld %lld %lld %lld]\n", prefix, tensor->name,
+    fprintf(stderr, "%s%s %s (%s): [%" PRIi64 " %" PRIi64 " %" PRIi64 " %" PRIi64 "]\n", prefix, tensor->name,
         ggml_op_name(tensor->op), ggml_type_name(tensor->type), tensor->ne[0],
         tensor->ne[1], tensor->ne[2], tensor->ne[3]);
@@ -22,6 +22,8 @@ import (
 	"github.com/ollama/ollama/model/input"
 )
 
+var ErrNoVisionModel = errors.New("this model is missing data required for image input")
+
 // Model implements a specific model architecture, defining the forward pass and any model-specific configuration
 type Model interface {
 	Forward(ml.Context, input.Options) (ml.Tensor, error)
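A hedged, fragment-level sketch of how a caller might branch on the new sentinel error; `EncodeMultimodal` and `model.ErrNoVisionModel` come from this change, while the surrounding variables and error message are illustrative only:

```go
// Hypothetical caller-side handling (not part of this change).
enc, err := m.EncodeMultimodal(ctx, imageBytes)
if errors.Is(err, model.ErrNoVisionModel) {
	// Text-only weights: report a clear error instead of failing deep
	// inside the vision tower.
	return nil, fmt.Errorf("%q does not support images: %w", modelName, err)
}
```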
@@ -84,6 +84,10 @@ func New(c ml.Config) (model.Model, error) {
 }
 
 func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) (any, error) {
+	if len(m.VisionModel.Layers) == 0 {
+		return nil, model.ErrNoVisionModel
+	}
+
 	image, _, err := image.Decode(bytes.NewReader(multimodalData))
 	if err != nil {
 		return nil, err
@@ -15,7 +15,6 @@ type TextOptions struct {
 	attnKeyLen, attnValLen        int
 	eps, ropeScale                float32
 	ropeLocalBase, ropeGlobalBase float32
-	finalLogitSoftcap             float32
 	largeModelScaling             bool
 }
@@ -57,16 +56,15 @@ func newTextModel(c ml.Config) *TextModel {
 		),
 		Layers: make([]TextLayer, numBlocks),
 		TextOptions: &TextOptions{
 			hiddenSize:     int(c.Uint("embedding_length")),
 			numHeads:       int(c.Uint("attention.head_count")),
 			numKVHeads:     int(c.Uint("attention.head_count_kv")),
 			attnKeyLen:     int(c.Uint("attention.key_length", 256)),
 			attnValLen:     int(c.Uint("attention.value_length", 256)),
 			eps:            c.Float("attention.layer_norm_rms_epsilon", 1e-06),
 			ropeLocalBase:  c.Float("rope.local.freq_base", 10000.0),
 			ropeGlobalBase: c.Float("rope.global.freq_base", 1000000.0),
 			ropeScale:      c.Float("rope.freq_scale", 1.0),
-			finalLogitSoftcap: c.Float("final_logit_softcapping", 30.0),
 		},
 	}
@@ -245,10 +243,5 @@ func (m *TextModel) Forward(ctx ml.Context, inputs, positions, outputs ml.Tensor
 	}
 
 	hiddenState = m.OutputNorm.Forward(ctx, hiddenState, m.eps)
-	hiddenState = m.Output.Forward(ctx, hiddenState)
-
-	// final logit softcap
-	hiddenState = hiddenState.Scale(ctx, 1.0/float64(m.TextOptions.finalLogitSoftcap))
-	hiddenState = hiddenState.Tanh(ctx)
-	return hiddenState.Scale(ctx, float64(m.TextOptions.finalLogitSoftcap))
+	return m.Output.Forward(ctx, hiddenState)
 }
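For reference, the removed branch implemented final-logit softcapping. With c the `final_logit_softcapping` value (default 30 above), the output logits were squashed as:

$$ \text{softcap}(z) = c \cdot \tanh\!\left(\frac{z}{c}\right), \qquad c = 30 $$

which bounds every logit to (-c, c); this change drops that step from the forward pass entirely.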
@@ -63,6 +63,10 @@ func New(c ml.Config) (model.Model, error) {
 }
 
 func (m *Model) EncodeMultimodal(ctx ml.Context, multimodalData []byte) (any, error) {
+	if len(m.VisionModel.Transformer.Layers) == 0 || len(m.GlobalTransformer.Layers) == 0 {
+		return nil, model.ErrNoVisionModel
+	}
+
 	image, _, err := image.Decode(bytes.NewReader(multimodalData))
 	if err != nil {
 		return nil, err
@@ -2,15 +2,18 @@ package model
 
 import (
 	"iter"
-	"log/slog"
 	"strings"
 
 	"github.com/dlclark/regexp2"
 	queue "github.com/emirpasic/gods/v2/queues/priorityqueue"
+
+	"github.com/ollama/ollama/logging"
 )
 
 const spmWhitespaceSep = "▁"
 
+var log = logging.NewLogger()
+
 func replaceWhitespaceBySeperator(s string) string {
 	return strings.ReplaceAll(s, " ", spmWhitespaceSep)
 }
@@ -24,7 +27,7 @@ type SentencePieceModel struct {
 var _ TextProcessor = (*SentencePieceModel)(nil)
 
 func NewSentencePieceModel(pre string, vocab *Vocabulary) SentencePieceModel {
-	slog.Debug("Tokens", "num tokens", len(vocab.Values), "vals", vocab.Values[:5], "scores", vocab.Scores[:5], "types", vocab.Types[:5])
+	log.Debug("Tokens", "num tokens", len(vocab.Values), "vals", vocab.Values[:5], "scores", vocab.Scores[:5], "types", vocab.Types[:5])
 
 	counter := map[int]int{}
 	var maxTokenLen int
@@ -38,7 +41,7 @@ func NewSentencePieceModel(pre string, vocab *Vocabulary) SentencePieceModel {
 		}
 	}
 
-	slog.Debug("Token counts", "normal", counter[TOKEN_TYPE_NORMAL], "unknown", counter[TOKEN_TYPE_UNKNOWN], "control", counter[TOKEN_TYPE_CONTROL],
+	log.Debug("Token counts", "normal", counter[TOKEN_TYPE_NORMAL], "unknown", counter[TOKEN_TYPE_UNKNOWN], "control", counter[TOKEN_TYPE_CONTROL],
 		"user defined", counter[TOKEN_TYPE_USER_DEFINED], "unused", counter[TOKEN_TYPE_UNUSED], "byte", counter[TOKEN_TYPE_BYTE],
 		"max token len", maxTokenLen)
@@ -91,7 +94,7 @@ func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error)
 			fragments = append(fragments[:i], append(middle, fragments[i+1:]...)...)
 		}
 	}
-	slog.Debug("fragments", "frags", fragments)
+	log.Trace("fragments", "frags", fragments)
 
 	var ids []int32
 	for _, frag := range fragments {
@@ -129,7 +132,7 @@ func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error)
 		}
 	}
 
-	slog.Debug("tokenizer", "merges", merges)
+	log.Trace("tokenizer", "merges", merges)
 
 	pairwise := func(a, b int) *candidate {
 		if a < 0 || b >= len(runes) {
@@ -156,7 +159,7 @@ func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error)
 	pqv := pq.Values()
 	for _, v := range pqv {
 		e := v.(*candidate)
-		slog.Debug("candidate", "candidate", e)
+		log.Trace("candidate", "candidate", e)
 	}
 
 	for !pq.Empty() {
@@ -164,7 +167,7 @@ func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error)
 		pair := v.(*candidate)
 		left, right := merges[pair.a], merges[pair.b]
 
-		slog.Debug("pair", "left", left, "right", right)
+		log.Trace("pair", "left", left, "right", right)
 		if len(left.runes) == 0 || len(right.runes) == 0 {
 			continue
 		}
@@ -189,14 +192,14 @@ func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error)
 		}
 	}
 
-	slog.Debug("merges", "merges", merges)
+	log.Trace("merges", "merges", merges)
 
 	for _, merge := range merges {
 		if len(merge.runes) > 0 {
 			if id := spm.vocab.Encode(string(merge.runes)); id >= 0 {
 				ids = append(ids, id)
 			} else {
-				slog.Debug("missing token", "token", string(merge.runes))
+				log.Error("missing token", "token", string(merge.runes))
 			}
 		}
 	}
@@ -206,19 +209,19 @@ func (spm SentencePieceModel) Encode(s string, addSpecial bool) ([]int32, error)
 	if addSpecial && len(ids) > 0 {
 		if spm.vocab.AddBOS {
 			if ids[0] == spm.vocab.BOS {
-				slog.Warn("adding bos token to prompt which already has it", "id", spm.vocab.BOS)
+				log.Warn("adding bos token to prompt which already has it", "id", spm.vocab.BOS)
 			}
 
-			slog.Debug("adding bos token to prompt", "id", spm.vocab.BOS)
+			log.Debug("adding bos token to prompt", "id", spm.vocab.BOS)
 			ids = append([]int32{spm.vocab.BOS}, ids...)
 		}
 
 		if spm.vocab.AddEOS {
 			if ids[len(ids)-1] == spm.vocab.EOS {
-				slog.Warn("adding eos token to prompt which already has it", "id", spm.vocab.EOS)
+				log.Warn("adding eos token to prompt which already has it", "id", spm.vocab.EOS)
 			}
 
-			slog.Debug("adding eos token to prompt", "id", spm.vocab.EOS)
+			log.Debug("adding eos token to prompt", "id", spm.vocab.EOS)
 			ids = append(ids, spm.vocab.EOS)
 		}
 	}
@@ -241,6 +244,6 @@ func (spm SentencePieceModel) Decode(ids []int32) (string, error) {
 		}
 	}
 
-	slog.Debug("decoded", "ids", ids, "text", sb.String())
+	log.Debug("decoded", "ids", ids, "text", sb.String())
 	return sb.String(), nil
 }
@@ -116,19 +116,9 @@ func (i *Instance) Readline() (string, error) {
 
 		switch r {
 		case KeyUp:
-			if i.History.Pos > 0 {
-				if i.History.Pos == i.History.Size() {
-					currentLineBuf = []rune(buf.String())
-				}
-				buf.Replace([]rune(i.History.Prev()))
-			}
+			i.historyPrev(buf, &currentLineBuf)
 		case KeyDown:
-			if i.History.Pos < i.History.Size() {
-				buf.Replace([]rune(i.History.Next()))
-				if i.History.Pos == i.History.Size() {
-					buf.Replace(currentLineBuf)
-				}
-			}
+			i.historyNext(buf, &currentLineBuf)
 		case KeyLeft:
 			buf.MoveLeft()
 		case KeyRight:
@@ -185,6 +175,10 @@ func (i *Instance) Readline() (string, error) {
 			esc = true
 		case CharInterrupt:
 			return "", ErrInterrupt
+		case CharPrev:
+			i.historyPrev(buf, &currentLineBuf)
+		case CharNext:
+			i.historyNext(buf, &currentLineBuf)
 		case CharLineStart:
 			buf.MoveToStart()
 		case CharLineEnd:
@@ -246,6 +240,24 @@ func (i *Instance) HistoryDisable() {
 	i.History.Enabled = false
 }
 
+func (i *Instance) historyPrev(buf *Buffer, currentLineBuf *[]rune) {
+	if i.History.Pos > 0 {
+		if i.History.Pos == i.History.Size() {
+			*currentLineBuf = []rune(buf.String())
+		}
+		buf.Replace([]rune(i.History.Prev()))
+	}
+}
+
+func (i *Instance) historyNext(buf *Buffer, currentLineBuf *[]rune) {
+	if i.History.Pos < i.History.Size() {
+		buf.Replace([]rune(i.History.Next()))
+		if i.History.Pos == i.History.Size() {
+			buf.Replace(*currentLineBuf)
+		}
+	}
+}
+
 func NewTerminal() (*Terminal, error) {
 	fd := os.Stdin.Fd()
 	termios, err := SetRawMode(fd)
@@ -691,65 +691,6 @@ type EmbeddingResponse struct {
 	Embedding []float32 `json:"embedding"`
 }
 
-func (s *Server) embeddings(w http.ResponseWriter, r *http.Request) {
-	var req EmbeddingRequest
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		http.Error(w, fmt.Sprintf("bad request: %s", err), http.StatusBadRequest)
-		return
-	}
-
-	w.Header().Set("Content-Type", "application/json")
-
-	slog.Debug("embedding request", "content", req.Content)
-
-	seq, err := s.NewSequence(req.Content, nil, NewSequenceParams{embedding: true})
-	if err != nil {
-		http.Error(w, fmt.Sprintf("Failed to create new sequence: %v", err), http.StatusInternalServerError)
-		return
-	}
-
-	// Ensure there is a place to put the sequence, released when removed from s.seqs
-	if err := s.seqsSem.Acquire(r.Context(), 1); err != nil {
-		if errors.Is(err, context.Canceled) {
-			slog.Info("aborting embeddings request due to client closing the connection")
-		} else {
-			slog.Error("Failed to acquire semaphore", "error", err)
-		}
-		return
-	}
-
-	s.mu.Lock()
-	found := false
-	for i, sq := range s.seqs {
-		if sq == nil {
-			seq.cache, seq.inputs, err = s.cache.LoadCacheSlot(seq.inputs, req.CachePrompt)
-			if err != nil {
-				s.mu.Unlock()
-				http.Error(w, fmt.Sprintf("Failed to load cache: %v", err), http.StatusInternalServerError)
-				return
-			}
-			s.seqs[i] = seq
-			s.cond.Signal()
-			found = true
-			break
-		}
-	}
-	s.mu.Unlock()
-
-	if !found {
-		http.Error(w, "could not find an available sequence", http.StatusInternalServerError)
-		return
-	}
-
-	embedding := <-seq.embedding
-
-	if err := json.NewEncoder(w).Encode(&EmbeddingResponse{
-		Embedding: embedding,
-	}); err != nil {
-		http.Error(w, fmt.Sprintf("failed to encode response: %v", err), http.StatusInternalServerError)
-	}
-}
-
 type HealthResponse struct {
 	Status   string  `json:"status"`
 	Progress float32 `json:"progress"`
@@ -927,9 +868,13 @@ func Execute(args []string) error {
 	defer listener.Close()
 
 	mux := http.NewServeMux()
-	mux.HandleFunc("/embedding", server.embeddings)
-	mux.HandleFunc("/completion", server.completion)
-	mux.HandleFunc("/health", server.health)
+	// TODO: support embeddings
+	mux.HandleFunc("POST /embedding", func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, "this model does not support embeddings", http.StatusNotImplemented)
+	})
+
+	mux.HandleFunc("POST /completion", server.completion)
+	mux.HandleFunc("GET /health", server.health)
 
 	httpServer := http.Server{
 		Handler: mux,
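Worth noting: patterns such as `POST /embedding` and `GET /health` use the method-aware routing that `net/http.ServeMux` gained in Go 1.22; a request with a different method on a registered path is rejected with 405 Method Not Allowed automatically. A minimal sketch (the address and body are illustrative):

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	mux := http.NewServeMux()
	// Go 1.22+ ServeMux patterns may begin with an HTTP method; a POST to
	// /health would get a 405 without any extra code here.
	mux.HandleFunc("GET /health", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{"status":"ok"}`))
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}
```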
@@ -87,8 +87,8 @@ func (s *Sampler) sample(tokens []token) (token, error) {
 	// topK also sorts the tokens in descending order of logits
 	tokens = topK(tokens, s.topK)
 
-	// token logit values are updated to probabilities
 	tokens = temperature(tokens, s.temperature)
+	tokens = softmax(tokens)
 
 	tokens = topP(tokens, s.topP)
 	tokens = minP(tokens, s.minP)
@@ -25,8 +25,18 @@ func (h *tokenHeap) Pop() any {
 	return x
 }
 
-// temperature applies scaling and softmax to the logits
+// temperature applies scaling to the logits
 func temperature(ts []token, temp float32) []token {
+	// Ensure temperature clipping near 0 to avoid numerical instability
+	temp = max(temp, 1e-7)
+	for i := range ts {
+		ts[i].value = ts[i].value / temp
+	}
+	return ts
+}
+
+// softmax applies normalization to the logits
+func softmax(ts []token) []token {
 	// Find max logit for numerical stability
 	maxLogit := float32(math.Inf(-1))
 	for _, t := range ts {
@@ -35,15 +45,14 @@ func softmax(ts []token) []token {
 		}
 	}
 
-	// Apply temperature and compute exp(x - max)
-	temp = max(temp, 1e-7)
+	// Compute exp(x - max)
 	var sum float32
 	for i, v := range ts {
-		ts[i].value = float32(math.Exp(float64((v.value - maxLogit) / temp)))
+		ts[i].value = float32(math.Exp(float64(v.value - maxLogit)))
 		sum += ts[i].value
 	}
 
-	// Normalize
+	// exp(x - max) / sum(exp(x - max))
 	for i := range ts {
 		ts[i].value /= sum
 	}
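The extracted `softmax` keeps the standard numerically stable formulation; subtracting the maximum logit leaves the result unchanged while preventing overflow in the exponential:

$$ p_i = \frac{e^{x_i - m}}{\sum_j e^{x_j - m}}, \qquad m = \max_j x_j $$

With temperature now a pure scaling pass, the pipeline order matters: `topP` and `minP` operate on probabilities, so `softmax` must run between `temperature` and those filters, exactly as the sampler hunk above arranges it.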
@@ -32,27 +32,83 @@ func compareLogits(t *testing.T, name string, want []float32, got []token) {
 	}
 }
 
-func TestTemperatureAndSoftmax(t *testing.T) {
-	input := []float32{1, 4, -2, 0}
+func TestTemperature(t *testing.T) {
+	input := []float32{1.0, 4.0, -2.0, 0.0}
 	got := temperature(toTokens(input), 0.5)
+	want := []float32{2.0, 8.0, -4.0, 0.0}
+	compareLogits(t, "temperature(0.5)", want, got)
 
-	// Check probabilities sum to 1
-	var sum float32
-	for _, token := range got {
-		sum += token.value
-	}
-	if math.Abs(float64(sum-1.0)) > 1e-6 {
-		t.Errorf("probabilities don't sum to 1: got %f", sum)
-	}
+	got = temperature(toTokens(input), 1.0)
+	want = []float32{1.0, 4.0, -2.0, 0.0}
+	compareLogits(t, "temperature(1)", want, got)
 
-	got = temperature(toTokens(input), 1)
-	// Check probabilities sum to 1
-	sum = 0.0
-	for _, token := range got {
-		sum += token.value
-	}
-	if math.Abs(float64(sum-1.0)) > 1e-6 {
-		t.Errorf("probabilities don't sum to 1: got %f", sum)
+	got = temperature(toTokens(input), 0.0)
+	want = []float32{1e7, 4e7, -2e7, 0.0}
+	compareLogits(t, "temperature(0)", want, got)
+}
+
+func TestSoftmax(t *testing.T) {
+	tests := []struct {
+		name     string
+		input    []float32
+		expected []float32
+	}{
+		{
+			name:     "correctness softmax",
+			input:    []float32{1, -2, 3, 0},
+			expected: []float32{0.113550, 0.005653, 0.839024, 0.041773},
+		},
+		{
+			name:  "normal distribution",
+			input: []float32{0.026986899, 0.043722924, 0.036774673, 0.27755088, 0.0046718004, 0.08582123, 0.20409796, 0.00412893, 0.15720603, 0.045046154, 0.0030491839, 0.01681367},
+		},
+		{
+			name:  "single value",
+			input: []float32{1.0},
+		},
+		{
+			name:  "identical values",
+			input: []float32{0.9, 0.9, 0.9},
+		},
+		{
+			name:  "large values",
+			input: []float32{1000.0, 2000.0, 3000.0},
+		},
+		{
+			name:  "small values",
+			input: []float32{1e-6, 2e-6, 3e-6},
+		},
+		{
+			name:  "negative values",
+			input: []float32{-1.0, -2.0, -3.0},
+		},
+		{
+			name:  "mixed values",
+			input: []float32{-100.0, 0.0, 100.0},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := softmax(toTokens(tt.input))
+
+			if tt.expected != nil {
+				compareLogits(t, tt.name, tt.expected, got)
+				return
+			}
+
+			// Check probabilities sum to 1
+			var sum float32
+			for _, token := range got {
+				sum += token.value
+				if token.value < 0 || token.value > 1 {
+					t.Errorf("probability out of range [0,1]: got %f", token.value)
+				}
+			}
+			if math.Abs(float64(sum-1.0)) > 1e-6 {
+				t.Errorf("probabilities don't sum to 1: got %f", sum)
+			}
+		})
 	}
 }
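Sanity-checking the "correctness softmax" expectations above against the definition: with inputs (1, -2, 3, 0),

$$ (e^{1}, e^{-2}, e^{3}, e^{0}) \approx (2.7183,\ 0.1353,\ 20.0855,\ 1), \quad \text{sum} \approx 23.9391 $$

and dividing through gives approximately (0.11355, 0.00565, 0.83902, 0.04177), matching the test vector.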
@@ -97,7 +153,7 @@ func TestTopP(t *testing.T) {
 	tokens := toTokens(input)
 
 	// First apply temperature and softmax to get probabilities
-	tokens = temperature(tokens, 1)
+	tokens = softmax(tokens)
 	tokens = topK(tokens, 20)
 
 	// Then apply topP
@@ -115,7 +171,7 @@ func TestMinP(t *testing.T) {
 	tokens := toTokens(input)
 
 	// First apply temperature and softmax
-	tokens = temperature(tokens, 1)
+	tokens = softmax(tokens)
 
 	// Then apply minP
 	got := minP(tokens, 0.2)
@@ -163,6 +219,14 @@ func BenchmarkTransforms(b *testing.B) {
 		}
 	})
 
+	b.Run("Softmax", func(b *testing.B) {
+		b.ResetTimer()
+		for b.Loop() {
+			copy(tokensCopy, tokens)
+			softmax(tokensCopy)
+		}
+	})
+
 	b.Run("TopK", func(b *testing.B) {
 		b.ResetTimer()
 		for b.Loop() {
@@ -435,7 +435,7 @@ func (s *Server) EmbedHandler(c *gin.Context) {
 		return
 	}
 
-	kvData, err := getKVData(m.ModelPath, false)
+	kvData, _, err := getModelData(m.ModelPath, false)
 	if err != nil {
 		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
 		return
@@ -483,8 +483,7 @@ func (s *Server) EmbedHandler(c *gin.Context) {
 	}
 
 	if err := g.Wait(); err != nil {
-		slog.Error("embedding generation failed", "error", err)
-		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Errorf("failed to generate embeddings: %v", err)})
+		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": strings.TrimSpace(err.Error())})
 		return
 	}
@@ -545,8 +544,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) {
 
 	embedding, err := r.Embedding(c.Request.Context(), req.Prompt)
 	if err != nil {
-		slog.Info(fmt.Sprintf("embedding generation failed: %v", err))
-		c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Errorf("failed to generate embedding: %v", err)})
+		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": strings.TrimSpace(err.Error())})
 		return
 	}
@@ -850,16 +848,23 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) {
 	fmt.Fprint(&sb, m.String())
 	resp.Modelfile = sb.String()
 
-	kvData, err := getKVData(m.ModelPath, req.Verbose)
+	kvData, tensors, err := getModelData(m.ModelPath, req.Verbose)
 	if err != nil {
 		return nil, err
 	}
+
 	delete(kvData, "general.name")
 	delete(kvData, "tokenizer.chat_template")
 	resp.ModelInfo = kvData
 
+	tensorData := make([]api.Tensor, len(tensors.Items()))
+	for cnt, t := range tensors.Items() {
+		tensorData[cnt] = api.Tensor{Name: t.Name, Type: t.Type(), Shape: t.Shape}
+	}
+	resp.Tensors = tensorData
+
 	if len(m.ProjectorPaths) > 0 {
-		projectorData, err := getKVData(m.ProjectorPaths[0], req.Verbose)
+		projectorData, _, err := getModelData(m.ProjectorPaths[0], req.Verbose)
 		if err != nil {
 			return nil, err
 		}
@@ -869,17 +874,17 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) {
 	return resp, nil
 }
 
-func getKVData(digest string, verbose bool) (ggml.KV, error) {
+func getModelData(digest string, verbose bool) (ggml.KV, ggml.Tensors, error) {
 	maxArraySize := 0
 	if verbose {
 		maxArraySize = -1
 	}
-	kvData, err := llm.LoadModel(digest, maxArraySize)
+	data, err := llm.LoadModel(digest, maxArraySize)
 	if err != nil {
-		return nil, err
+		return nil, ggml.Tensors{}, err
 	}
 
-	kv := kvData.KV()
+	kv := data.KV()
 
 	if !verbose {
 		for k := range kv {
@@ -889,7 +894,7 @@ func getKVData(digest string, verbose bool) (ggml.KV, error) {
 		}
 	}
 
-	return kv, nil
+	return kv, data.Tensors(), nil
 }
 
 func (s *Server) ListHandler(c *gin.Context) {