Compare commits

..

5 Commits

Author SHA1 Message Date
Devon Rifkin
7c94471d38 ggml: more accurate estimates for head count array case
Also standardized the approach by always treating `HeadCount()` and
`HeadCountKV()` as arrays by filling them with the same value when
they're a scalar in the original GGUF
2025-04-10 16:28:34 -07:00
Devon Rifkin
0188c74c41 ggml: fix crash for array head counts
If it's an array, it only uses the first value in the array instead of
doing something more intelligent to calculate the estimate

Fixes: #9984
2025-04-10 16:25:40 -07:00
Tom Sheffler
ef65174df2 types: include the 'items' and '$defs' fields to properly handle "array" types (#10091)
---------

Co-authored-by: Parth Sareen <parth.sareen@ollama.com>
2025-04-09 17:45:49 -07:00
Ire Gaddr
42ecb9f138 fix(scheduler): make model unload order deterministic (#10185) 2025-04-09 16:01:02 -07:00
湛露先生
5c0331fd83 Fix dockerfile. (#9855)
Signed-off-by: zhanluxianshen <zhanluxianshen@163.com>
2025-04-09 13:24:56 -07:00
8 changed files with 289 additions and 91 deletions

View File

@@ -104,8 +104,8 @@ COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
FROM --platform=linux/arm64 scratch AS arm64 FROM --platform=linux/arm64 scratch AS arm64
COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11 COPY --from=cuda-11 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_v11
COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12 COPY --from=cuda-12 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_v12
COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 lib/ollama/cuda_jetpack5 COPY --from=jetpack-5 dist/lib/ollama/cuda_v11 /lib/ollama/cuda_jetpack5
COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 lib/ollama/cuda_jetpack6 COPY --from=jetpack-6 dist/lib/ollama/cuda_v12 /lib/ollama/cuda_jetpack6
FROM scratch AS rocm FROM scratch AS rocm
COPY --from=rocm-6 dist/lib/ollama/rocm /lib/ollama/rocm COPY --from=rocm-6 dist/lib/ollama/rocm /lib/ollama/rocm

View File

@@ -163,6 +163,7 @@ func (t *ToolCallFunctionArguments) String() string {
type Tool struct { type Tool struct {
Type string `json:"type"` Type string `json:"type"`
Items any `json:"items,omitempty"`
Function ToolFunction `json:"function"` Function ToolFunction `json:"function"`
} }
@@ -213,9 +214,12 @@ type ToolFunction struct {
Description string `json:"description"` Description string `json:"description"`
Parameters struct { Parameters struct {
Type string `json:"type"` Type string `json:"type"`
Defs any `json:"$defs,omitempty"`
Items any `json:"items,omitempty"`
Required []string `json:"required"` Required []string `json:"required"`
Properties map[string]struct { Properties map[string]struct {
Type PropertyType `json:"type"` Type PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"` Description string `json:"description"`
Enum []any `json:"enum,omitempty"` Enum []any `json:"enum,omitempty"`
} `json:"properties"` } `json:"properties"`

View File

@@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"io" "io"
"log/slog" "log/slog"
"reflect"
"slices" "slices"
"strings" "strings"
@@ -52,32 +53,80 @@ func (kv KV) EmbeddingLength() uint64 {
return uint64(kv.Uint("embedding_length")) return uint64(kv.Uint("embedding_length"))
} }
func (kv KV) HeadCount() uint64 { func (kv KV) HeadCounts() []uint64 {
return uint64(kv.Uint("attention.head_count")) return kv.UintOrArrayAsArray("attention.head_count", kv.BlockCount(), 1)
} }
func (kv KV) HeadCountKV() uint64 { func (kv KV) HeadCountKVs() []uint64 {
return uint64(kv.Uint("attention.head_count_kv", 1)) return kv.UintOrArrayAsArray("attention.head_count_kv", kv.BlockCount(), 1)
} }
func (kv KV) EmbeddingHeadCount() uint64 { func (kv KV) EmbeddingHeadCount() []uint64 {
if heads := kv.HeadCount(); heads > 0 { headCount := kv.HeadCounts()
return kv.EmbeddingLength() / heads embeddingHeadCount := make([]uint64, len(headCount))
for i, heads := range headCount {
if heads == 0 {
embeddingHeadCount[i] = 0
} else {
embeddingHeadCount[i] = kv.EmbeddingLength() / heads
}
} }
return embeddingHeadCount
}
func (kv KV) FillArrayOrDefault(key string, defaultValue []uint64) []uint64 {
length := len(defaultValue)
if v, ok := keyValueUntyped(kv, key); ok {
switch v := v.(type) {
case uint32:
return FillArray(uint64(v), length)
case uint64:
return FillArray(v, length)
case int32:
return FillArray(uint64(v), length)
default:
slog.Warn("unsupported type", "key", key, "type", reflect.TypeOf(v))
}
}
return defaultValue
}
func (kv KV) EmbeddingHeadCountK() []uint64 {
return kv.FillArrayOrDefault("attention.key_length", kv.EmbeddingHeadCount())
}
func (kv KV) EmbeddingHeadCountV() []uint64 {
return kv.FillArrayOrDefault("attention.value_length", kv.EmbeddingHeadCount())
}
func (kv KV) GQAMax() uint64 {
heads := kv.HeadCounts()
headsKV := kv.HeadCountKVs()
if len(heads) != len(headsKV) {
slog.Warn("head count and head count kv are not the same length")
return 0
}
if len(heads) == 0 {
slog.Warn("head count is empty")
return 0 return 0
} }
func (kv KV) EmbeddingHeadCountK() uint64 { maxGQA := uint64(0)
return uint64(kv.Uint("attention.key_length", uint32(kv.EmbeddingHeadCount()))) for i := range heads {
head := heads[i]
headKV := headsKV[i]
if head == 0 || headKV == 0 {
return 0
}
gqa := head / headKV
if gqa > maxGQA {
maxGQA = gqa
}
} }
func (kv KV) EmbeddingHeadCountV() uint64 { return maxGQA
return uint64(kv.Uint("attention.value_length", uint32(kv.EmbeddingHeadCount())))
}
func (kv KV) GQA() uint64 {
return kv.HeadCount() / kv.HeadCountKV()
} }
func (kv KV) ContextLength() uint64 { func (kv KV) ContextLength() uint64 {
@@ -104,6 +153,41 @@ func (kv KV) Bool(key string, defaultValue ...bool) bool {
return keyValue(kv, key, append(defaultValue, false)...) return keyValue(kv, key, append(defaultValue, false)...)
} }
func (kv KV) UintOrArrayAsArray(key string, n uint64, defaultSingleValue ...uint64) []uint64 {
var singleValue *uint64
if v, ok := keyValueUntyped(kv, key); ok {
switch v := v.(type) {
case *array:
switch v.values[0].(type) {
case int32, uint32, uint64:
values, ok := AsUint64Array(v.values)
if ok {
return values
}
default:
slog.Warn("unexpected array value type", "key", key, "type", reflect.TypeOf(v))
}
case uint32:
val := uint64(v)
singleValue = &val
case int32:
val := uint64(v)
singleValue = &val
}
}
if singleValue == nil {
slog.Warn("falling back to default")
singleValue = &defaultSingleValue[0]
}
values := make([]uint64, n)
for i := range values {
values[i] = *singleValue
}
return values
}
func (kv KV) Strings(key string, defaultValue ...[]string) []string { func (kv KV) Strings(key string, defaultValue ...[]string) []string {
r := keyValue(kv, key, &array{}) r := keyValue(kv, key, &array{})
s := make([]string, r.size) s := make([]string, r.size)
@@ -141,11 +225,7 @@ func (kv KV) OllamaEngineRequired() bool {
} }
func keyValue[T string | uint32 | uint64 | float32 | *array | bool](kv KV, key string, defaultValue ...T) T { func keyValue[T string | uint32 | uint64 | float32 | *array | bool](kv KV, key string, defaultValue ...T) T {
if !strings.HasPrefix(key, "tokenizer.") && !strings.HasPrefix(key, "general.") { if val, ok := keyValueUntyped(kv, key); ok {
key = kv.Architecture() + "." + key
}
if val, ok := kv[key]; ok {
return val.(T) return val.(T)
} }
@@ -153,6 +233,18 @@ func keyValue[T string | uint32 | uint64 | float32 | *array | bool](kv KV, key s
return defaultValue[0] return defaultValue[0]
} }
func keyValueUntyped(kv KV, key string) (any, bool) {
if !strings.HasPrefix(key, "tokenizer.") && !strings.HasPrefix(key, "general.") {
key = kv.Architecture() + "." + key
}
if val, ok := kv[key]; ok {
return val, true
}
return nil, false
}
type Tensors struct { type Tensors struct {
items []*Tensor items []*Tensor
Offset uint64 Offset uint64
@@ -418,12 +510,22 @@ func Decode(rs io.ReadSeeker, maxArraySize int) (*GGML, int64, error) {
func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType string) (kv []uint64, partialOffload, fullOffload uint64) { func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType string) (kv []uint64, partialOffload, fullOffload uint64) {
embedding := f.KV().EmbeddingLength() embedding := f.KV().EmbeddingLength()
heads := f.KV().HeadCount() heads := f.KV().HeadCounts()
headsKV := f.KV().HeadCountKV() headsKV := f.KV().HeadCountKVs()
vocab := uint64(f.KV()["tokenizer.ggml.tokens"].(*array).size) vocab := uint64(f.KV()["tokenizer.ggml.tokens"].(*array).size)
embeddingHeads := f.KV().EmbeddingHeadCount() embeddingHeads := f.KV().EmbeddingHeadCount()
maxEmbeddingHeads, ok := MaxValue(embeddingHeads)
if !ok {
maxEmbeddingHeads = 1
slog.Warn("failed to get max embedding heads")
}
embeddingHeadsK := f.KV().EmbeddingHeadCountK() embeddingHeadsK := f.KV().EmbeddingHeadCountK()
maxEmbeddingHeadsK, ok := MaxValue(embeddingHeadsK)
if !ok {
maxEmbeddingHeadsK = 1
slog.Warn("failed to get max embedding headsK")
}
embeddingHeadsV := f.KV().EmbeddingHeadCountV() embeddingHeadsV := f.KV().EmbeddingHeadCountV()
layers := f.Tensors().GroupLayers() layers := f.Tensors().GroupLayers()
@@ -431,19 +533,30 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
bytesPerElement := kvCacheBytesPerElement(kvCacheType) bytesPerElement := kvCacheBytesPerElement(kvCacheType)
kv = make([]uint64, f.KV().BlockCount()) kv = make([]uint64, f.KV().BlockCount())
for i := range kv { for i := range kv {
kv[i] = uint64(float64(context*(embeddingHeadsK+embeddingHeadsV)*headsKV) * bytesPerElement) kv[i] = uint64(float64(context*(embeddingHeadsK[i]+embeddingHeadsV[i])*headsKV[i]) * bytesPerElement)
}
maxHeads, ok := MaxValue(heads)
if !ok {
maxHeads = 1
slog.Warn("failed to get max heads")
}
maxHeadsKV, ok := MaxValue(headsKV)
if !ok {
maxHeadsKV = 1
slog.Warn("failed to get max headsKV")
} }
switch f.KV().Architecture() { switch f.KV().Architecture() {
case "llama": case "llama":
fullOffload = max( fullOffload = max(
4*batch*(1+4*embedding+context*(1+heads)), 4*batch*(1+4*embedding+context*(1+maxHeads)),
4*batch*(embedding+vocab), 4*batch*(embedding+vocab),
) )
partialOffload = 4 * batch * embedding partialOffload = 4 * batch * embedding
partialOffload += max( partialOffload += max(
4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*heads+embeddingHeads*headsKV), 4*batch*(1+embedding+max(context, embedding))+embedding*embedding*9/16+4*context*(batch*maxHeads+maxEmbeddingHeads*maxHeadsKV),
4*batch*(embedding+vocab)+embedding*vocab*105/128, 4*batch*(embedding+vocab)+embedding*vocab*105/128,
) )
@@ -451,16 +564,16 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
// mixtral 8x22b // mixtral 8x22b
ff := uint64(f.KV()["llama.feed_forward_length"].(uint32)) ff := uint64(f.KV()["llama.feed_forward_length"].(uint32))
partialOffload = max( partialOffload = max(
3*ffnGateExpsWeight.Size()+4*batch*(2*ff+headsKV+embedding+context+embeddingHeads*headsKV), 3*ffnGateExpsWeight.Size()+4*batch*(2*ff+maxHeadsKV+embedding+context+maxEmbeddingHeads*maxHeadsKV),
4*(context*batch*heads+context*embeddingHeads*headsKV+batch*1024+embeddingHeads*headsKV*batch), 4*(context*batch*maxHeads+context*maxEmbeddingHeads*maxHeadsKV+batch*1024+maxEmbeddingHeads*maxHeadsKV*batch),
) )
} else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok { } else if ffnGateWeight, ok := layers["blk.0"]["ffn_gate.0.weight"]; ok {
// mixtral 8x7b // mixtral 8x7b
ffnGateWeight1 := ffnGateWeight.Shape[1] ffnGateWeight1 := ffnGateWeight.Shape[1]
fullOffload = 4 * batch * (2 + 3*embedding + context*(1+heads) + 2*headsKV + ffnGateWeight1) fullOffload = 4 * batch * (2 + 3*embedding + context*(1+maxHeads) + 2*maxHeadsKV + ffnGateWeight1)
partialOffload = max( partialOffload = max(
4*batch*(3+embeddingHeads*headsKV+embedding+context*(1+heads)+ffnGateWeight1)+(embedding*embedding+3*embedding*headsKV*ffnGateWeight1)*9/16, 4*batch*(3+maxEmbeddingHeads*maxHeadsKV+embedding+context*(1+maxHeads)+ffnGateWeight1)+(embedding*embedding+3*embedding*maxHeadsKV*ffnGateWeight1)*9/16,
4*batch*(1+2*embedding+context*(1+heads))+embedding*(6*context*headsKV/heads+embedding*9/16), 4*batch*(1+2*embedding+context*(1+maxHeads))+embedding*(6*context*maxHeadsKV/maxHeads+embedding*9/16),
) )
} }
case "mllama": case "mllama":
@@ -469,7 +582,7 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
crossAttentionLayers := f.KV().Uints("attention.cross_attention_layers") crossAttentionLayers := f.KV().Uints("attention.cross_attention_layers")
for i := range kv { for i := range kv {
if slices.Contains(crossAttentionLayers, uint32(i)) { if slices.Contains(crossAttentionLayers, uint32(i)) {
kv[i] = headsKV * (embeddingHeadsK + embeddingHeadsV) * kv[i] = headsKV[i] * (embeddingHeadsK[i] + embeddingHeadsV[i]) *
4 * // sizeof(float32) 4 * // sizeof(float32)
visionTokens * visionTokens *
tiles tiles
@@ -477,7 +590,7 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
} }
fullOffload = max( fullOffload = max(
4*batch*(2+3*embedding+embeddingHeadsK*heads+context*(1+heads)), 4*batch*(2+3*embedding+maxEmbeddingHeadsK*maxHeads+context*(1+maxHeads)),
// vocab graph // vocab graph
4*batch*(embedding+vocab), 4*batch*(embedding+vocab),
) )
@@ -491,23 +604,23 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
partialOffload = max( partialOffload = max(
4*(batch* 4*(batch*
(2*embedding+1+context*(1+heads)+embeddingHeadsK*heads)+ (2*embedding+1+context*(1+maxHeads)+maxEmbeddingHeadsK*maxHeads)+
ropeFreqsCount+ ropeFreqsCount+
embeddingHeadsK*context*headsKV), maxEmbeddingHeadsK*context*maxHeadsKV),
// vocab graph // vocab graph
4*batch*(embedding+vocab)+embedding*vocab*105/128, 4*batch*(embedding+vocab)+embedding*vocab*105/128,
) )
case "gemma", "gemma2", "gemma3": case "gemma", "gemma2", "gemma3":
fullOffload = max( fullOffload = max(
4*batch*(embedding+vocab), 4*batch*(embedding+vocab),
4*batch*(2+context+context*heads+2*embedding+2*embeddingHeadsK*heads), 4*batch*(2+context+context*maxHeads+2*embedding+2*maxEmbeddingHeadsK*maxHeads),
) )
partialOffload = max( partialOffload = max(
4*embedding*batch+embedding*vocab*105/128+4*vocab*batch, 4*embedding*batch+embedding*vocab*105/128+4*vocab*batch,
4*batch*(2*embedding+1+2*embeddingHeadsK*heads+context+context*heads)+ 4*batch*(2*embedding+1+2*maxEmbeddingHeadsK*maxHeads+context+context*maxHeads)+
4*embeddingHeadsK*context*8+ 4*maxEmbeddingHeadsK*context*8+
embedding*embeddingHeadsK*heads*9/16, embedding*embedding*maxEmbeddingHeadsK*maxHeads*9/16,
) )
// Gemma2 also has sliding window attention but we only have an optimized implementation in the Ollama // Gemma2 also has sliding window attention but we only have an optimized implementation in the Ollama
@@ -519,42 +632,42 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
// Every 6th layer is a global layer, which is the full context size that has already been set. The other // Every 6th layer is a global layer, which is the full context size that has already been set. The other
// layers are the smaller local (sliding) layers. // layers are the smaller local (sliding) layers.
if (i+1)%gemma3GlobalCacheCount != 0 { if (i+1)%gemma3GlobalCacheCount != 0 {
kv[i] = uint64(float64(slidingWindow*(embeddingHeadsK+embeddingHeadsV)*headsKV) * bytesPerElement) kv[i] = uint64(float64(slidingWindow*(embeddingHeadsK[i]+embeddingHeadsV[i])*headsKV[i]) * bytesPerElement)
} }
} }
} }
case "command-r": case "command-r":
fullOffload = max( fullOffload = max(
4*batch*(embedding+vocab), 4*batch*(embedding+vocab),
4*batch*(2+4*embedding+context*(1+heads)), 4*batch*(2+4*embedding+context*(1+maxHeads)),
) )
partialOffload = max( partialOffload = max(
4*batch*(embedding+vocab)+embedding*vocab*105/128, 4*batch*(embedding+vocab)+embedding*vocab*105/128,
4*batch*(1+2*embedding+context*(1+heads))+4*embedding*context+embedding*embedding*9/16, 4*batch*(1+2*embedding+context*(1+maxHeads))+4*embedding*context+embedding*embedding*9/16,
) )
case "qwen2": case "qwen2":
fullOffload = max( fullOffload = max(
4*batch*(embedding+vocab), 4*batch*(embedding+vocab),
4*batch*(1+2*embedding+context+context*heads), 4*batch*(1+2*embedding+context+context*maxHeads),
) )
partialOffload = max( partialOffload = max(
4*batch*(embedding+vocab)+embedding*vocab*105/128, 4*batch*(embedding+vocab)+embedding*vocab*105/128,
4*(batch*(1+2*embedding+context*(1+heads))+embedding*(1+context)), 4*(batch*(1+2*embedding+context*(1+maxHeads))+embedding*(1+context)),
) )
case "phi2": case "phi2":
fullOffload = max( fullOffload = max(
4*batch*(embedding+vocab), 4*batch*(embedding+vocab),
4*batch*(1+4*embedding+context+context*heads), 4*batch*(1+4*embedding+context+context*maxHeads),
) )
partialOffload = max( partialOffload = max(
4*batch*(2*embedding+vocab)+embedding*vocab*105/128, 4*batch*(2*embedding+vocab)+embedding*vocab*105/128,
4*batch*(2+3*embedding+context+context*heads), 4*batch*(2+3*embedding+context+context*maxHeads),
) )
case "stablelm": case "stablelm":
fullOffload = 4 * batch * (context*(1+heads) + 3*embedding + 2) fullOffload = 4 * batch * (context*(1+maxHeads) + 3*embedding + 2)
partialOffload = max( partialOffload = max(
4*batch*(vocab+2*embedding), 4*batch*(vocab+2*embedding),
fullOffload, fullOffload,
@@ -562,12 +675,12 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
case "deepseek2": case "deepseek2":
fullOffload = max( fullOffload = max(
4*batch*(3*embedding+vocab), 4*batch*(3*embedding+vocab),
4*batch*(3*embedding+2+context*(1+headsKV)+2*embeddingHeadsK*headsKV), 4*batch*(3*embedding+2+context*(1+maxHeadsKV)+2*maxEmbeddingHeadsK*maxHeadsKV),
) )
partialOffload = max( partialOffload = max(
4*batch*(3*embedding+vocab)+embedding*vocab*105/128, 4*batch*(3*embedding+vocab)+embedding*vocab*105/128,
4*batch*(2*embedding+1+2*embeddingHeadsK*headsKV+context+context*headsKV)+4*embeddingHeadsK*context*headsKV+embedding*embeddingHeadsK*headsKV*9/16, 4*batch*(2*embedding+1+2*maxEmbeddingHeadsK*maxHeadsKV+context+context*maxHeadsKV)+4*maxEmbeddingHeadsK*context*maxHeadsKV+embedding*embedding*maxEmbeddingHeadsK*maxHeadsKV*9/16,
) )
case "chatglm": case "chatglm":
fullOffload = 4 * batch * (embedding + vocab) fullOffload = 4 * batch * (embedding + vocab)
@@ -578,8 +691,8 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
4*batch*(2+ 4*batch*(2+
2*embedding+ 2*embedding+
context+ context+
context*heads+ context*maxHeads+
embeddingHeadsK*heads+ maxEmbeddingHeadsK*maxHeads+
qkvBias.Shape[0]), qkvBias.Shape[0]),
) )
@@ -587,11 +700,11 @@ func (f GGML) GraphSize(context, batch uint64, numParallel int, kvCacheType stri
partialOffload, partialOffload,
4*batch*(1+ 4*batch*(1+
2*embedding+ 2*embedding+
embeddingHeadsK*heads+ maxEmbeddingHeadsK*maxHeads+
context+ context+
context*heads)+ context*maxHeads)+
4*embeddingHeadsK*context+ 4*maxEmbeddingHeadsK*context+
4*context*embeddingHeadsK+ 4*context*maxEmbeddingHeadsK+
4*qkvBias.Shape[0], 4*qkvBias.Shape[0],
) )
} }
@@ -663,9 +776,15 @@ func (f GGML) SupportsFlashAttention() bool {
} }
// Check head counts match and are non-zero // Check head counts match and are non-zero
headCountK := f.KV().EmbeddingHeadCountK() headCount := f.KV().HeadCounts()
headCountV := f.KV().EmbeddingHeadCountV() embeddingHeadCountK := f.KV().EmbeddingHeadCountK()
return headCountK != 0 && headCountV != 0 && headCountK == headCountV embeddingHeadCountV := f.KV().EmbeddingHeadCountV()
for i := range headCount {
if embeddingHeadCountK[i] != embeddingHeadCountV[i] {
return false
}
}
return true
} }
// kvCacheBytesPerElement returns the number of bytes per element for a given KV cache type // kvCacheBytesPerElement returns the number of bytes per element for a given KV cache type
@@ -679,3 +798,54 @@ func kvCacheBytesPerElement(cacheType string) float64 {
return 2 // f16 (default) return 2 // f16 (default)
} }
} }
func AsUint64Array(v []any) ([]uint64, bool) {
switch v[0].(type) {
case uint32:
values := make([]uint64, len(v))
for i, v := range v {
values[i] = uint64(v.(uint32))
}
return values, true
case uint64:
values := make([]uint64, len(v))
for i, v := range v {
values[i] = v.(uint64)
}
return values, true
case int32:
values := make([]uint64, len(v))
for i, val := range v {
val := val.(int32)
if val < 0 {
slog.Warn("negative value in int32 array", "value", val)
return nil, false
}
values[i] = uint64(val)
}
return values, true
}
return nil, false
}
func MaxValue(values []uint64) (uint64, bool) {
if len(values) == 0 {
return 0, false
}
max := values[0]
for _, v := range values {
if v > max {
max = v
}
}
return max, true
}
func FillArray[T any](value T, n int) []T {
values := make([]T, n)
for i := range values {
values[i] = value
}
return values
}

View File

@@ -149,7 +149,7 @@ func EstimateGPULayers(gpus []discover.GpuInfo, f *ggml.GGML, projectors []strin
} }
if graphPartialOffload == 0 { if graphPartialOffload == 0 {
graphPartialOffload = f.KV().GQA() * kvTotal / 6 graphPartialOffload = f.KV().GQAMax() * kvTotal / 6
} }
if graphFullOffload == 0 { if graphFullOffload == 0 {
graphFullOffload = graphPartialOffload graphFullOffload = graphPartialOffload

View File

@@ -281,9 +281,12 @@ func TestChatMiddleware(t *testing.T) {
Description: "Get the current weather", Description: "Get the current weather",
Parameters: struct { Parameters: struct {
Type string `json:"type"` Type string `json:"type"`
Defs any `json:"$defs,omitempty"`
Items any `json:"items,omitempty"`
Required []string `json:"required"` Required []string `json:"required"`
Properties map[string]struct { Properties map[string]struct {
Type api.PropertyType `json:"type"` Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"` Description string `json:"description"`
Enum []any `json:"enum,omitempty"` Enum []any `json:"enum,omitempty"`
} `json:"properties"` } `json:"properties"`
@@ -292,6 +295,7 @@ func TestChatMiddleware(t *testing.T) {
Required: []string{"location"}, Required: []string{"location"},
Properties: map[string]struct { Properties: map[string]struct {
Type api.PropertyType `json:"type"` Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"` Description string `json:"description"`
Enum []any `json:"enum,omitempty"` Enum []any `json:"enum,omitempty"`
}{ }{

View File

@@ -497,8 +497,12 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
return nil, err return nil, err
} }
var offset int64
for offset < stat.Size() {
f, n, err := ggml.Decode(blob, 0) f, n, err := ggml.Decode(blob, 0)
if err != nil { if errors.Is(err, io.EOF) {
break
} else if err != nil {
return nil, err return nil, err
} }
@@ -510,7 +514,7 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
} }
var layer Layer var layer Layer
if digest != "" && n == stat.Size() { if digest != "" && n == stat.Size() && offset == 0 {
layer, err = NewLayerFromLayer(digest, mediatype, blob.Name()) layer, err = NewLayerFromLayer(digest, mediatype, blob.Name())
if err != nil { if err != nil {
slog.Debug("could not create new layer from layer", "error", err) slog.Debug("could not create new layer from layer", "error", err)
@@ -520,13 +524,15 @@ func ggufLayers(digest string, fn func(resp api.ProgressResponse)) ([]*layerGGML
// Fallback to creating layer from file copy (either NewLayerFromLayer failed, or digest empty/n != stat.Size()) // Fallback to creating layer from file copy (either NewLayerFromLayer failed, or digest empty/n != stat.Size())
if layer.Digest == "" { if layer.Digest == "" {
layer, err = NewLayer(io.NewSectionReader(blob, 0, n), mediatype) layer, err = NewLayer(io.NewSectionReader(blob, offset, n), mediatype)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
layers = append(layers, &layerGGML{layer, f}) layers = append(layers, &layerGGML{layer, f})
offset = n
}
return detectChatTemplate(layers) return detectChatTemplate(layers)
} }

View File

@@ -370,9 +370,12 @@ func TestGenerateChat(t *testing.T) {
Description: "Get the current weather", Description: "Get the current weather",
Parameters: struct { Parameters: struct {
Type string `json:"type"` Type string `json:"type"`
Defs any `json:"$defs,omitempty"`
Items any `json:"items,omitempty"`
Required []string `json:"required"` Required []string `json:"required"`
Properties map[string]struct { Properties map[string]struct {
Type api.PropertyType `json:"type"` Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"` Description string `json:"description"`
Enum []any `json:"enum,omitempty"` Enum []any `json:"enum,omitempty"`
} `json:"properties"` } `json:"properties"`
@@ -381,6 +384,7 @@ func TestGenerateChat(t *testing.T) {
Required: []string{"location"}, Required: []string{"location"},
Properties: map[string]struct { Properties: map[string]struct {
Type api.PropertyType `json:"type"` Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"` Description string `json:"description"`
Enum []any `json:"enum,omitempty"` Enum []any `json:"enum,omitempty"`
}{ }{
@@ -467,9 +471,12 @@ func TestGenerateChat(t *testing.T) {
Description: "Get the current weather", Description: "Get the current weather",
Parameters: struct { Parameters: struct {
Type string `json:"type"` Type string `json:"type"`
Defs any `json:"$defs,omitempty"`
Items any `json:"items,omitempty"`
Required []string `json:"required"` Required []string `json:"required"`
Properties map[string]struct { Properties map[string]struct {
Type api.PropertyType `json:"type"` Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"` Description string `json:"description"`
Enum []any `json:"enum,omitempty"` Enum []any `json:"enum,omitempty"`
} `json:"properties"` } `json:"properties"`
@@ -478,6 +485,7 @@ func TestGenerateChat(t *testing.T) {
Required: []string{"location"}, Required: []string{"location"},
Properties: map[string]struct { Properties: map[string]struct {
Type api.PropertyType `json:"type"` Type api.PropertyType `json:"type"`
Items any `json:"items,omitempty"`
Description string `json:"description"` Description string `json:"description"`
Enum []any `json:"enum,omitempty"` Enum []any `json:"enum,omitempty"`
}{ }{

View File

@@ -667,13 +667,19 @@ func (runner *runnerRef) waitForVRAMRecovery() chan any {
return finished return finished
} }
type ByDuration []*runnerRef type ByDurationAndName []*runnerRef
func (a ByDuration) Len() int { return len(a) } func (a ByDurationAndName) Len() int { return len(a) }
func (a ByDuration) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByDurationAndName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a ByDuration) Less(i, j int) bool { func (a ByDurationAndName) Less(i, j int) bool {
// uint64 to turn negative time (never unload) to largest // Primary sort by session duration (uint64 to handle negatives)
return uint64(a[i].sessionDuration) < uint64(a[j].sessionDuration) d1 := uint64(a[i].sessionDuration)
d2 := uint64(a[j].sessionDuration)
if d1 != d2 {
return d1 < d2
}
// Secondary sort by model path lex order
return a[i].modelPath < a[j].modelPath
} }
// TODO - future consideration to pick runners based on size // TODO - future consideration to pick runners based on size
@@ -775,7 +781,7 @@ func (s *Scheduler) findRunnerToUnload() *runnerRef {
// In the future we can enhance the algorithm to be smarter about picking the optimal runner to unload // In the future we can enhance the algorithm to be smarter about picking the optimal runner to unload
// e.g., if we have multiple options, will one make room for the request? // e.g., if we have multiple options, will one make room for the request?
sort.Sort(ByDuration(runnerList)) sort.Sort(ByDurationAndName(runnerList))
// First try to find a runner that's already idle // First try to find a runner that's already idle
for _, runner := range runnerList { for _, runner := range runnerList {