embeddings

This commit is contained in:
jmorganca 2024-05-27 11:33:47 -07:00
parent ce15ed6d69
commit b2ef3bf490
2 changed files with 125 additions and 34 deletions

View File

@ -55,6 +55,7 @@ func NewContextParams() ContextParams {
params.n_ctx = C.uint(2048) params.n_ctx = C.uint(2048)
params.n_threads = C.uint(runtime.NumCPU()) params.n_threads = C.uint(runtime.NumCPU())
params.n_threads_batch = params.n_threads params.n_threads_batch = params.n_threads
params.embeddings = C.bool(true)
return ContextParams{c: params} return ContextParams{c: params}
} }
@ -124,6 +125,20 @@ func (c *Context) KvCacheSeqRm(seqId int, p0 int, p1 int) bool {
return bool(C.llama_kv_cache_seq_rm(c.c, C.int(seqId), C.int(p0), C.int(p1))) return bool(C.llama_kv_cache_seq_rm(c.c, C.int(seqId), C.int(p0), C.int(p1)))
} }
// Get the embeddings for a sequence id
// GetEmbeddingsSeq returns the pooled embedding vector for the given
// sequence id, or nil if no embedding is available for that sequence.
func (c *Context) GetEmbeddingsSeq(seqId int) []float32 {
	ptr := unsafe.Pointer(C.llama_get_embeddings_seq(c.c, C.int(seqId)))
	if ptr == nil {
		return nil
	}
	// the C side returns a flat float array of length n_embd
	return unsafe.Slice((*float32)(ptr), c.Model().NEmbd())
}
// GetEmbeddingsIth returns the embeddings for the i-th token of the last
// decoded batch, or nil if none are available.
//
// NOTE(review): mirrors GetEmbeddingsSeq — the underlying C call can
// return NULL (e.g. when logits were not requested for token i), and
// unsafe.Slice panics on a nil pointer with a nonzero length, so the
// nil check is required.
func (c *Context) GetEmbeddingsIth(i int) []float32 {
	embeddings := unsafe.Pointer(C.llama_get_embeddings_ith(c.c, C.int32_t(i)))
	if embeddings == nil {
		return nil
	}
	return unsafe.Slice((*float32)(embeddings), c.Model().NEmbd())
}
func LoadModelFromFile(modelPath string, params ModelParams) *Model { func LoadModelFromFile(modelPath string, params ModelParams) *Model {
return &Model{c: C.llama_load_model_from_file(C.CString(modelPath), params.c)} return &Model{c: C.llama_load_model_from_file(C.CString(modelPath), params.c)}
} }
@ -225,6 +240,10 @@ func (m *Model) Tokenize(text string, maxTokens int, addSpecial bool, parseSpeci
return tokens, nil return tokens, nil
} }
// NEmbd reports the model's embedding dimension (n_embd).
func (m *Model) NEmbd() int {
	n := C.llama_n_embd(m.c)
	return int(n)
}
func Quantize(infile, outfile string, ftype uint32) error { func Quantize(infile, outfile string, ftype uint32) error {
cinfile := C.CString(infile) cinfile := C.CString(infile)
defer C.free(unsafe.Pointer(cinfile)) defer C.free(unsafe.Pointer(cinfile))

View File

@ -23,9 +23,16 @@ type Sequence struct {
// tokens left to evaluate // tokens left to evaluate
tokens []int tokens []int
// channel to send responses over
responses chan string responses chan string
samplingCtx *llama.SamplingContext samplingCtx *llama.SamplingContext
// channel to send back the embedding if embedding only
embedding chan []float32
// true if an embedding is to be returned instead of text generation
embeddingOnly bool
} }
// prompt returns true if the prompt is still being processed // prompt returns true if the prompt is still being processed
@ -33,38 +40,26 @@ func (s *Sequence) prompt() bool {
return s.nPast < len(s.tokens)-1 return s.nPast < len(s.tokens)-1
} }
func (s *Server) NewSequence(r Request, w http.ResponseWriter) *Sequence { func (s *Server) NewSequence(prompt string, params *llama.SamplingParams, embedding bool) *Sequence {
var samplingParams llama.SamplingParams tokens, err := s.lc.Model().Tokenize(prompt, 2048, false, true)
samplingParams.TopK = r.TopK
samplingParams.TopP = r.TopP
samplingParams.TfsZ = r.TFSZ
samplingParams.TypicalP = r.TypicalP
samplingParams.Temp = r.Temperature
samplingParams.PenaltyRepeat = r.RepeatPenalty
samplingParams.PenaltyFreq = r.FrequencyPenalty
samplingParams.PenaltyPresent = r.PresencePenalty
samplingParams.Mirostat = r.Mirostat
samplingParams.MirostatTau = r.MirostatTau
samplingParams.MirostatEta = r.MirostatEta
samplingParams.PenalizeNl = r.PenalizeNewline
samplingParams.Seed = uint32(r.Seed)
samplingParams.Grammar = r.Grammar
tokens, err := s.lc.Model().Tokenize(r.Prompt, 2048, false, true)
if err != nil { if err != nil {
panic(err) panic(err)
} }
sc := llama.NewSamplingContext(samplingParams) var sc *llama.SamplingContext
if params != nil {
for _, t := range tokens { sc = llama.NewSamplingContext(*params)
sc.Accept(s.lc, t, false) for _, t := range tokens {
sc.Accept(s.lc, t, false)
}
} }
return &Sequence{ return &Sequence{
tokens: tokens, tokens: tokens,
responses: make(chan string, 1), responses: make(chan string, 1),
samplingCtx: sc, embedding: make(chan []float32, 1),
samplingCtx: sc,
embeddingOnly: embedding,
} }
} }
@ -152,6 +147,20 @@ func (s *Server) run(ctx context.Context) {
continue continue
} }
// if done processing the prompt, generate an embedding and return
if seq.embeddingOnly {
embd := s.lc.GetEmbeddingsSeq(i)
if embd == nil {
embd = s.lc.GetEmbeddingsIth(ibatch[i])
}
seq.embedding <- embd
close(seq.embedding)
s.lc.KvCacheSeqRm(i, 0, -1)
s.seqs[i] = nil
continue
}
// sample a token // sample a token
// logits := s.lc.GetLogitsIth(ibatch[i]) // logits := s.lc.GetLogitsIth(ibatch[i])
// token := s.lc.SampleTokenGreedy(logits) // token := s.lc.SampleTokenGreedy(logits)
@ -178,7 +187,7 @@ func (s *Server) run(ctx context.Context) {
} }
} }
type Request struct { type CompletionRequest struct {
Prompt string `json:"prompt"` Prompt string `json:"prompt"`
Images []string `json:"images"` Images []string `json:"images"`
Grammar string `json:"grammar"` Grammar string `json:"grammar"`
@ -186,14 +195,14 @@ type Request struct {
api.Options api.Options
} }
type Response struct { type CompletionResponse struct {
Token string `json:"token"` Token string `json:"token"`
} }
func (s *Server) handler(w http.ResponseWriter, r *http.Request) { func (s *Server) completion(w http.ResponseWriter, r *http.Request) {
var request Request var req CompletionRequest
request.Options = api.DefaultOptions() req.Options = api.DefaultOptions()
if err := json.NewDecoder(r.Body).Decode(&request); err != nil { if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, "Bad request", http.StatusBadRequest) http.Error(w, "Bad request", http.StatusBadRequest)
return return
} }
@ -203,8 +212,26 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Transfer-Encoding", "chunked")
w.WriteHeader(http.StatusOK) w.WriteHeader(http.StatusOK)
seq := s.NewSequence(request, w) var samplingParams llama.SamplingParams
samplingParams.TopK = req.TopK
samplingParams.TopP = req.TopP
samplingParams.TfsZ = req.TFSZ
samplingParams.TypicalP = req.TypicalP
samplingParams.Temp = req.Temperature
samplingParams.PenaltyRepeat = req.RepeatPenalty
samplingParams.PenaltyFreq = req.FrequencyPenalty
samplingParams.PenaltyPresent = req.PresencePenalty
samplingParams.Mirostat = req.Mirostat
samplingParams.MirostatTau = req.MirostatTau
samplingParams.MirostatEta = req.MirostatEta
samplingParams.PenalizeNl = req.PenalizeNewline
samplingParams.Seed = uint32(req.Seed)
samplingParams.Grammar = req.Grammar
seq := s.NewSequence(req.Prompt, &samplingParams, false)
// TODO (jmorganca): add to sequence queue instead of
// failing if a slot isn't available
s.mu.Lock() s.mu.Lock()
for i, sq := range s.seqs { for i, sq := range s.seqs {
if sq == nil { if sq == nil {
@ -215,8 +242,9 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
} }
s.mu.Unlock() s.mu.Unlock()
// stream the response
for token := range seq.responses { for token := range seq.responses {
if err := json.NewEncoder(w).Encode(&Response{ if err := json.NewEncoder(w).Encode(&CompletionResponse{
Token: token, Token: token,
}); err != nil { }); err != nil {
log.Println("Failed to encode result:", err) log.Println("Failed to encode result:", err)
@ -233,6 +261,46 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
} }
} }
// EmbeddingRequest is the JSON body accepted by the /embeddings endpoint.
type EmbeddingRequest struct {
	// Prompt is the text to embed.
	Prompt string `json:"prompt"`
}
// EmbeddingResponse is the JSON body returned by the /embeddings endpoint.
type EmbeddingResponse struct {
	// Embedding is the embedding vector for the prompt.
	Embedding []float32 `json:"embedding"`
}
// embeddings handles an /embeddings request: it decodes the prompt,
// schedules an embedding-only sequence, waits for the resulting vector,
// and writes it back as JSON.
//
// TODO (jmorganca): is it safe to do this concurrently with decoding?
func (s *Server) embeddings(w http.ResponseWriter, r *http.Request) {
	var req EmbeddingRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Bad request", http.StatusBadRequest)
		return
	}

	// nil sampling params: embedding-only sequences generate no tokens
	seq := s.NewSequence(req.Prompt, nil, true)

	// find a free slot; without one the sequence is never scheduled and
	// the receive on seq.embedding below would block this request forever
	scheduled := false
	s.mu.Lock()
	for i, sq := range s.seqs {
		if sq == nil {
			s.seqs[i] = seq
			s.cond.Signal()
			scheduled = true
			break
		}
	}
	s.mu.Unlock()
	if !scheduled {
		http.Error(w, "No available sequence slots", http.StatusServiceUnavailable)
		return
	}

	w.Header().Set("Content-Type", "application/json")

	embedding := <-seq.embedding

	if err := json.NewEncoder(w).Encode(&EmbeddingResponse{
		Embedding: embedding,
	}); err != nil {
		log.Println("Failed to encode result:", err)
		return
	}
}
func main() { func main() {
mpath := flag.String("model", "", "Path to model binary file") mpath := flag.String("model", "", "Path to model binary file")
ppath := flag.String("projector", "", "Path to projector binary file") ppath := flag.String("projector", "", "Path to projector binary file")
@ -279,8 +347,12 @@ func main() {
} }
defer listener.Close() defer listener.Close()
mux := http.NewServeMux()
mux.HandleFunc("/embeddings", server.embeddings)
mux.HandleFunc("/completion", server.completion)
httpServer := http.Server{ httpServer := http.Server{
Handler: http.HandlerFunc(server.handler), Handler: mux,
} }
log.Println("Server listening on", addr) log.Println("Server listening on", addr)