Initial Draft
Commit ff191d7cba (parent 0f87628b6d)
@@ -210,7 +210,10 @@ type EmbeddingRequest struct {
 	Model string `json:"model"`
 
 	// Prompt is the textual prompt to embed.
-	Prompt string `json:"prompt"`
+	Prompt string `json:"prompt,omitempty"`
+
+	// PromptBatch is a list of prompts to embed.
+	PromptBatch []string `json:"prompt_batch,omitempty"`
 
 	// KeepAlive controls how long the model will stay loaded in memory following
 	// this request.
@@ -222,7 +225,8 @@ type EmbeddingRequest struct {
 
 // EmbeddingResponse is the response from [Client.Embeddings].
 type EmbeddingResponse struct {
-	Embedding []float64 `json:"embedding"`
+	Embedding []float64 `json:"embedding,omitempty"`
+	EmbeddingBatch [][]float64 `json:"embedding_batch,omitempty"`
 }
 
 // CreateRequest is the request passed to [Client.Create].
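Note: a client-side sketch of how the new batch fields might be exercised, assuming the existing api.Client.Embeddings helper simply forwards PromptBatch and returns EmbeddingBatch as declared above (the model name and usage are illustrative, not part of this commit):

    // Hypothetical usage of the new batch fields via the api client.
    package main

    import (
    	"context"
    	"fmt"

    	"github.com/ollama/ollama/api"
    )

    func main() {
    	client, err := api.ClientFromEnvironment()
    	if err != nil {
    		panic(err)
    	}
    	resp, err := client.Embeddings(context.Background(), &api.EmbeddingRequest{
    		Model:       "all-minilm", // illustrative model name
    		PromptBatch: []string{"why is the sky blue?", "why is grass green?"},
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(len(resp.EmbeddingBatch)) // one vector per prompt
    }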
llm/ext_server/server.cpp (vendored, 46 changes)
@@ -3166,26 +3166,36 @@ int main(int argc, char **argv) {
                 prompt = "";
             }
 
-            json image_data;
-            if (body.count("image_data") != 0) {
-                image_data = body["image_data"];
-            }
-            else
-            {
-                image_data = "";
-            }
-
             // create and queue the task
-            const int task_id = llama.queue_tasks.get_new_id();
-            llama.queue_results.add_waiting_task_id(task_id);
-            llama.request_completion(task_id, { {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, true, -1);
+            json responses;
+            {
+                const int id_task = llama.queue_tasks.get_new_id();
+                llama.queue_results.add_waiting_task_id(id_task);
+                llama.request_completion(id_task, {{"prompt", prompt}}, true, -1);
 
-            // get the result
-            task_result result = llama.queue_results.recv(task_id);
-            llama.queue_results.remove_waiting_task_id(task_id);
-
-            // send the result
-            return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
+                // get the result
+                task_result result = llama.queue_results.recv(id_task);
+                llama.queue_results.remove_waiting_task_id(id_task);
+                if (!result.error) {
+                    if (result.result_json.count("results")) {
+                        // result for multi-task
+                        responses = result.result_json.at("results");
+                    } else {
+                        // result for single task
+                        responses = std::vector<json>(1, result.result_json);
+                    }
+                    json embeddings = json::array();
+                    for (auto & elem : responses) {
+                        embeddings.push_back(json_value(elem, "embedding", json::array()));
+                    }
+                    // send the result
+                    json result = json{{"embedding", embeddings}};
+                    return res.set_content(result.dump(), "application/json; charset=utf-8");
+                } else {
+                    // return error
+                    return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
+                }
+            }
         });
 
         // GG: if I put the main loop inside a thread, it crashes on the first request when build in Debug!?
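Note: with this change the /embedding route always reports an array of embedding vectors under "embedding", one per prompt, whether the result came from a single task or a multi-task batch. A minimal decoding sketch of that response shape (the values are made up):

    // Sketch of the ext server's array-of-arrays response.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	raw := []byte(`{"embedding": [[0.1, 0.2], [0.3, 0.4]]}`)

    	var resp struct {
    		Embedding [][]float64 `json:"embedding"`
    	}
    	if err := json.Unmarshal(raw, &resp); err != nil {
    		panic(err)
    	}
    	fmt.Println(len(resp.Embedding)) // 2 vectors, one per prompt
    }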
@@ -33,7 +33,7 @@ type LlamaServer interface {
 	Ping(ctx context.Context) error
 	WaitUntilRunning(ctx context.Context) error
 	Completion(ctx context.Context, req CompletionRequest, fn func(CompletionResponse)) error
-	Embedding(ctx context.Context, prompt string) ([]float64, error)
+	Embedding(ctx context.Context, prompt []string) ([][]float64, error)
 	Tokenize(ctx context.Context, content string) ([]int, error)
 	Detokenize(ctx context.Context, tokens []int) (string, error)
 	Close() error
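Note: existing single-prompt callers migrate by wrapping the prompt in a slice and taking the first vector; a sketch of a hypothetical helper in package llm (the guard against an empty result is an assumption, not in this commit):

    // Hypothetical helper adapting old single-prompt callers to the new signature.
    package llm

    import (
    	"context"
    	"fmt"
    )

    func embedOne(ctx context.Context, s LlamaServer, prompt string) ([]float64, error) {
    	embeddings, err := s.Embedding(ctx, []string{prompt})
    	if err != nil {
    		return nil, err
    	}
    	if len(embeddings) == 0 {
    		return nil, fmt.Errorf("no embedding returned for prompt")
    	}
    	return embeddings[0], nil
    }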
@@ -842,14 +842,14 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu
 }
 
 type EmbeddingRequest struct {
-	Content string `json:"content"`
+	Content []string `json:"content"`
 }
 
 type EmbeddingResponse struct {
-	Embedding []float64 `json:"embedding"`
+	Embedding [][]float64 `json:"embedding"`
 }
 
-func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, error) {
+func (s *llmServer) Embedding(ctx context.Context, prompt []string) ([][]float64, error) {
 	if err := s.sem.Acquire(ctx, 1); err != nil {
 		slog.Error("Failed to acquire semaphore", "error", err)
 		return nil, err
@@ -864,7 +864,7 @@ func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, er
 		return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
 	}
 
-	data, err := json.Marshal(TokenizeRequest{Content: prompt})
+	data, err := json.Marshal(EmbeddingRequest{Content: prompt})
 	if err != nil {
 		return nil, fmt.Errorf("error marshaling embed data: %w", err)
 	}
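Note: besides widening Content to a slice, the second hunk fixes the request type being marshaled (it previously reused TokenizeRequest). For reference, a sketch of the body the runner now POSTs to the ext server's /embedding route:

    // Illustrative only: the wire format produced by the marshal call above.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type EmbeddingRequest struct {
    	Content []string `json:"content"`
    }

    func main() {
    	data, err := json.Marshal(EmbeddingRequest{Content: []string{"first prompt", "second prompt"}})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(data)) // {"content":["first prompt","second prompt"]}
    }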
@@ -389,23 +389,39 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) {
 		return
 	}
 
-	// an empty request loads the model
-	if req.Prompt == "" {
-		c.JSON(http.StatusOK, api.EmbeddingResponse{Embedding: []float64{}})
-		return
-	}
-
-	embedding, err := runner.llama.Embedding(c.Request.Context(), req.Prompt)
-	if err != nil {
-		slog.Info(fmt.Sprintf("embedding generation failed: %v", err))
-		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate embedding"})
-		return
-	}
-
-	resp := api.EmbeddingResponse{
-		Embedding: embedding,
-	}
-	c.JSON(http.StatusOK, resp)
+	switch {
+	// single embedding
+	case len(req.Prompt) > 0:
+		slog.Info("embedding request", "prompt", req.Prompt)
+		embeddings, err := runner.llama.Embedding(c.Request.Context(), []string{req.Prompt})
+		if err != nil {
+			slog.Info(fmt.Sprintf("embedding generation failed: %v", err))
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate embedding"})
+			return
+		}
+		resp := api.EmbeddingResponse{Embedding: embeddings[0]}
+		c.JSON(http.StatusOK, resp)
+
+	// batch embeddings
+	case len(req.PromptBatch) > 0:
+		embeddings, err := runner.llama.Embedding(c.Request.Context(), req.PromptBatch)
+		if err != nil {
+			slog.Info(fmt.Sprintf("batch embedding generation failed: %v", err))
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to generate embedding"})
+			return
+		}
+
+		resp := api.EmbeddingResponse{EmbeddingBatch: embeddings}
+		c.JSON(http.StatusOK, resp)
+
+	// empty prompt loads the model
+	default:
+		if req.PromptBatch != nil {
+			c.JSON(http.StatusOK, api.EmbeddingResponse{EmbeddingBatch: [][]float64{}})
+		} else {
+			c.JSON(http.StatusOK, api.EmbeddingResponse{Embedding: []float64{}})
+		}
+	}
 }
 
 func (s *Server) PullModelHandler(c *gin.Context) {
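Note: end to end, the handler keeps the old single-prompt contract and adds the batch path. A hedged sketch of exercising both over HTTP against a local server (the default port and model name are assumptions, not part of this commit):

    // Posts a single-prompt and a batch request to /api/embeddings.
    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"net/http"
    )

    func post(body string) string {
    	resp, err := http.Post("http://localhost:11434/api/embeddings",
    		"application/json", bytes.NewBufferString(body))
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	out, _ := io.ReadAll(resp.Body)
    	return string(out)
    }

    func main() {
    	// Single prompt: response carries "embedding".
    	fmt.Println(post(`{"model": "all-minilm", "prompt": "why is the sky blue?"}`))
    	// Batch: response carries "embedding_batch".
    	fmt.Println(post(`{"model": "all-minilm", "prompt_batch": ["a", "b"]}`))
    }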
@@ -608,7 +608,7 @@ type mockLlm struct {
 	pingResp         error
 	waitResp         error
 	completionResp   error
-	embeddingResp    []float64
+	embeddingResp    [][]float64
 	embeddingRespErr error
 	tokenizeResp     []int
 	tokenizeRespErr  error
@@ -626,7 +626,7 @@ func (s *mockLlm) WaitUntilRunning(ctx context.Context) error { return s.waitRes
 func (s *mockLlm) Completion(ctx context.Context, req llm.CompletionRequest, fn func(llm.CompletionResponse)) error {
 	return s.completionResp
 }
-func (s *mockLlm) Embedding(ctx context.Context, prompt string) ([]float64, error) {
+func (s *mockLlm) Embedding(ctx context.Context, prompt []string) ([][]float64, error) {
 	return s.embeddingResp, s.embeddingRespErr
 }
 func (s *mockLlm) Tokenize(ctx context.Context, content string) ([]int, error) {
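Note: tests seed the mock with the widened batch shape; a fragment of hypothetical fixture code inside a test function (not part of this commit):

    // Seed the mock with one embedding vector and call the new signature.
    mock := &mockLlm{embeddingResp: [][]float64{{0.1, 0.2, 0.3}}}
    got, err := mock.Embedding(context.Background(), []string{"hello"})
    if err != nil || len(got) != 1 {
    	t.Fatalf("unexpected embedding result: %v, %v", got, err)
    }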