From e3dd90102d3dc47f346046a487806d10c01e59fb Mon Sep 17 00:00:00 2001
From: ParthSareen
Date: Wed, 18 Dec 2024 10:49:22 -0800
Subject: [PATCH] WIP

---
 server/routes_tokenize_test.go | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/server/routes_tokenize_test.go b/server/routes_tokenize_test.go
index 86f82851d..260381a9e 100644
--- a/server/routes_tokenize_test.go
+++ b/server/routes_tokenize_test.go
@@ -23,9 +23,7 @@ func (ml *mockModelLoader) LoadModel(name string, params llama.ModelParams) (*lo
 		return ml.LoadModelFn(name, params)
 	}
-	return &loadedModel{
-		model: mockModel{},
-	}, nil
+	return nil, nil
 }
 
 type mockModel struct {
@@ -34,24 +32,18 @@ type mockModel struct {
 	TokenToPieceFn func(token int) string
 }
 
-func (m *mockModel) Tokenize(text string, addBos bool, addEos bool) ([]int, error) {
+func (mockModel) Tokenize(text string, addBos bool, addEos bool) ([]int, error) {
 	return []int{1, 2, 3}, nil
 }
 
-func (m *mockModel) TokenToPiece(token int) string {
+func (mockModel) TokenToPiece(token int) string {
 	return fmt.Sprint(token)
 }
 
 func TestTokenizeHandler(t *testing.T) {
 	gin.SetMode(gin.TestMode)
 
-	mockLoader := mockModelLoader{
-		LoadModelFn: func(name string, params llama.ModelParams) (*loadedModel, error) {
-			return &loadedModel{
-				model: mockModel{},
-			}, nil
-		},
-	}
+	mockModel := mockModel{}
 
 	s := Server{
 		sched: &Scheduler{