Compare commits


12 Commits

| Author | SHA1 | Message | Date |
|--------|------|---------|------|
| Roy Han | 568416ba17 | add suffix | 2024-07-16 16:51:27 -07:00 |
| Roy Han | 80cba42ab2 | Update docs | 2024-07-16 16:51:27 -07:00 |
| royjhan | 6477a7aca4 | Merge branch 'royh-completions-docs' of https://github.com/ollama/ollama into royh-completions-docs | 2024-07-16 16:51:11 -07:00 |
| royjhan | 51214ddef5 | Update docs/openai.md | 2024-07-16 16:34:31 -07:00 |
| royjhan | b950d749a9 | Update docs/openai.md | 2024-07-16 16:34:31 -07:00 |
| Roy Han | 3702ed7532 | token bug corrected | 2024-07-16 16:34:31 -07:00 |
| Roy Han | 6266603b17 | Update docs | 2024-07-16 16:34:31 -07:00 |
| royjhan | 2644c4e682 | Update docs/openai.md | 2024-07-08 14:46:05 -07:00 |
| royjhan | 04cde43b2a | Update docs/openai.md | 2024-07-08 14:44:16 -07:00 |
| Roy Han | 105e36765d | token bug corrected | 2024-07-03 15:03:54 -07:00 |
| royjhan | fa7be5aab4 | Merge branch 'main' into royh-completions-docs | 2024-07-02 14:52:56 -07:00 |
| Roy Han | 02169f3e60 | Update docs | 2024-06-26 14:30:28 -07:00 |
4 changed files with 82 additions and 52 deletions

`docs/openai.md`

@@ -27,6 +27,11 @@ chat_completion = client.chat.completions.create(
     ],
     model='llama3',
 )
+
+completion = client.completions.create(
+    model="llama3",
+    prompt="Say this is a test"
+)
 ```

 ### OpenAI JavaScript library
@@ -45,6 +50,11 @@ const chatCompletion = await openai.chat.completions.create({
   messages: [{ role: 'user', content: 'Say this is a test' }],
   model: 'llama3',
 })
+
+const completion = await openai.completions.create({
+  model: "llama3",
+  prompt: "Say this is a test.",
+})
 ```

 ### `curl`
@@ -66,6 +76,12 @@ curl http://localhost:11434/v1/chat/completions \
         ]
     }'
+
+curl http://localhost:11434/v1/completions \
+    -H "Content-Type: application/json" \
+    -d '{
+        "model": "llama3",
+        "prompt": "Say this is a test"
+    }'
 ```

 ## Endpoints
@@ -103,8 +119,71 @@ curl http://localhost:11434/v1/chat/completions \
 - [ ] `user`
 - [ ] `n`

+### `/v1/completions`
+
+#### Supported features
+
+- [x] Completions
+- [x] Streaming
+- [x] JSON mode
+- [x] Reproducible outputs
+- [ ] Logprobs
+
+#### Supported request fields
+
+- [x] `model`
+- [x] `prompt`
+- [x] `frequency_penalty`
+- [x] `presence_penalty`
+- [x] `seed`
+- [x] `stop`
+- [x] `stream`
+- [x] `temperature`
+- [x] `top_p`
+- [x] `max_tokens`
+- [x] `suffix`
+- [ ] `best_of`
+- [ ] `echo`
+- [ ] `logit_bias`
+- [ ] `user`
+- [ ] `n`
+
+#### Notes
+
+- `prompt` currently only accepts a string
+
+### `/v1/completions`
+
+#### Supported features
+
+- [x] Completions
+- [x] Streaming
+- [x] JSON mode
+- [x] Reproducible outputs
+- [ ] Logprobs
+
+#### Supported request fields
+
+- [x] `model`
+- [x] `prompt`
+- [x] `frequency_penalty`
+- [x] `presence_penalty`
+- [x] `seed`
+- [x] `stop`
+- [x] `stream`
+- [x] `temperature`
+- [x] `top_p`
+- [x] `max_tokens`
+- [ ] `best_of`
+- [ ] `echo`
+- [ ] `suffix`
+- [ ] `logit_bias`
+- [ ] `user`
+- [ ] `n`
+
+#### Notes
+
+- `prompt` currently only accepts a string
+- `usage.prompt_tokens` will be 0 for completions where prompt evaluation is cached
+
 ## Models
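The added documentation shows the same completions request through the OpenAI Python and JavaScript clients and raw `curl`. (Note that the hunk adds the `/v1/completions` section twice, once with `suffix` checked and once without, which matches the self-merge commit 6477a7aca4 in the table above.) As a complement, here is a minimal Go sketch of the same request against the new endpoint, exercising the `suffix` field from the final commit; the request struct and the response shape are assumed from OpenAI's completions schema, not taken from this diff.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// completionRequest covers just the documented request fields this
// sketch needs; the full field list is documented above.
type completionRequest struct {
	Model  string `json:"model"`
	Prompt string `json:"prompt"`
	Suffix string `json:"suffix,omitempty"`
}

func main() {
	body, err := json.Marshal(completionRequest{
		Model:  "llama3",
		Prompt: "func add(a, b int) int {",
		Suffix: "}", // the field added by the "add suffix" commit
	})
	if err != nil {
		panic(err)
	}

	resp, err := http.Post("http://localhost:11434/v1/completions",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response shape is assumed to follow OpenAI's completion object.
	var out struct {
		Choices []struct {
			Text string `json:"text"`
		} `json:"choices"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	if len(out.Choices) > 0 {
		fmt.Println(out.Choices[0].Text)
	}
}
```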

`server/images.go`

@@ -492,12 +492,6 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 			layers = append(layers, baseLayer.Layer)
 		}
 	case "license", "template", "system":
-		if c.Name == "template" {
-			if _, err := template.Parse(c.Args); err != nil {
-				return fmt.Errorf("%w: %s", errBadTemplate, err)
-			}
-		}
-
 		if c.Name != "license" {
 			// replace
 			layers = slices.DeleteFunc(layers, func(layer *Layer) bool {
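This hunk shows create-time template validation as absent on this branch: on the other side of the compare, `CreateModel` parses a `TEMPLATE` directive up front and wraps any parse failure in `errBadTemplate`. The idea is small enough to sketch stand-alone; ollama's `template.Parse` adds its own layer over Go's `text/template`, so the version below uses only the standard library and is a sketch of the pattern, not the project's implementation.

```go
package main

import (
	"errors"
	"fmt"
	"text/template"
)

var errBadTemplate = errors.New("template error")

// validateTemplate parses a template body and wraps any parse failure
// in errBadTemplate so a caller can map it to an HTTP 400 response.
func validateTemplate(src string) error {
	if _, err := template.New("modelfile").Parse(src); err != nil {
		return fmt.Errorf("%w: %s", errBadTemplate, err)
	}
	return nil
}

func main() {
	for _, src := range []string{
		"{{ .Prompt }}",    // well-formed: accepted
		"{{ .Prompt",       // unclosed action: rejected
		"{{ if .Prompt }}", // missing {{ end }}: rejected
		"{{ Prompt }}",     // undefined function: rejected
	} {
		fmt.Printf("%-22q -> %v\n", src, validateTemplate(src))
	}
}
```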

`server/routes.go`

@@ -56,7 +56,6 @@ func init() {
 }

 var errRequired = errors.New("is required")
-var errBadTemplate = errors.New("template error")

 func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options, error) {
 	opts := api.DefaultOptions()
@@ -614,9 +613,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
 		defer cancel()

 		quantization := cmp.Or(r.Quantize, r.Quantization)
-		if err := CreateModel(ctx, name, filepath.Dir(r.Path), strings.ToUpper(quantization), f, fn); errors.Is(err, errBadTemplate) {
-			ch <- gin.H{"error": err.Error(), "status": http.StatusBadRequest}
-		} else if err != nil {
+		if err := CreateModel(ctx, name, filepath.Dir(r.Path), strings.ToUpper(quantization), f, fn); err != nil {
 			ch <- gin.H{"error": err.Error()}
 		}
 	}()
@@ -1204,15 +1201,11 @@ func waitForStream(c *gin.Context, ch chan interface{}) {
 			return
 		}
 	case gin.H:
-		status, ok := r["status"].(int)
-		if !ok {
-			status = http.StatusInternalServerError
-		}
-
 		if errorMsg, ok := r["error"].(string); ok {
-			c.JSON(status, gin.H{"error": errorMsg})
+			c.JSON(http.StatusInternalServerError, gin.H{"error": errorMsg})
 			return
 		} else {
-			c.JSON(status, gin.H{"error": "unexpected error format in progress response"})
+			c.JSON(http.StatusInternalServerError, gin.H{"error": "unexpected error format in progress response"})
 			return
 		}
 	default:
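The two hunks above work as a pair: on the other side of the compare, `CreateModelHandler` tags `errBadTemplate` failures with `"status": http.StatusBadRequest` before sending them down the progress channel, and `waitForStream` reads that tag, falling back to 500 when none is present. Below is a condensed, self-contained sketch of that propagation pattern; `H` stands in for `gin.H`, and the channel plumbing is hypothetical scaffolding rather than code from the diff.

```go
package main

import (
	"fmt"
	"net/http"
)

// H is a stand-in for gin.H.
type H map[string]any

// statusFor mirrors the tagged-status logic on the removed side of the
// diff: use the producer's status when present, otherwise fall back to 500.
func statusFor(r H) int {
	if status, ok := r["status"].(int); ok {
		return status
	}
	return http.StatusInternalServerError
}

func main() {
	ch := make(chan H, 2)
	ch <- H{"error": "template error: unclosed action", "status": http.StatusBadRequest}
	ch <- H{"error": "unexpected EOF"} // untagged: maps to 500
	close(ch)

	for r := range ch {
		fmt.Printf("%d %v\n", statusFor(r), r["error"])
	}
}
```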

`server/routes_create_test.go`

@@ -491,42 +491,6 @@ func TestCreateTemplateSystem(t *testing.T) {
 	if string(system) != "Say bye!" {
 		t.Errorf("expected \"Say bye!\", actual %s", system)
 	}
-
-	t.Run("incomplete template", func(t *testing.T) {
-		w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
-			Name:      "test",
-			Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt", createBinFile(t, nil, nil)),
-			Stream:    &stream,
-		})
-
-		if w.Code != http.StatusBadRequest {
-			t.Fatalf("expected status code 400, actual %d", w.Code)
-		}
-	})
-
-	t.Run("template with unclosed if", func(t *testing.T) {
-		w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
-			Name:      "test",
-			Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ if .Prompt }}", createBinFile(t, nil, nil)),
-			Stream:    &stream,
-		})
-
-		if w.Code != http.StatusBadRequest {
-			t.Fatalf("expected status code 400, actual %d", w.Code)
-		}
-	})
-
-	t.Run("template with undefined function", func(t *testing.T) {
-		w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
-			Name:      "test",
-			Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ Prompt }}", createBinFile(t, nil, nil)),
-			Stream:    &stream,
-		})
-
-		if w.Code != http.StatusBadRequest {
-			t.Fatalf("expected status code 400, actual %d", w.Code)
-		}
-	})
 }

 func TestCreateLicenses(t *testing.T) {
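The deleted subtests all share one shape: post a Modelfile whose `TEMPLATE` is broken and expect HTTP 400. Below is a self-contained sketch of that shape using `net/http/httptest`; the handler is a hypothetical stand-in for `s.CreateModelHandler` (which needs the test helpers `createRequest` and `createBinFile` seen above), kept minimal so the example runs on its own.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"text/template"
)

// handler is a hypothetical stand-in for CreateModelHandler: it parses
// the request body as a template and answers 400 on a parse error.
func handler(w http.ResponseWriter, r *http.Request) {
	body, _ := io.ReadAll(r.Body)
	if _, err := template.New("t").Parse(string(body)); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(handler))
	defer srv.Close()

	// The same table-driven cases the removed subtests exercised.
	for _, tt := range []struct {
		name, tmpl string
		want       int
	}{
		{"incomplete template", "{{ .Prompt", http.StatusBadRequest},
		{"template with unclosed if", "{{ if .Prompt }}", http.StatusBadRequest},
		{"template with undefined function", "{{ Prompt }}", http.StatusBadRequest},
		{"valid template", "{{ .Prompt }}", http.StatusOK},
	} {
		resp, err := http.Post(srv.URL, "text/plain", strings.NewReader(tt.tmpl))
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		fmt.Printf("%-34s -> %d (want %d)\n", tt.name, resp.StatusCode, tt.want)
	}
}
```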