Compare commits

...

30 Commits

Author SHA1 Message Date
Patrick Devine
e6d0062c13 move model struct 2023-07-16 17:00:09 -07:00
Patrick Devine
2e1394e405 add progressbar for model pulls 2023-07-16 16:43:11 -07:00
Jeffrey Morgan
95cc9a11db fix go warnings 2023-07-16 16:34:04 -07:00
Jeffrey Morgan
be233da145 make blobs directory if it does not exist 2023-07-16 16:30:07 -07:00
Jeffrey Morgan
6228a5f39f mkdirp new manifest directories 2023-07-16 16:30:07 -07:00
Patrick Devine
0573eae4b4 changes to the parser, FROM line, and fix commands 2023-07-16 16:30:07 -07:00
Patrick Devine
6e2be5a8a0 add create, pull, and push 2023-07-16 16:30:07 -07:00
Patrick Devine
48be78438a add the parser 2023-07-16 16:30:07 -07:00
Patrick Devine
86f3c1c3b9 basic distribution w/ push/pull 2023-07-16 16:30:05 -07:00
Jeffrey Morgan
6fdea03049 docs: remove python.md 2023-07-14 21:41:46 -07:00
Michael Yang
38021ba494 Merge pull request #83 from jmorganca/multibyte-responses
fix multibyte responses
2023-07-14 20:12:12 -07:00
Michael Yang
6c9fa573ae Merge pull request #82 from jmorganca/filepath
windows build
2023-07-14 20:11:55 -07:00
Michael Yang
40c9dc0a31 fix multibyte responses 2023-07-14 20:11:44 -07:00
Michael Yang
0142660bd4 size_t 2023-07-14 17:29:16 -07:00
Michael Yang
743e957d88 use filepath for os compat 2023-07-14 17:27:14 -07:00
Jeffrey Morgan
560f36e6c8 app: set first-time-run to true instead of false 2023-07-14 16:50:12 -07:00
hoyyeva
e88dd25bab ollama app welcome screen for first time run (#80) 2023-07-14 16:34:24 -07:00
Michael Yang
567e74e7d7 Merge pull request #81 from jmorganca/fix-race-2
fix race
2023-07-14 15:12:01 -07:00
Michael Yang
5ade3db040 fix race
block on write which only returns when the channel is closed. this is
contrary to the previous arrangement where the handler may return but
the stream hasn't finished writing. it can lead to the client receiving
unexpected responses (since the request has been handled) or worst case
a nil-pointer dereference as the stream tries to flush a nil writer
2023-07-14 15:10:46 -07:00
Michael Yang
965f9ad033 Merge pull request #77 from jmorganca/mem
continue conversation
2023-07-14 14:57:42 -07:00
Michael Yang
5d1c6b7499 Merge pull request #79 from jmorganca/fix-typo
fix typo
2023-07-14 10:50:44 -07:00
Michael Yang
5fefaa5d4d fix typo 2023-07-14 10:47:18 -07:00
Michael Yang
1775647f76 continue conversation
feed responses back into the llm
2023-07-13 17:13:00 -07:00
Michael Yang
77dc1a6d74 Merge pull request #74 from jmorganca/timings
Timings
2023-07-13 10:17:13 -07:00
Michael Yang
05e08d2310 return more info in generate response 2023-07-13 09:37:32 -07:00
Michael Yang
31590284a7 fix route 2023-07-12 19:21:49 -07:00
Michael Yang
f2863cc7f8 Merge pull request #76 from jmorganca/fix-pull
fix pull race
2023-07-12 19:21:13 -07:00
Jeffrey Morgan
4dd296e155 build app in publish script 2023-07-12 19:16:39 -07:00
Jeffrey Morgan
304f419429 update README.md API reference 2023-07-12 19:16:28 -07:00
Michael Yang
2666d3c206 fix pull race 2023-07-12 19:07:23 -07:00
26 changed files with 3788 additions and 549 deletions

View File

@@ -104,5 +104,5 @@ curl -X POST http://localhost:11343/api/pull -d '{"model": "orca"}'
Complete a prompt
```
curl -X POST http://localhost:11434/api/generate -d '{"model": "orca", "prompt": "hello!", "stream": true}'
curl -X POST http://localhost:11434/api/generate -d '{"model": "orca", "prompt": "hello!"}'
```

View File

@@ -116,3 +116,29 @@ func (c *Client) Pull(ctx context.Context, req *PullRequest, fn PullProgressFunc
return fn(resp)
})
}
type PushProgressFunc func(PushProgress) error
func (c *Client) Push(ctx context.Context, req *PushRequest, fn PushProgressFunc) error {
return c.stream(ctx, http.MethodPost, "/api/push", req, func(bts []byte) error {
var resp PushProgress
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
return fn(resp)
})
}
type CreateProgressFunc func(CreateProgress) error
func (c *Client) Create(ctx context.Context, req *CreateRequest, fn CreateProgressFunc) error {
return c.stream(ctx, http.MethodPost, "/api/create", req, func(bts []byte) error {
var resp CreateProgress
if err := json.Unmarshal(bts, &resp); err != nil {
return err
}
return fn(resp)
})
}

View File

@@ -1,26 +1,94 @@
package api
import "runtime"
type PullRequest struct {
Model string `json:"model"`
}
type PullProgress struct {
Total int64 `json:"total"`
Completed int64 `json:"completed"`
Percent float64 `json:"percent"`
}
import (
"fmt"
"os"
"runtime"
"time"
)
type GenerateRequest struct {
Model string `json:"model"`
Prompt string `json:"prompt"`
Model string `json:"model"`
Prompt string `json:"prompt"`
Context []int `json:"context,omitempty"`
Options `json:"options"`
}
type CreateRequest struct {
Name string `json:"name"`
Path string `json:"path"`
}
type CreateProgress struct {
Status string `json:"status"`
}
type PullRequest struct {
Name string `json:"name"`
Username string `json:"username"`
Password string `json:"password"`
}
type PullProgress struct {
Status string `json:"status"`
Digest string `json:"digest,omitempty"`
Total int `json:"total,omitempty"`
Completed int `json:"completed,omitempty"`
Percent float64 `json:"percent,omitempty"`
}
type PushRequest struct {
Name string `json:"name"`
Username string `json:"username"`
Password string `json:"password"`
}
type PushProgress struct {
Status string `json:"status"`
Digest string `json:"digest,omitempty"`
Total int `json:"total,omitempty"`
Completed int `json:"completed,omitempty"`
Percent float64 `json:"percent,omitempty"`
}
type GenerateResponse struct {
Response string `json:"response"`
Model string `json:"model"`
CreatedAt time.Time `json:"created_at"`
Response string `json:"response,omitempty"`
Done bool `json:"done"`
Context []int `json:"context,omitempty"`
TotalDuration time.Duration `json:"total_duration,omitempty"`
PromptEvalCount int `json:"prompt_eval_count,omitempty"`
PromptEvalDuration time.Duration `json:"prompt_eval_duration,omitempty"`
EvalCount int `json:"eval_count,omitempty"`
EvalDuration time.Duration `json:"eval_duration,omitempty"`
}
func (r *GenerateResponse) Summary() {
if r.TotalDuration > 0 {
fmt.Fprintf(os.Stderr, "total duration: %v\n", r.TotalDuration)
}
if r.PromptEvalCount > 0 {
fmt.Fprintf(os.Stderr, "prompt eval count: %d token(s)\n", r.PromptEvalCount)
}
if r.PromptEvalDuration > 0 {
fmt.Fprintf(os.Stderr, "prompt eval duration: %s\n", r.PromptEvalDuration)
fmt.Fprintf(os.Stderr, "prompt eval rate: %.2f tokens/s\n", float64(r.PromptEvalCount)/r.PromptEvalDuration.Seconds())
}
if r.EvalCount > 0 {
fmt.Fprintf(os.Stderr, "eval count: %d token(s)\n", r.EvalCount)
}
if r.EvalDuration > 0 {
fmt.Fprintf(os.Stderr, "eval duration: %s\n", r.EvalDuration)
fmt.Fprintf(os.Stderr, "eval rate: %.2f tokens/s\n", float64(r.EvalCount)/r.EvalDuration.Seconds())
}
}
type Options struct {
@@ -65,7 +133,7 @@ func DefaultOptions() Options {
UseNUMA: false,
NumCtx: 512,
NumCtx: 2048,
NumBatch: 512,
NumGPU: 1,
LowVRAM: false,

View File

@@ -58,7 +58,7 @@ const config: ForgeConfig = {
new AutoUnpackNativesPlugin({}),
new WebpackPlugin({
mainConfig,
devContentSecurityPolicy: `default-src * 'unsafe-eval' 'unsafe-inline'`,
devContentSecurityPolicy: `default-src * 'unsafe-eval' 'unsafe-inline'; img-src data: 'self'`,
renderer: {
config: rendererConfig,
nodeIntegration: true,

2274
app/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -30,6 +30,7 @@
"@electron-forge/plugin-auto-unpack-natives": "^6.2.1",
"@electron-forge/plugin-webpack": "^6.2.1",
"@electron-forge/publisher-github": "^6.2.1",
"@svgr/webpack": "^8.0.1",
"@types/chmodr": "^1.0.0",
"@types/node": "^20.4.0",
"@types/react": "^18.2.14",
@@ -54,17 +55,21 @@
"prettier": "^2.8.8",
"prettier-plugin-tailwindcss": "^0.3.0",
"style-loader": "^3.3.3",
"svg-inline-loader": "^0.8.2",
"tailwindcss": "^3.3.2",
"ts-loader": "^9.4.3",
"ts-node": "^10.9.1",
"typescript": "~4.5.4",
"url-loader": "^4.1.1",
"webpack": "^5.88.0",
"webpack-cli": "^5.1.4",
"webpack-dev-server": "^4.15.1"
},
"dependencies": {
"@electron/remote": "^2.0.10",
"@heroicons/react": "^2.0.18",
"@segment/analytics-node": "^1.0.0",
"copy-to-clipboard": "^3.3.3",
"electron-squirrel-startup": "^1.0.0",
"electron-store": "^8.1.0",
"react": "^18.2.0",

View File

@@ -1,160 +1,127 @@
import { useState } from 'react'
import path from 'path'
import os from 'os'
import { dialog, getCurrentWindow } from '@electron/remote'
import { useState } from "react"
import copy from 'copy-to-clipboard'
import { exec } from 'child_process'
import * as path from 'path'
import * as fs from 'fs'
import { DocumentDuplicateIcon } from '@heroicons/react/24/outline'
import { app } from '@electron/remote'
import OllamaIcon from './ollama.svg'
const API_URL = 'http://127.0.0.1:7734'
const ollama = app.isPackaged
? path.join(process.resourcesPath, 'ollama')
: path.resolve(process.cwd(), '..', 'ollama')
type Message = {
sender: 'bot' | 'human'
content: string
}
function installCLI(callback: () => void) {
const symlinkPath = '/usr/local/bin/ollama'
const userInfo = os.userInfo()
async function generate(prompt: string, model: string, callback: (res: string) => void) {
const result = await fetch(`${API_URL}/generate`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
prompt,
model,
}),
})
if (!result.ok) {
if (fs.existsSync(symlinkPath) && fs.readlinkSync(symlinkPath) === ollama) {
callback && callback()
return
}
let reader = result.body.getReader()
while (true) {
const { done, value } = await reader.read()
if (done) {
break
const command = `
do shell script "ln -F -s ${ollama} /usr/local/bin/ollama" with administrator privileges
`
exec(`osascript -e '${command}'`, (error: Error | null, stdout: string, stderr: string) => {
if (error) {
console.error(`cli: failed to install cli: ${error.message}`)
callback && callback()
return
}
let decoder = new TextDecoder()
let str = decoder.decode(value)
let re = /}\s*{/g
str = '[' + str.replace(re, '},{') + ']'
let messages = JSON.parse(str)
for (const message of messages) {
const choice = message.choices[0]
callback(choice.text)
if (choice.finish_reason === 'stop') {
break
}
}
}
return
callback && callback()
})
}
export default function () {
const [prompt, setPrompt] = useState('')
const [messages, setMessages] = useState<Message[]>([])
const [model, setModel] = useState('')
const [generating, setGenerating] = useState(false)
const [step, setStep] = useState(0)
const command = 'ollama run orca'
return (
<div className='flex min-h-screen flex-1 flex-col justify-between bg-white'>
<header className='drag sticky top-0 z-50 flex h-14 w-full flex-row items-center border-b border-black/10 bg-white/75 backdrop-blur-md'>
<div className='mx-auto w-full max-w-xl leading-none'>
<h1 className='text-sm font-medium'>{path.basename(model).replace('.bin', '')}</h1>
</div>
</header>
{model ? (
<section className='mx-auto mb-10 w-full max-w-xl flex-1 break-words'>
{messages.map((m, i) => (
<div className='my-4 flex gap-4' key={i}>
<div className='flex-none pr-1 text-lg'>
{m.sender === 'human' ? (
<div className='mt-px flex h-6 w-6 items-center justify-center rounded-md bg-neutral-200 text-sm text-neutral-700'>
{userInfo.username[0].toUpperCase()}
</div>
) : (
<div className='mt-0.5 flex h-6 w-6 items-center justify-center rounded-md bg-blue-600 text-sm text-white'>
{path.basename(model)[0].toUpperCase()}
</div>
)}
</div>
<div className='flex-1 text-gray-800'>
{m.content}
{m.sender === 'bot' && generating && i === messages.length - 1 && (
<span className='blink relative -top-[3px] left-1 text-[10px]'></span>
)}
</div>
</div>
))}
</section>
) : (
<section className='flex flex-1 select-none flex-col items-center justify-center pb-20'>
<h2 className='text-3xl font-light text-neutral-400'>No model selected</h2>
<button
onClick={async () => {
const res = await dialog.showOpenDialog(getCurrentWindow(), {
properties: ['openFile', 'multiSelections'],
})
if (res.canceled) {
return
}
setModel(res.filePaths[0])
}}
className='rounded-dm my-8 rounded-md bg-blue-600 px-4 py-2 text-sm text-white hover:brightness-110'
>
Open file...
</button>
</section>
<div className='flex flex-col justify-between mx-auto w-full pt-16 px-4 min-h-screen bg-white'>
{step === 0 && (
<>
<div className="mx-auto text-center">
<h1 className="mt-4 mb-6 text-2xl tracking-tight text-gray-900">Welcome to Ollama</h1>
<p className="mx-auto w-[65%] text-sm text-gray-400">
Lets get you up and running with your own large language models.
</p>
<button
onClick={() => {
setStep(1)
}}
className='mx-auto w-[40%] rounded-dm my-8 rounded-md bg-black px-4 py-2 text-sm text-white hover:brightness-110'
>
Next
</button>
</div>
<div className="mx-auto">
<OllamaIcon />
</div>
</>
)}
<div className='sticky bottom-0 bg-gradient-to-b from-transparent to-white'>
{model && (
<textarea
autoFocus
rows={1}
value={prompt}
placeholder='Send a message...'
onChange={e => setPrompt(e.target.value)}
className='mx-auto my-4 block w-full max-w-xl resize-none rounded-xl border border-gray-200 px-5 py-3.5 text-[15px] shadow-lg shadow-black/5 focus:outline-none'
onKeyDownCapture={async e => {
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault()
if (generating) {
return
}
if (!prompt) {
return
}
await setMessages(messages => {
return [...messages, { sender: 'human', content: prompt }, { sender: 'bot', content: '' }]
})
setPrompt('')
setGenerating(true)
await generate(prompt, model, res => {
setMessages(messages => {
let last = messages[messages.length - 1]
return [...messages.slice(0, messages.length - 1), { ...last, content: last.content + res }]
{step === 1 && (
<>
<div className="flex flex-col space-y-28 mx-auto text-center">
<h1 className="mt-4 text-2xl tracking-tight text-gray-900">Install the command line</h1>
<pre className="mx-auto text-4xl text-gray-400">
&gt; ollama
</pre>
<div className="mx-auto">
<button
onClick={() => {
// install the command line
installCLI(() => {
window.focus()
setStep(2)
})
})
setGenerating(false)
}
}}
></textarea>
)}
</div>
}}
className='mx-auto w-[60%] rounded-dm rounded-md bg-black px-4 py-2 text-sm text-white hover:brightness-110'
>
Install
</button>
<p className="mx-auto w-[70%] text-xs text-gray-400 my-4">
You will be prompted for administrator access
</p>
</div>
</div>
</>
)}
{step === 2 && (
<>
<div className="flex flex-col space-y-20 mx-auto text-center">
<h1 className="mt-4 text-2xl tracking-tight text-gray-900">Run your first model</h1>
<div className="flex flex-col">
<div className="group relative flex items-center">
<pre className="text-start w-full language-none rounded-md bg-gray-100 px-4 py-3 text-2xs leading-normal">
{command}
</pre>
<button
className='absolute right-[5px] rounded-md border bg-white/90 px-2 py-2 text-gray-400 opacity-0 backdrop-blur-xl hover:text-gray-600 group-hover:opacity-100'
onClick={() => {
copy(command)
}}
>
<DocumentDuplicateIcon className="h-4 w-4 text-gray-500" />
</button>
</div>
<p className="mx-auto w-[70%] text-xs text-gray-400 my-4">
Run this command in your favorite terminal.
</p>
</div>
<button
onClick={() => {
window.close()
}}
className='mx-auto w-[60%] rounded-dm rounded-md bg-black px-4 py-2 text-sm text-white hover:brightness-110'
>
Finish
</button>
</div>
</>
)}
</div>
)
}
}

4
app/src/declarations.d.ts vendored Normal file
View File

@@ -0,0 +1,4 @@
declare module '*.svg' {
const content: string;
export default content;
}

View File

@@ -1,10 +1,9 @@
import { spawn, exec } from 'child_process'
import { app, autoUpdater, dialog, Tray, Menu } from 'electron'
import { spawn } from 'child_process'
import { app, autoUpdater, dialog, Tray, Menu, BrowserWindow } from 'electron'
import Store from 'electron-store'
import winston from 'winston'
import 'winston-daily-rotate-file'
import * as path from 'path'
import * as fs from 'fs'
import { analytics, id } from './telemetry'
@@ -12,6 +11,9 @@ require('@electron/remote/main').initialize()
const store = new Store()
let tray: Tray | null = null
let welcomeWindow: BrowserWindow | null = null
declare const MAIN_WINDOW_WEBPACK_ENTRY: string
const logger = winston.createLogger({
transports: [
@@ -30,7 +32,36 @@ if (!SingleInstanceLock) {
app.quit()
}
const createSystemtray = () => {
function firstRunWindow() {
// Create the browser window.
welcomeWindow = new BrowserWindow({
width: 400,
height: 500,
frame: false,
fullscreenable: false,
resizable: false,
movable: false,
transparent: true,
webPreferences: {
nodeIntegration: true,
contextIsolation: false,
},
})
require('@electron/remote/main').enable(welcomeWindow.webContents)
// and load the index.html of the app.
welcomeWindow.loadURL(MAIN_WINDOW_WEBPACK_ENTRY)
// for debugging
// welcomeWindow.webContents.openDevTools()
if (process.platform === 'darwin') {
app.dock.hide()
}
}
function createSystemtray() {
let iconPath = path.join(__dirname, '..', '..', 'assets', 'ollama_icon_16x16Template.png')
if (app.isPackaged) {
@@ -49,8 +80,6 @@ if (require('electron-squirrel-startup')) {
app.quit()
}
const ollama = path.join(process.resourcesPath, 'ollama')
function server() {
const binary = app.isPackaged
? path.join(process.resourcesPath, 'ollama')
@@ -81,51 +110,12 @@ function server() {
})
}
function installCLI() {
const symlinkPath = '/usr/local/bin/ollama'
if (fs.existsSync(symlinkPath) && fs.readlinkSync(symlinkPath) === ollama) {
return
}
dialog
.showMessageBox({
type: 'info',
title: 'Ollama CLI installation',
message: 'To make the Ollama command work in your terminal, it needs administrator privileges.',
buttons: ['OK'],
})
.then(result => {
if (result.response === 0) {
const command = `
do shell script "ln -F -s ${ollama} /usr/local/bin/ollama" with administrator privileges
`
exec(`osascript -e '${command}'`, (error: Error | null, stdout: string, stderr: string) => {
if (error) {
logger.error(`cli: failed to install cli: ${error.message}`)
return
}
logger.info(stdout)
logger.error(stderr)
})
}
})
if (process.platform === 'darwin') {
app.dock.hide()
}
app.on('ready', () => {
if (process.platform === 'darwin') {
app.dock.hide()
if (!store.has('first-time-run')) {
// This is the first run
app.setLoginItemSettings({ openAtLogin: true })
store.set('first-time-run', false)
} else {
// The app has been run before
app.setLoginItemSettings({ openAtLogin: app.getLoginItemSettings().openAtLogin })
}
if (app.isPackaged) {
if (!app.isInApplicationsFolder()) {
const chosen = dialog.showMessageBoxSync({
@@ -157,13 +147,21 @@ app.on('ready', () => {
}
}
}
installCLI()
}
}
createSystemtray()
server()
if (!store.has('first-time-run')) {
// This is the first run
app.setLoginItemSettings({ openAtLogin: true })
firstRunWindow()
store.set('first-time-run', true)
} else {
// The app has been run before
app.setLoginItemSettings({ openAtLogin: app.getLoginItemSettings().openAtLogin })
}
})
// Quit when all windows are closed, except on macOS. There, it's common

9
app/src/ollama.svg Normal file

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 17 KiB

View File

@@ -28,4 +28,8 @@ export const rules: Required<ModuleOptions>['rules'] = [
},
},
},
{
test: /\.svg$/,
use: ['@svgr/webpack'],
},
]

View File

@@ -9,7 +9,7 @@ import (
"net"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
@@ -27,7 +27,24 @@ func cacheDir() string {
panic(err)
}
return path.Join(home, ".ollama")
return filepath.Join(home, ".ollama")
}
func create(cmd *cobra.Command, args []string) error {
filename, _ := cmd.Flags().GetString("file")
client := api.NewClient()
request := api.CreateRequest{Name: args[0], Path: filename}
fn := func(resp api.CreateProgress) error {
fmt.Println(resp.Status)
return nil
}
if err := client.Create(context.Background(), &request, fn); err != nil {
return err
}
return nil
}
func RunRun(cmd *cobra.Command, args []string) error {
@@ -51,41 +68,74 @@ func RunRun(cmd *cobra.Command, args []string) error {
return RunGenerate(cmd, args)
}
func pull(model string) error {
func push(cmd *cobra.Command, args []string) error {
client := api.NewClient()
var bar *progressbar.ProgressBar
return client.Pull(
context.Background(),
&api.PullRequest{Model: model},
func(progress api.PullProgress) error {
if bar == nil {
if progress.Percent >= 100 {
// already downloaded
return nil
}
bar = progressbar.DefaultBytes(progress.Total)
}
request := api.PushRequest{Name: args[0]}
fn := func(resp api.PushProgress) error {
fmt.Println(resp.Status)
return nil
}
return bar.Set64(progress.Completed)
},
)
if err := client.Push(context.Background(), &request, fn); err != nil {
return err
}
return nil
}
func RunGenerate(_ *cobra.Command, args []string) error {
func RunPull(cmd *cobra.Command, args []string) error {
return pull(args[0])
}
func pull(model string) error {
client := api.NewClient()
var bar *progressbar.ProgressBar
currentLayer := ""
request := api.PullRequest{Name: model}
fn := func(resp api.PullProgress) error {
if resp.Digest != currentLayer && resp.Digest != "" {
if currentLayer != "" {
fmt.Println()
}
currentLayer = resp.Digest
layerStr := resp.Digest[7:23] + "..."
bar = progressbar.DefaultBytes(
int64(resp.Total),
"pulling "+layerStr,
)
} else if resp.Digest == currentLayer && resp.Digest != "" {
bar.Set(resp.Completed)
} else {
currentLayer = ""
fmt.Println(resp.Status)
}
return nil
}
if err := client.Pull(context.Background(), &request, fn); err != nil {
return err
}
return nil
}
func RunGenerate(cmd *cobra.Command, args []string) error {
if len(args) > 1 {
// join all args into a single prompt
return generate(args[0], strings.Join(args[1:], " "))
return generate(cmd, args[0], strings.Join(args[1:], " "))
}
if term.IsTerminal(int(os.Stdin.Fd())) {
return generateInteractive(args[0])
return generateInteractive(cmd, args[0])
}
return generateBatch(args[0])
return generateBatch(cmd, args[0])
}
func generate(model, prompt string) error {
var generateContextKey struct{}
func generate(cmd *cobra.Command, model, prompt string) error {
if len(strings.TrimSpace(prompt)) > 0 {
client := api.NewClient()
@@ -108,13 +158,24 @@ func generate(model, prompt string) error {
}
}()
request := api.GenerateRequest{Model: model, Prompt: prompt}
var latest api.GenerateResponse
generateContext, ok := cmd.Context().Value(generateContextKey).([]int)
if !ok {
generateContext = []int{}
}
request := api.GenerateRequest{Model: model, Prompt: prompt, Context: generateContext}
fn := func(resp api.GenerateResponse) error {
if !spinner.IsFinished() {
spinner.Finish()
}
latest = resp
fmt.Print(resp.Response)
cmd.SetContext(context.WithValue(cmd.Context(), generateContextKey, resp.Context))
return nil
}
@@ -124,16 +185,25 @@ func generate(model, prompt string) error {
fmt.Println()
fmt.Println()
verbose, err := cmd.Flags().GetBool("verbose")
if err != nil {
return err
}
if verbose {
latest.Summary()
}
}
return nil
}
func generateInteractive(model string) error {
func generateInteractive(cmd *cobra.Command, model string) error {
fmt.Print(">>> ")
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
if err := generate(model, scanner.Text()); err != nil {
if err := generate(cmd, model, scanner.Text()); err != nil {
return err
}
@@ -143,12 +213,12 @@ func generateInteractive(model string) error {
return nil
}
func generateBatch(model string) error {
func generateBatch(cmd *cobra.Command, model string) error {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
prompt := scanner.Text()
fmt.Printf(">>> %s\n", prompt)
if err := generate(model, prompt); err != nil {
if err := generate(cmd, model, prompt); err != nil {
return err
}
}
@@ -187,12 +257,21 @@ func NewCLI() *cobra.Command {
},
PersistentPreRunE: func(_ *cobra.Command, args []string) error {
// create the models directory and it's parent
return os.MkdirAll(path.Join(cacheDir(), "models"), 0o700)
return os.MkdirAll(filepath.Join(cacheDir(), "models"), 0o700)
},
}
cobra.EnableCommandSorting = false
createCmd := &cobra.Command{
Use: "create MODEL",
Short: "Create a model from a Modelfile",
Args: cobra.MinimumNArgs(1),
RunE: create,
}
createCmd.Flags().StringP("file", "f", "Modelfile", "Name of the Modelfile (default \"Modelfile\")")
runCmd := &cobra.Command{
Use: "run MODEL [PROMPT]",
Short: "Run a model",
@@ -200,6 +279,8 @@ func NewCLI() *cobra.Command {
RunE: RunRun,
}
runCmd.Flags().Bool("verbose", false, "Show timings for response")
serveCmd := &cobra.Command{
Use: "serve",
Aliases: []string{"start"},
@@ -207,9 +288,26 @@ func NewCLI() *cobra.Command {
RunE: RunServer,
}
pullCmd := &cobra.Command{
Use: "pull MODEL",
Short: "Pull a model from a registry",
Args: cobra.MinimumNArgs(1),
RunE: RunPull,
}
pushCmd := &cobra.Command{
Use: "push MODEL",
Short: "Push a model to a registry",
Args: cobra.MinimumNArgs(1),
RunE: push,
}
rootCmd.AddCommand(
serveCmd,
createCmd,
runCmd,
pullCmd,
pushCmd,
)
return rootCmd

View File

@@ -1,64 +0,0 @@
# Python SDK
## Install
```
pip install ollama
```
## Example
```python
import ollama
ollama.generate("orca-mini-3b", "hi")
```
## Reference
### `ollama.generate(model, message)`
Generate a completion
```python
ollama.generate("./llama-7b-ggml.bin", "hi")
```
### `ollama.models()`
List available local models
```python
models = ollama.models()
```
### `ollama.load(model)`
Manually load a model for generation
```python
ollama.load("model")
```
### `ollama.unload(model)`
Unload a model
```python
ollama.unload("model")
```
### `ollama.pull(model)`
Download a model
```python
ollama.pull("huggingface.co/thebloke/llama-7b-ggml")
```
### `ollama.search(query)`
Search for compatible models that Ollama can run
```python
ollama.search("llama-7b")
```

View File

@@ -78,10 +78,14 @@ llama_token llama_sample(
*/
import "C"
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"unicode/utf8"
"unsafe"
"github.com/jmorganca/ollama/api"
@@ -147,9 +151,14 @@ func (llm *llama) Close() {
C.llama_print_timings(llm.ctx)
}
func (llm *llama) Predict(prompt string, fn func(string)) error {
if tokens := llm.tokenize(prompt); tokens != nil {
return llm.generate(tokens, fn)
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
if input := llm.tokenize(prompt); input != nil {
embd := make([]C.llama_token, len(ctx))
for i := range ctx {
embd[i] = C.llama_token(ctx[i])
}
return llm.generate(append(embd, input...), fn)
}
return errors.New("llama: tokenize")
@@ -176,7 +185,7 @@ func (llm *llama) detokenize(tokens ...C.llama_token) string {
return sb.String()
}
func (llm *llama) generate(tokens []C.llama_token, fn func(string)) error {
func (llm *llama) generate(input []C.llama_token, fn func(api.GenerateResponse)) error {
var opts C.struct_llama_sample_options
opts.repeat_penalty = C.float(llm.RepeatPenalty)
opts.frequency_penalty = C.float(llm.FrequencyPenalty)
@@ -190,38 +199,70 @@ func (llm *llama) generate(tokens []C.llama_token, fn func(string)) error {
opts.mirostat_tau = C.float(llm.MirostatTau)
opts.mirostat_eta = C.float(llm.MirostatEta)
pastTokens := deque[C.llama_token]{capacity: llm.RepeatLastN}
output := deque[C.llama_token]{capacity: llm.NumCtx}
context := deque[int]{capacity: llm.NumCtx / 2}
for _, in := range input {
context.PushLeft(int(in))
}
var b bytes.Buffer
for C.llama_get_kv_cache_token_count(llm.ctx) < C.int(llm.NumCtx) {
if retval := C.llama_eval(llm.ctx, unsafe.SliceData(tokens), C.int(len(tokens)), C.llama_get_kv_cache_token_count(llm.ctx), C.int(llm.NumThread)); retval != 0 {
if retval := C.llama_eval(llm.ctx, unsafe.SliceData(input), C.int(len(input)), C.llama_get_kv_cache_token_count(llm.ctx), C.int(llm.NumThread)); retval != 0 {
return errors.New("llama: eval")
}
token, err := llm.sample(pastTokens, &opts)
switch {
case errors.Is(err, io.EOF):
return nil
case err != nil:
token, err := llm.sample(output, &opts)
if errors.Is(err, io.EOF) {
break
} else if err != nil {
return err
}
fn(llm.detokenize(token))
b.WriteString(llm.detokenize(token))
if utf8.Valid(b.Bytes()) || b.Len() >= utf8.UTFMax {
// call the callback
fn(api.GenerateResponse{
Response: b.String(),
})
tokens = []C.llama_token{token}
output.PushLeft(token)
context.PushLeft(int(token))
b.Reset()
}
pastTokens.PushLeft(token)
input = []C.llama_token{token}
}
dur := func(ms float64) time.Duration {
d, err := time.ParseDuration(fmt.Sprintf("%fms", ms))
if err != nil {
panic(err)
}
return d
}
timings := C.llama_get_timings(llm.ctx)
fn(api.GenerateResponse{
Done: true,
Context: context.Data(),
PromptEvalCount: int(timings.n_p_eval),
PromptEvalDuration: dur(float64(timings.t_p_eval_ms)),
EvalCount: int(timings.n_eval),
EvalDuration: dur(float64(timings.t_eval_ms)),
})
return nil
}
func (llm *llama) sample(pastTokens deque[C.llama_token], opts *C.struct_llama_sample_options) (C.llama_token, error) {
func (llm *llama) sample(output deque[C.llama_token], opts *C.struct_llama_sample_options) (C.llama_token, error) {
numVocab := int(C.llama_n_vocab(llm.ctx))
logits := unsafe.Slice(C.llama_get_logits(llm.ctx), numVocab)
candidates := make([]C.struct_llama_token_data, 0, numVocab)
for i := 0; i < numVocab; i++ {
candidates = append(candidates, C.llama_token_data{
candidates := deque[C.struct_llama_token_data]{capacity: numVocab}
for i := 0; i < candidates.Cap(); i++ {
candidates.PushLeft(C.struct_llama_token_data{
id: C.int(i),
logit: logits[i],
p: 0,
@@ -230,8 +271,8 @@ func (llm *llama) sample(pastTokens deque[C.llama_token], opts *C.struct_llama_s
token := C.llama_sample(
llm.ctx,
unsafe.SliceData(candidates), C.ulong(len(candidates)),
unsafe.SliceData(pastTokens.Data()), C.ulong(pastTokens.Len()),
unsafe.SliceData(candidates.Data()), C.size_t(candidates.Len()),
unsafe.SliceData(output.Data()), C.size_t(output.Len()),
opts)
if token != C.llama_token_eos() {
return token, nil

View File

@@ -1,9 +1,11 @@
package main
import (
"context"
"github.com/jmorganca/ollama/cmd"
)
func main() {
cmd.NewCLI().Execute()
cmd.NewCLI().ExecuteContext(context.Background())
}

77
parser/parser.go Normal file
View File

@@ -0,0 +1,77 @@
package parser
import (
"bufio"
"fmt"
"io"
"strings"
)
type Command struct {
Name string
Arg string
}
func Parse(reader io.Reader) ([]Command, error) {
var commands []Command
var foundModel bool
scanner := bufio.NewScanner(reader)
multiline := false
var multilineCommand *Command
for scanner.Scan() {
line := scanner.Text()
if multiline {
// If we're in a multiline string and the line is """, end the multiline string.
if strings.TrimSpace(line) == `"""` {
multiline = false
commands = append(commands, *multilineCommand)
} else {
// Otherwise, append the line to the multiline string.
multilineCommand.Arg += "\n" + line
}
continue
}
fields := strings.Fields(line)
if len(fields) == 0 {
continue
}
command := Command{}
switch fields[0] {
case "FROM":
command.Name = "model"
command.Arg = fields[1]
if command.Arg == "" {
return nil, fmt.Errorf("no model specified in FROM line")
}
foundModel = true
case "PROMPT":
command.Name = "prompt"
if fields[1] == `"""` {
multiline = true
multilineCommand = &command
multilineCommand.Arg = ""
} else {
command.Arg = strings.Join(fields[1:], " ")
}
case "PARAMETER":
command.Name = fields[1]
command.Arg = strings.Join(fields[2:], " ")
default:
continue
}
if !multiline {
commands = append(commands, command)
}
}
if !foundModel {
return nil, fmt.Errorf("no FROM line for the model was specified")
}
if multiline {
return nil, fmt.Errorf("unclosed multiline string")
}
return commands, scanner.Err()
}

View File

@@ -12,13 +12,15 @@ ARCH=$(go env GOARCH)
go build .
npm --prefix app run make:sign
# Create a new tag if it doesn't exist.
if ! git rev-parse v$VERSION >/dev/null 2>&1; then
git tag v$VERSION
git push origin v$VERSION
fi
mkdir dist
mkdir -p dist
cp app/out/make/zip/${OS}/${ARCH}/Ollama-${OS}-${ARCH}-${VERSION}.zip dist/Ollama-${OS}-${ARCH}.zip
cp ./ollama dist/ollama-${OS}-${ARCH}

842
server/images.go Normal file
View File

@@ -0,0 +1,842 @@
package server
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/parser"
)
// DefaultRegistry is the registry endpoint used by PushModel and PullModel.
var DefaultRegistry string = "https://registry.ollama.ai"

// Model is a locally available model assembled from manifest layers by
// GetModel.
type Model struct {
	Name      string `json:"name"`
	ModelPath string // path to the weights blob in ~/.ollama/models/blobs
	Prompt    string // contents of the prompt-template layer, if any
	Options   api.Options
}

// ManifestV2 mirrors the Docker image manifest V2 schema used to describe a
// model's config and content layers.
type ManifestV2 struct {
	SchemaVersion int    `json:"schemaVersion"`
	MediaType     string `json:"mediaType"`
	Config        Layer  `json:"config"`
	Layers        []*Layer `json:"layers"`
}

// Layer identifies one content blob by media type, digest, and size in bytes.
type Layer struct {
	MediaType string `json:"mediaType"`
	Digest    string `json:"digest"`
	Size      int    `json:"size"`
}

// LayerWithBuffer is a Layer whose contents are held in memory, used while
// creating or copying layers before they are written to the blob store.
type LayerWithBuffer struct {
	Layer

	Buffer *bytes.Buffer
}

// ConfigV2 is the image config blob referenced by a manifest.
type ConfigV2 struct {
	Architecture string `json:"architecture"`
	OS           string `json:"os"`
	RootFS       RootFS `json:"rootfs"`
}

// RootFS lists the digests of the layers making up the image root filesystem.
type RootFS struct {
	Type    string   `json:"type"`
	DiffIDs []string `json:"diff_ids"`
}
// GetManifest loads the locally stored manifest for the named model from
// ~/.ollama/models/manifests. It returns an error if the manifest does not
// exist or cannot be decoded.
func GetManifest(name string) (*ManifestV2, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return nil, err
	}

	fp := filepath.Join(home, ".ollama/models/manifests", name)
	_, err = os.Stat(fp)
	if os.IsNotExist(err) {
		return nil, fmt.Errorf("couldn't find model '%s'", name)
	}

	f, err := os.Open(fp)
	if err != nil {
		return nil, fmt.Errorf("couldn't open file '%s'", fp)
	}
	// BUG FIX: the original never closed f, leaking a file descriptor per call.
	defer f.Close()

	var manifest *ManifestV2
	if err := json.NewDecoder(f).Decode(&manifest); err != nil {
		return nil, err
	}
	return manifest, nil
}
// GetModel resolves a model name to a Model by reading its local manifest and
// mapping each layer's media type onto the corresponding Model field. The
// params layer is currently not decoded (see the commented-out code below);
// Options is always left at its zero value.
func GetModel(name string) (*Model, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return nil, err
	}

	manifest, err := GetManifest(name)
	if err != nil {
		return nil, err
	}

	model := &Model{
		Name: name,
	}

	for _, layer := range manifest.Layers {
		// Layers are content-addressed: the digest is the blob filename.
		filename := filepath.Join(home, ".ollama/models/blobs", layer.Digest)
		switch layer.MediaType {
		case "application/vnd.ollama.image.model":
			// The weights blob is referenced by path, not loaded into memory.
			model.ModelPath = filename
		case "application/vnd.ollama.image.prompt":
			data, err := os.ReadFile(filename)
			if err != nil {
				return nil, err
			}
			model.Prompt = string(data)
		case "application/vnd.ollama.image.params":
			// Params decoding is stubbed out; the layer is acknowledged but
			// its JSON is not read yet.
			/*
				f, err = os.Open(filename)
				if err != nil {
					return nil, err
				}
			*/

			var opts api.Options
			/*
				decoder = json.NewDecoder(f)
				err = decoder.Decode(&opts)
				if err != nil {
					return nil, err
				}
			*/
			model.Options = opts
		}
	}

	return model, nil
}
// getAbsPath expands a leading "~/" to the current user's home directory and
// returns the absolute form of fn.
func getAbsPath(fn string) (string, error) {
	if !strings.HasPrefix(fn, "~/") {
		return filepath.Abs(fn)
	}

	home, err := os.UserHomeDir()
	if err != nil {
		log.Printf("error getting home directory: %v", err)
		return "", err
	}
	return filepath.Abs(strings.Replace(fn, "~", home, 1))
}
// CreateModel assembles a new model from the Modelfile read from mf and
// registers it locally under name. Each Modelfile command becomes a content
// layer (weights, prompt template, or a pooled parameter blob); the layers are
// written to the blob store and tied together with a manifest. fn receives
// human-readable status updates throughout.
func CreateModel(name string, mf io.Reader, fn func(status string)) error {
	fn("parsing modelfile")
	commands, err := parser.Parse(mf)
	if err != nil {
		fn(fmt.Sprintf("error: %v", err))
		return err
	}

	var layers []*LayerWithBuffer
	// param collects PARAMETER commands so they can be serialized into a
	// single JSON layer after the loop.
	param := make(map[string]string)

	for _, c := range commands {
		log.Printf("[%s] - %s\n", c.Name, c.Arg)
		switch c.Name {
		case "model":
			fn("looking for model")
			// A FROM argument is either the name of an existing local model
			// or a filesystem path to a raw weights file.
			mf, err := GetManifest(c.Arg)
			if err != nil {
				// if we couldn't read the manifest, try getting the bin file
				fp, err := getAbsPath(c.Arg)
				if err != nil {
					fn("error determing path. exiting.")
					return err
				}

				fn("creating model layer")
				file, err := os.Open(fp)
				if err != nil {
					fn(fmt.Sprintf("couldn't find model '%s'", c.Arg))
					return fmt.Errorf("failed to open file: %v", err)
				}
				// NOTE(review): this defer fires at function exit, so the
				// weights file stays open for the remainder of CreateModel.
				defer file.Close()

				l, err := CreateLayer(file)
				if err != nil {
					fn(fmt.Sprintf("couldn't create model layer: %v", err))
					return fmt.Errorf("failed to create layer: %v", err)
				}
				l.MediaType = "application/vnd.ollama.image.model"
				layers = append(layers, l)
			} else {
				// The base model exists locally: reuse its layers by
				// re-reading them from the blob store.
				log.Printf("manifest = %#v", mf)
				for _, l := range mf.Layers {
					newLayer, err := GetLayerWithBufferFromLayer(l)
					if err != nil {
						fn(fmt.Sprintf("couldn't read layer: %v", err))
						return err
					}
					layers = append(layers, newLayer)
				}
			}
		case "prompt":
			fn("creating prompt layer")
			// remove the prompt layer if one exists
			layers = removeLayerFromLayers(layers, "application/vnd.ollama.image.prompt")

			prompt := strings.NewReader(c.Arg)
			l, err := CreateLayer(prompt)
			if err != nil {
				fn(fmt.Sprintf("couldn't create prompt layer: %v", err))
				return fmt.Errorf("failed to create layer: %v", err)
			}
			l.MediaType = "application/vnd.ollama.image.prompt"
			layers = append(layers, l)
		default:
			// Anything else came from a PARAMETER line; pooled below.
			param[c.Name] = c.Arg
		}
	}

	// Create a single layer for the parameters
	fn("creating parameter layer")
	if len(param) > 0 {
		// Replace any parameter layer inherited from the base model.
		layers = removeLayerFromLayers(layers, "application/vnd.ollama.image.params")
		paramData, err := paramsToReader(param)
		if err != nil {
			return fmt.Errorf("couldn't create params json: %v", err)
		}
		l, err := CreateLayer(paramData)
		if err != nil {
			return fmt.Errorf("failed to create layer: %v", err)
		}
		l.MediaType = "application/vnd.ollama.image.params"
		layers = append(layers, l)
	}

	digests, err := getLayerDigests(layers)
	if err != nil {
		return err
	}

	// The manifest's Layers field lists content layers only; the config layer
	// is appended to `layers` afterwards so it gets saved to disk but is
	// referenced separately via the manifest's Config field.
	var manifestLayers []*Layer
	for _, l := range layers {
		manifestLayers = append(manifestLayers, &l.Layer)
	}

	// Create a layer for the config object
	fn("creating config layer")
	cfg, err := createConfigLayer(digests)
	if err != nil {
		return err
	}
	layers = append(layers, cfg)

	err = SaveLayers(layers, fn, false)
	if err != nil {
		fn(fmt.Sprintf("error saving layers: %v", err))
		return err
	}

	// Create the manifest
	fn("writing manifest")
	err = CreateManifest(name, cfg, manifestLayers)
	if err != nil {
		fn(fmt.Sprintf("error creating manifest: %v", err))
		return err
	}

	fn("success")
	return nil
}
// removeLayerFromLayers filters layers in place, dropping every layer whose
// media type equals mediaType, and returns the shortened slice.
func removeLayerFromLayers(layers []*LayerWithBuffer, mediaType string) []*LayerWithBuffer {
	kept := layers[:0]
	for _, layer := range layers {
		if layer.MediaType == mediaType {
			continue
		}
		kept = append(kept, layer)
	}
	return kept
}
// SaveLayers writes each layer's buffered contents to the local blob store at
// ~/.ollama/models/blobs, keyed by digest. Blobs that already exist are
// skipped unless force is set. fn receives a status line per layer.
func SaveLayers(layers []*LayerWithBuffer, fn func(status string), force bool) error {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Printf("error getting home directory: %v", err)
		return err
	}

	dir := filepath.Join(home, ".ollama/models/blobs")
	err = os.MkdirAll(dir, 0o700)
	if err != nil {
		return fmt.Errorf("make blobs directory: %w", err)
	}

	// Write each of the layers to disk
	for _, layer := range layers {
		fp := filepath.Join(dir, layer.Digest)
		_, err = os.Stat(fp)
		if os.IsNotExist(err) || force {
			fn(fmt.Sprintf("writing layer %s", layer.Digest))
			// BUG FIX: the original used `defer out.Close()` inside this
			// loop, which kept every blob file open until SaveLayers
			// returned; the helper scopes the defer to one layer.
			if err := writeBlob(fp, layer.Buffer); err != nil {
				return err
			}
		} else {
			fn(fmt.Sprintf("using already created layer %s", layer.Digest))
		}
	}

	return nil
}

// writeBlob creates fp and copies buf into it, closing the file before
// returning so handles do not accumulate across a long layer list.
func writeBlob(fp string, buf *bytes.Buffer) error {
	out, err := os.Create(fp)
	if err != nil {
		log.Printf("couldn't create %s", fp)
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, buf)
	return err
}
// CreateManifest serializes a V2 manifest referencing cfg and layers and
// writes it to ~/.ollama/models/manifests/<name>.
func CreateManifest(name string, cfg *LayerWithBuffer, layers []*Layer) error {
	home, err := os.UserHomeDir()
	if err != nil {
		log.Printf("error getting home directory: %v", err)
		return err
	}

	manifest := ManifestV2{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config: Layer{
			MediaType: cfg.MediaType,
			Size:      cfg.Size,
			Digest:    cfg.Digest,
		},
		Layers: layers,
	}

	manifestJSON, err := json.Marshal(manifest)
	if err != nil {
		return err
	}

	fp := filepath.Join(home, ".ollama/models/manifests", name)
	// BUG FIX: ensure the manifests directory (and any subdirectories implied
	// by name) exists before writing — os.WriteFile does not create parents,
	// so the original failed on a fresh install. Mirrors PullModel.
	if err := os.MkdirAll(filepath.Dir(fp), 0o700); err != nil {
		return fmt.Errorf("make manifests directory: %w", err)
	}
	err = os.WriteFile(fp, manifestJSON, 0644)
	if err != nil {
		log.Printf("couldn't write to %s", fp)
		return err
	}
	return nil
}
// GetLayerWithBufferFromLayer reads the blob referenced by layer from the
// local blob store and returns it as an in-memory LayerWithBuffer carrying
// the same media type.
func GetLayerWithBufferFromLayer(layer *Layer) (*LayerWithBuffer, error) {
	home, err := os.UserHomeDir()
	if err != nil {
		return nil, err
	}

	blob, err := os.Open(filepath.Join(home, ".ollama/models/blobs", layer.Digest))
	if err != nil {
		return nil, fmt.Errorf("could not open blob: %w", err)
	}
	defer blob.Close()

	buffered, err := CreateLayer(blob)
	if err != nil {
		return nil, err
	}
	// CreateLayer assigns a generic media type; restore the original's.
	buffered.MediaType = layer.MediaType
	return buffered, nil
}
// paramsToReader renders the parameter map as indented JSON and returns a
// reader over the encoded bytes.
func paramsToReader(m map[string]string) (io.Reader, error) {
	encoded, err := json.MarshalIndent(m, "", " ")
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(encoded), nil
}
// getLayerDigests returns the digest of every layer in order. It fails if any
// layer has not been assigned a digest yet. The result is deliberately a nil
// slice for empty input so downstream JSON encoding is unchanged.
func getLayerDigests(layers []*LayerWithBuffer) ([]string, error) {
	var digests []string
	for _, layer := range layers {
		if layer.Digest == "" {
			return nil, fmt.Errorf("layer is missing a digest")
		}
		digests = append(digests, layer.Digest)
	}
	return digests, nil
}
// CreateLayer buffers the contents of f and wraps them in a LayerWithBuffer
// whose digest and size are computed from the buffered bytes. The media type
// defaults to a generic tar layer; callers overwrite it as needed.
func CreateLayer(f io.Reader) (*LayerWithBuffer, error) {
	buf := new(bytes.Buffer)
	if _, err := io.Copy(buf, f); err != nil {
		return nil, err
	}

	digest, size := GetSHA256Digest(buf)
	return &LayerWithBuffer{
		Layer: Layer{
			MediaType: "application/vnd.docker.image.rootfs.diff.tar",
			Digest:    digest,
			Size:      size,
		},
		Buffer: buf,
	}, nil
}
// PushModel uploads the named model's layers and manifest to DefaultRegistry.
// fn is called with progress updates: a status string, the digest being
// processed, total and completed byte counts, and a completion fraction.
func PushModel(name, username, password string, fn func(status, digest string, Total, Completed int, Percent float64)) error {
	fn("retrieving manifest", "", 0, 0, 0)

	manifest, err := GetManifest(name)
	if err != nil {
		fn("couldn't retrieve manifest", "", 0, 0, 0)
		return err
	}

	// Split "repo:tag", defaulting the tag to "latest".
	var repoName string
	var tag string

	comps := strings.Split(name, ":")
	switch {
	case len(comps) < 1 || len(comps) > 2:
		return fmt.Errorf("repository name was invalid")
	case len(comps) == 1:
		repoName = comps[0]
		tag = "latest"
	case len(comps) == 2:
		repoName = comps[0]
		tag = comps[1]
	}

	// The config blob is pushed alongside the regular content layers.
	var layers []*Layer
	var total int
	var completed int
	for _, layer := range manifest.Layers {
		layers = append(layers, layer)
		total += layer.Size
	}
	layers = append(layers, &manifest.Config)
	total += manifest.Config.Size

	for _, layer := range layers {
		exists, err := checkBlobExistence(DefaultRegistry, repoName, layer.Digest, username, password)
		if err != nil {
			return err
		}

		if exists {
			completed += layer.Size
			fn("using existing layer", layer.Digest, total, completed, float64(completed)/float64(total))
			continue
		}

		fn("starting upload", layer.Digest, total, completed, float64(completed)/float64(total))

		location, err := startUpload(DefaultRegistry, repoName, username, password)
		if err != nil {
			log.Printf("couldn't start upload: %v", err)
			return err
		}

		err = uploadBlob(location, layer, username, password)
		if err != nil {
			log.Printf("error uploading blob: %v", err)
			return err
		}
		completed += layer.Size
		fn("upload complete", layer.Digest, total, completed, float64(completed)/float64(total))
	}

	// BUG FIX: the original reported float64(completed/total), which performs
	// integer division before the conversion and truncates the fraction.
	fn("pushing manifest", "", total, completed, float64(completed)/float64(total))
	url := fmt.Sprintf("%s/v2/%s/manifests/%s", DefaultRegistry, repoName, tag)
	headers := map[string]string{
		"Content-Type": "application/vnd.docker.distribution.manifest.v2+json",
	}

	manifestJSON, err := json.Marshal(manifest)
	if err != nil {
		return err
	}

	resp, err := makeRequest("PUT", url, headers, bytes.NewReader(manifestJSON), username, password)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Check for success: For a successful upload, the Docker registry will respond with a 201 Created
	if resp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
	}

	fn("success", "", total, completed, 1.0)
	return nil
}
// PullModel downloads the named model's manifest and all referenced blobs
// from DefaultRegistry into the local store, then writes the manifest. fn is
// called with progress updates: a status string, the digest being processed,
// total and completed byte counts, and a completion fraction.
func PullModel(name, username, password string, fn func(status, digest string, Total, Completed int, Percent float64)) error {
	// Split "repo:tag", defaulting the tag to "latest".
	var repoName string
	var tag string

	comps := strings.Split(name, ":")
	switch {
	case len(comps) < 1 || len(comps) > 2:
		return fmt.Errorf("repository name was invalid")
	case len(comps) == 1:
		repoName = comps[0]
		tag = "latest"
	case len(comps) == 2:
		repoName = comps[0]
		tag = comps[1]
	}

	fn("pulling manifest", "", 0, 0, 0)

	manifest, err := pullModelManifest(DefaultRegistry, repoName, tag, username, password)
	if err != nil {
		// BUG FIX: wrap with %w so callers can errors.Is/As the cause; the
		// original's %q flattened the error into a quoted string.
		return fmt.Errorf("pull model manifest: %w", err)
	}

	log.Printf("manifest = %#v", manifest)

	// The config blob is fetched alongside the regular content layers.
	var layers []*Layer
	var total int
	var completed int
	for _, layer := range manifest.Layers {
		layers = append(layers, layer)
		total += layer.Size
	}
	layers = append(layers, &manifest.Config)
	total += manifest.Config.Size

	for _, layer := range layers {
		fn("starting download", layer.Digest, total, completed, float64(completed)/float64(total))
		if err := downloadBlob(DefaultRegistry, repoName, layer.Digest, username, password, fn); err != nil {
			fn(fmt.Sprintf("error downloading: %v", err), layer.Digest, 0, 0, 0)
			return err
		}
		completed += layer.Size
		fn("download complete", layer.Digest, total, completed, float64(completed)/float64(total))
	}

	fn("writing manifest", "", total, completed, 1.0)

	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}

	manifestJSON, err := json.Marshal(manifest)
	if err != nil {
		return err
	}

	fp := filepath.Join(home, ".ollama/models/manifests", name)
	// NOTE(review): path.Dir on a filepath.Join result assumes "/" separators
	// and is wrong on Windows; switching to filepath.Dir here and at the
	// remaining path.Dir call site should happen together so the "path"
	// import does not become unused.
	err = os.MkdirAll(path.Dir(fp), 0o700)
	if err != nil {
		return fmt.Errorf("make manifests directory: %w", err)
	}
	err = os.WriteFile(fp, manifestJSON, 0644)
	if err != nil {
		log.Printf("couldn't write to %s", fp)
		return err
	}

	fn("success", "", total, completed, 1.0)
	return nil
}
// pullModelManifest fetches and decodes the V2 manifest for repoName:tag from
// the given registry.
func pullModelManifest(registryURL, repoName, tag, username, password string) (*ManifestV2, error) {
	url := fmt.Sprintf("%s/v2/%s/manifests/%s", registryURL, repoName, tag)
	// Request the Docker V2 manifest schema explicitly.
	headers := map[string]string{
		"Accept": "application/vnd.docker.distribution.manifest.v2+json",
	}

	resp, err := makeRequest("GET", url, headers, nil, username, password)
	if err != nil {
		log.Printf("couldn't get manifest: %v", err)
		return nil, err
	}
	defer resp.Body.Close()

	// A successful manifest fetch returns 200 OK (the original comment's
	// "201 Created" applies to uploads, not this GET).
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return nil, fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
	}

	var m *ManifestV2
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}

	// err here is the (nil) makeRequest error; the decode error was shadowed.
	return m, err
}
// createConfigLayer builds the image config blob referencing the given layer
// digests and returns it as an in-memory layer.
func createConfigLayer(layers []string) (*LayerWithBuffer, error) {
	// TODO change architecture and OS
	config := ConfigV2{
		Architecture: "arm64",
		OS:           "linux",
		RootFS: RootFS{
			Type:    "layers",
			DiffIDs: layers,
		},
	}

	configJSON, err := json.Marshal(config)
	if err != nil {
		return nil, err
	}

	buf := bytes.NewBuffer(configJSON)
	digest, size := GetSHA256Digest(buf)
	return &LayerWithBuffer{
		Layer: Layer{
			MediaType: "application/vnd.docker.container.image.v1+json",
			Digest:    digest,
			Size:      size,
		},
		Buffer: buf,
	}, nil
}
// GetSHA256Digest returns the "sha256:"-prefixed hex digest of the buffer's
// contents together with the content length in bytes. The buffer is not
// consumed.
func GetSHA256Digest(data *bytes.Buffer) (string, int) {
	contents := data.Bytes()
	sum := sha256.Sum256(contents)
	return "sha256:" + hex.EncodeToString(sum[:]), len(contents)
}
// startUpload begins a blob upload session with the registry and returns the
// session URL the registry provides for the follow-up PUT.
func startUpload(registryURL string, repositoryName string, username string, password string) (string, error) {
	uploadURL := fmt.Sprintf("%s/v2/%s/blobs/uploads/", registryURL, repositoryName)

	resp, err := makeRequest("POST", uploadURL, nil, nil, username, password)
	if err != nil {
		log.Printf("couldn't start upload: %v", err)
		return "", err
	}
	defer resp.Body.Close()

	// A newly created upload session is signalled with 202 Accepted.
	if resp.StatusCode != http.StatusAccepted {
		body, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
	}

	// The Location header carries the upload session URL.
	location := resp.Header.Get("Location")
	if location == "" {
		return "", fmt.Errorf("location header is missing in response")
	}
	return location, nil
}
// checkBlobExistence reports whether the registry already stores a blob with
// the given digest in the named repository.
func checkBlobExistence(registryURL string, repositoryName string, digest string, username string, password string) (bool, error) {
	blobURL := fmt.Sprintf("%s/v2/%s/blobs/%s", registryURL, repositoryName, digest)

	resp, err := makeRequest("HEAD", blobURL, nil, nil, username, password)
	if err != nil {
		log.Printf("couldn't check for blob: %v", err)
		return false, err
	}
	defer resp.Body.Close()

	// The registry answers 200 OK when the blob is present.
	return resp.StatusCode == http.StatusOK, nil
}
// uploadBlob streams one blob from the local blob store into a registry
// upload session. location is the session URL returned by startUpload.
func uploadBlob(location string, layer *Layer, username string, password string) error {
	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}

	// Create URL — completing a monolithic upload requires the digest as a
	// query parameter on the session URL.
	url := fmt.Sprintf("%s&digest=%s", location, layer.Digest)

	headers := make(map[string]string)
	headers["Content-Length"] = fmt.Sprintf("%d", layer.Size)
	headers["Content-Type"] = "application/octet-stream"

	// TODO change from monolithic uploads to chunked uploads
	// TODO allow resumability
	// TODO allow canceling uploads via DELETE
	// TODO allow cross repo blob mount

	fp := filepath.Join(home, ".ollama/models/blobs", layer.Digest)
	f, err := os.Open(fp)
	if err != nil {
		return err
	}
	// BUG FIX: the original never closed the blob file, leaking a descriptor
	// per uploaded layer.
	defer f.Close()

	resp, err := makeRequest("PUT", url, headers, f, username, password)
	if err != nil {
		log.Printf("couldn't upload blob: %v", err)
		return err
	}
	defer resp.Body.Close()

	// Check for success: For a successful upload, the Docker registry will respond with a 201 Created
	if resp.StatusCode != http.StatusCreated {
		body, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
	}
	return nil
}
// downloadBlob fetches one blob from the registry into the local blob store,
// resuming from a "-partial" file when one exists. fn receives progress
// updates keyed by digest. Already-downloaded blobs are skipped.
func downloadBlob(registryURL, repoName, digest string, username, password string, fn func(status, digest string, Total, Completed int, Percent float64)) error {
	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}

	fp := filepath.Join(home, ".ollama/models/blobs", digest)
	_, err = os.Stat(fp)
	if !os.IsNotExist(err) {
		// we already have the file, so return
		log.Printf("already have %s\n", digest)
		return nil
	}

	// Resume from the size of any existing partial download.
	var size int64
	fi, err := os.Stat(fp + "-partial")
	switch {
	case errors.Is(err, os.ErrNotExist):
		// noop, file doesn't exist so create it
	case err != nil:
		return fmt.Errorf("stat: %w", err)
	default:
		size = fi.Size()
	}

	url := fmt.Sprintf("%s/v2/%s/blobs/%s", registryURL, repoName, digest)
	headers := map[string]string{
		"Range": fmt.Sprintf("bytes=%d-", size),
	}

	resp, err := makeRequest("GET", url, headers, nil, username, password)
	if err != nil {
		log.Printf("couldn't download blob: %v", err)
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
		// NOTE(review): ioutil.ReadAll is deprecated; kept so the io/ioutil
		// import stays used — switch to io.ReadAll when the import is dropped.
		body, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
	}

	// BUG FIX: use filepath.Dir rather than path.Dir so the directory of an
	// OS-native path is computed correctly on Windows.
	err = os.MkdirAll(filepath.Dir(fp), 0o700)
	if err != nil {
		return fmt.Errorf("make blobs directory: %w", err)
	}

	// BUG FIX: return the error instead of panicking when the partial file
	// cannot be opened; a permission problem should not crash the server.
	out, err := os.OpenFile(fp+"-partial", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		return fmt.Errorf("open partial file: %w", err)
	}
	defer out.Close()

	remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
	completed := size
	total := remaining + completed

	// Copy in 8 KiB chunks, reporting progress before each chunk; rename the
	// partial file into place once the expected total is reached.
	for {
		fn(fmt.Sprintf("Downloading %s", digest), digest, int(total), int(completed), float64(completed)/float64(total))
		if completed >= total {
			fmt.Printf("finished downloading\n")
			// NOTE(review): out is still open (deferred close) during this
			// rename, which can fail on Windows — confirm before shipping there.
			err = os.Rename(fp+"-partial", fp)
			if err != nil {
				fmt.Printf("error: %v\n", err)
				fn(fmt.Sprintf("error renaming file: %v", err), digest, int(total), int(completed), 1)
				return err
			}
			break
		}

		n, err := io.CopyN(out, resp.Body, 8192)
		if err != nil && !errors.Is(err, io.EOF) {
			return err
		}
		completed += n
	}

	log.Printf("success getting %s\n", digest)
	return nil
}
// makeRequest issues an HTTP request with optional headers and basic auth,
// following at most ten redirects. The caller owns the response and must
// close its body.
func makeRequest(method, url string, headers map[string]string, body io.Reader, username, password string) (*http.Response, error) {
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		return nil, err
	}

	for k, v := range headers {
		req.Header.Set(k, v)
	}

	// TODO: better auth
	if username != "" && password != "" {
		req.SetBasicAuth(username, password)
	}

	client := &http.Client{
		CheckRedirect: func(redirected *http.Request, via []*http.Request) error {
			if len(via) >= 10 {
				return fmt.Errorf("too many redirects")
			}
			log.Printf("redirected to: %s\n", redirected.URL)
			return nil
		},
	}
	return client.Do(req)
}

View File

@@ -1,140 +0,0 @@
package server
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"path"
"strconv"
)
// directoryURL is the hosted model directory used to resolve model names to
// download URLs.
const directoryURL = "https://ollama.ai/api/models"

// Model is a directory entry describing a hosted model and where to fetch it.
type Model struct {
	Name             string `json:"name"`
	DisplayName      string `json:"display_name"`
	Parameters       string `json:"parameters"`
	URL              string `json:"url"`
	ShortDescription string `json:"short_description"`
	Description      string `json:"description"`
	PublishedBy      string `json:"published_by"`
	OriginalAuthor   string `json:"original_author"`
	OriginalURL      string `json:"original_url"`
	License          string `json:"license"`
}

// FullName returns the on-disk destination of the downloaded model blob under
// ~/.ollama/models.
func (m *Model) FullName() string {
	home, err := os.UserHomeDir()
	if err != nil {
		// NOTE(review): panicking here crashes the process when the home
		// directory cannot be determined; consider returning the error.
		panic(err)
	}
	return path.Join(home, ".ollama", "models", m.Name+".bin")
}

// TempFile returns the hidden ".part" path used while the model downloads.
func (m *Model) TempFile() string {
	fullName := m.FullName()
	return path.Join(
		path.Dir(fullName),
		fmt.Sprintf(".%s.part", path.Base(fullName)),
	)
}
// getRemote resolves a model name against the hosted model directory at
// directoryURL and returns its entry, or an error if the name is unknown.
func getRemote(model string) (*Model, error) {
	// resolve the model download from our directory
	resp, err := http.Get(directoryURL)
	if err != nil {
		return nil, fmt.Errorf("failed to get directory: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read directory: %w", err)
	}

	var models []Model
	err = json.Unmarshal(body, &models)
	if err != nil {
		return nil, fmt.Errorf("failed to parse directory: %w", err)
	}

	// Linear scan over the directory listing for an exact name match.
	for _, m := range models {
		if m.Name == model {
			return &m, nil
		}
	}
	return nil, fmt.Errorf("model not found in directory: %s", model)
}
// saveModel downloads model.URL into the local models cache, resuming a
// partial download when a ".part" file exists. fn receives (total, completed)
// byte counts as progress.
func saveModel(model *Model, fn func(total, completed int64)) error {
	// this models cache directory is created by the server on startup
	client := &http.Client{}
	req, err := http.NewRequest("GET", model.URL, nil)
	if err != nil {
		return fmt.Errorf("failed to download model: %w", err)
	}

	// check if completed file exists
	fi, err := os.Stat(model.FullName())
	switch {
	case errors.Is(err, os.ErrNotExist):
		// noop, file doesn't exist so create it
	case err != nil:
		return fmt.Errorf("stat: %w", err)
	default:
		// Already fully downloaded: report completion and stop.
		fn(fi.Size(), fi.Size())
		return nil
	}

	var size int64
	// completed file doesn't exist, check partial file
	fi, err = os.Stat(model.TempFile())
	switch {
	case errors.Is(err, os.ErrNotExist):
		// noop, file doesn't exist so create it
	case err != nil:
		return fmt.Errorf("stat: %w", err)
	default:
		size = fi.Size()
	}

	// Resume from byte offset size via an HTTP Range request.
	req.Header.Add("Range", fmt.Sprintf("bytes=%d-", size))

	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to download model: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		return fmt.Errorf("failed to download model: %s", resp.Status)
	}

	out, err := os.OpenFile(model.TempFile(), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
	if err != nil {
		// NOTE(review): panicking here takes the server down on a file
		// permission problem; consider returning the error instead.
		panic(err)
	}
	defer out.Close()

	remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
	completed := size
	total := remaining + completed

	// Copy in 8 KiB chunks, reporting progress before each chunk; rename the
	// ".part" file into place once the byte count reaches the expected total.
	for {
		fn(total, completed)
		if completed >= total {
			return os.Rename(model.TempFile(), model.FullName())
		}

		n, err := io.CopyN(out, resp.Body, 8192)
		if err != nil && !errors.Is(err, io.EOF) {
			return err
		}
		completed += n
	}
}

View File

@@ -1,42 +1,39 @@
package server
import (
"embed"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"math"
"net"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"text/template"
"time"
"github.com/gin-gonic/gin"
"github.com/lithammer/fuzzysearch/fuzzy"
"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/llama"
)
//go:embed templates/*
var templatesFS embed.FS
var templates = template.Must(template.ParseFS(templatesFS, "templates/*.prompt"))
func cacheDir() string {
home, err := os.UserHomeDir()
if err != nil {
panic(err)
}
return path.Join(home, ".ollama")
return filepath.Join(home, ".ollama")
}
func generate(c *gin.Context) {
start := time.Now()
req := api.GenerateRequest{
Options: api.DefaultOptions(),
Prompt: "",
}
if err := c.ShouldBindJSON(&req); err != nil {
@@ -44,52 +41,49 @@ func generate(c *gin.Context) {
return
}
if remoteModel, _ := getRemote(req.Model); remoteModel != nil {
req.Model = remoteModel.FullName()
}
if _, err := os.Stat(req.Model); err != nil {
if !errors.Is(err, os.ErrNotExist) {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
req.Model = path.Join(cacheDir(), "models", req.Model+".bin")
model, err := GetModel(req.Model)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ch := make(chan any)
go stream(c, ch)
templateNames := make([]string, 0, len(templates.Templates()))
for _, template := range templates.Templates() {
templateNames = append(templateNames, template.Name())
templ, err := template.New("").Parse(model.Prompt)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
match, _ := matchRankOne(path.Base(req.Model), templateNames)
if template := templates.Lookup(match); template != nil {
var sb strings.Builder
if err := template.Execute(&sb, req); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
req.Prompt = sb.String()
var sb strings.Builder
if err = templ.Execute(&sb, req); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
req.Prompt = sb.String()
llm, err := llama.New(req.Model, req.Options)
fmt.Printf("prompt = >>>%s<<<\n", req.Prompt)
llm, err := llama.New(model.ModelPath, req.Options)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
defer llm.Close()
fn := func(s string) {
ch <- api.GenerateResponse{Response: s}
}
ch := make(chan any)
go func() {
defer close(ch)
llm.Predict(req.Context, req.Prompt, func(r api.GenerateResponse) {
r.Model = req.Model
r.CreatedAt = time.Now().UTC()
if r.Done {
r.TotalDuration = time.Since(start)
}
if err := llm.Predict(req.Prompt, fn); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
ch <- r
})
}()
streamResponse(c, ch)
}
func pull(c *gin.Context) {
@@ -99,27 +93,87 @@ func pull(c *gin.Context) {
return
}
remote, err := getRemote(req.Model)
if err != nil {
c.JSON(http.StatusBadGateway, gin.H{"error": err.Error()})
ch := make(chan any)
go func() {
defer close(ch)
fn := func(status, digest string, total, completed int, percent float64) {
ch <- api.PullProgress{
Status: status,
Digest: digest,
Total: total,
Completed: completed,
Percent: percent,
}
}
if err := PullModel(req.Name, req.Username, req.Password, fn); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
}()
streamResponse(c, ch)
}
func push(c *gin.Context) {
var req api.PushRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
ch := make(chan any)
go stream(c, ch)
fn := func(total, completed int64) {
ch <- api.PullProgress{
Total: total,
Completed: completed,
Percent: float64(completed) / float64(total) * 100,
go func() {
defer close(ch)
fn := func(status, digest string, total, completed int, percent float64) {
ch <- api.PushProgress{
Status: status,
Digest: digest,
Total: total,
Completed: completed,
Percent: percent,
}
}
}
if err := PushModel(req.Name, req.Username, req.Password, fn); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
}()
if err := saveModel(remote, fn); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
streamResponse(c, ch)
}
func create(c *gin.Context) {
var req api.CreateRequest
if err := c.ShouldBindJSON(&req); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
// NOTE consider passing the entire Modelfile in the json instead of the path to it
file, err := os.Open(req.Path)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
defer file.Close()
ch := make(chan any)
go func() {
defer close(ch)
fn := func(status string) {
ch <- api.CreateProgress{
Status: status,
}
}
if err := CreateModel(req.Name, file, fn); err != nil {
c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
return
}
}()
streamResponse(c, ch)
}
func Serve(ln net.Listener) error {
@@ -129,8 +183,10 @@ func Serve(ln net.Listener) error {
c.String(http.StatusOK, "Ollama is running")
})
r.POST("api/pull", pull)
r.POST("/api/pull", pull)
r.POST("/api/generate", generate)
r.POST("/api/create", create)
r.POST("/api/push", push)
log.Printf("Listening on %s", ln.Addr())
s := &http.Server{
@@ -140,19 +196,7 @@ func Serve(ln net.Listener) error {
return s.Serve(ln)
}
func matchRankOne(source string, targets []string) (bestMatch string, bestRank int) {
bestRank = math.MaxInt
for _, target := range targets {
if rank := fuzzy.LevenshteinDistance(source, target); bestRank > rank {
bestRank = rank
bestMatch = target
}
}
return
}
func stream(c *gin.Context, ch chan any) {
func streamResponse(c *gin.Context, ch chan any) {
c.Stream(func(w io.Writer) bool {
val, ok := <-ch
if !ok {

View File

@@ -1,4 +1,6 @@
{{- if not .Context }}
Below is an instruction that describes a task. Write a response that appropriately completes the request.
{{- end }}
### Instruction:
{{ .Prompt }}

View File

@@ -1,3 +1,5 @@
{{- if not .Context }}
A helpful assistant who helps the user with any questions asked.
{{- end }}
User: {{ .Prompt }}
Assistant:

View File

@@ -1,4 +1,6 @@
{{- if not .Context }}
Below is an instruction that describes a task. Write a response that appropriately completes the request. Be concise. Once the request is completed, include no other text.
{{- end }}
### Instruction:
{{ .Prompt }}
### Response:

View File

@@ -1,5 +1,7 @@
{{- if not .Context }}
### System:
You are an AI assistant that follows instruction extremely well. Help as much as you can.
{{- end }}
### User:
{{ .Prompt }}

View File

@@ -1,4 +1,6 @@
{{ if not .Context }}
A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
{{- end }}
USER: {{ .Prompt }}
ASSISTANT:

View File

@@ -1,4 +1,6 @@
{{- if not .Context }}
Below is an instruction that describes a task. Write a response that appropriately completes the request
{{- end }}
### Instruction: {{ .Prompt }}