Compare commits
102 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | c84bbf1dd6 |  |
|  | f723bf0879 |  |
|  | cbf725a9ba |  |
|  | 086449b6c7 |  |
|  | 3cbc6a5c01 |  |
|  | 54bb49a502 |  |
|  | cabaada956 |  |
|  | a894cc792d |  |
|  | 519f4d98ef |  |
|  | b963a83559 |  |
|  | bf6688abe6 |  |
|  | 6005b157c2 |  |
|  | 14220d9833 |  |
|  | 8ca50f24f3 |  |
|  | c149fc3143 |  |
|  | afbc763dac |  |
|  | 5dfe91be8b |  |
|  | 9f944c00f1 |  |
|  | 56e87cecb1 |  |
|  | 5ee6116420 |  |
|  | 5d9a4cd251 |  |
|  | 0ebec07569 |  |
|  | 08265515b3 |  |
|  | 67e593e355 |  |
|  | d15c7622b9 |  |
|  | 1deb35ca64 |  |
|  | e2de886831 |  |
|  | f0d7c2f5ea |  |
|  | 12052a7624 |  |
|  | 23e1da778d |  |
|  | 326de48930 |  |
|  | 18f2cb0472 |  |
|  | 53bc36d207 |  |
|  | 4dcf5c3e0b |  |
|  | d1b2f532b9 |  |
|  | e26085b921 |  |
|  | f7b613332c |  |
|  | f594c8eb91 |  |
|  | 76b85bc0e9 |  |
|  | af98a1773f |  |
|  | 9ae9a89883 |  |
|  | 648f0974c6 |  |
|  | fc5230dffa |  |
|  | 2ab20095b3 |  |
|  | f020e1d519 |  |
|  | 4b2d366c37 |  |
|  | 56fd4e4ef2 |  |
|  | 2c8b680b03 |  |
|  | 99b6b60085 |  |
|  | 74f00474e1 |  |
|  | e9a9580bdd |  |
|  | 4c33a9ac67 |  |
|  | 22885aeaee |  |
|  | ed969d2a06 |  |
|  | d9cf18e28d |  |
|  | 1556162c90 |  |
|  | 148f0225c0 |  |
|  | 4e07941b1e |  |
|  | 202c29c21a |  |
|  | c1c871620a |  |
|  | a21a8bef56 |  |
|  | 522726228a |  |
|  | 9770e3b325 |  |
|  | d617823355 |  |
|  | 6ed991c8e2 |  |
|  | e41576e768 |  |
|  | 155c1640f1 |  |
|  | f7d4947573 |  |
|  | 0d7a133b15 |  |
|  | e863066144 |  |
|  | 89a92477ad |  |
|  | 5cda9cdd13 |  |
|  | e5914eb320 |  |
|  | ab78f48ff8 |  |
|  | b1c88eb978 |  |
|  | efae43f932 |  |
|  | d3ee1329e9 |  |
|  | 700c719422 |  |
|  | 55aa4aaf0f |  |
|  | 820f95c4c4 |  |
|  | 3a05d3def7 |  |
|  | edac9c2446 |  |
|  | d9c2687fd0 |  |
|  | 6517bcc53c |  |
|  | 4f54f25b66 |  |
|  | 6a6828bddf |  |
|  | c0e7a3b90e |  |
|  | f27bc261cf |  |
|  | 21e6197c0b |  |
|  | 75d7d681c9 |  |
|  | 81d8d7b73f |  |
|  | 5c0de09a07 |  |
|  | 20bf000e55 |  |
|  | 40d0c4a1dc |  |
|  | be889b2f81 |  |
|  | 37c9a8eea9 |  |
|  | 6de5d032e1 |  |
|  | d791df75dd |  |
|  | 020a3b3530 |  |
|  | fccf8d179f |  |
|  | 43c40c500e |  |
|  | e37f4c4f42 |  |
**README.md** (11 changed lines)
````
@@ -29,7 +29,9 @@ ollama run llama2

## Model library

`ollama` includes a library of open-source models:
Ollama supports a list of open-source models available on [ollama.ai/library](https://ollama.ai/library "ollama model library")

Here are some example open-source models that can be downloaded:

| Model | Parameters | Size | Download |
| ------------------------ | ---------- | ----- | ------------------------------- |
@@ -37,9 +39,10 @@ ollama run llama2
| Llama2 13B | 13B | 7.3GB | `ollama pull llama2:13b` |
| Llama2 70B | 70B | 39GB | `ollama pull llama2:70b` |
| Llama2 Uncensored | 7B | 3.8GB | `ollama pull llama2-uncensored` |
| Orca Mini | 3B | 1.9GB | `ollama pull orca` |
| Orca Mini | 3B | 1.9GB | `ollama pull orca-mini` |
| Vicuna | 7B | 3.8GB | `ollama pull vicuna` |
| Nous-Hermes | 13B | 7.3GB | `ollama pull nous-hermes` |
| Nous-Hermes | 7B | 3.8GB | `ollama pull nous-hermes` |
| Nous-Hermes 13B | 13B | 7.3GB | `ollama pull nous-hermes:13b` |
| Wizard Vicuna Uncensored | 13B | 7.3GB | `ollama pull wizard-vicuna` |

> Note: You should have at least 8 GB of RAM to run the 3B models, 16 GB to run the 7B models, and 32 GB to run the 13B models.

@@ -156,6 +159,8 @@ curl -X POST http://localhost:11434/api/generate -d '{

- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with a question-answering [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa).
- [Continue](https://github.com/continuedev/continue) - embeds Ollama inside Visual Studio Code. The extension lets you highlight code to add to the prompt, ask questions in the sidebar, and generate code inline.
- [LiteLLM](https://github.com/BerriAI/litellm) a lightweight python package to simplify LLM API calls
- [Discord AI Bot](https://github.com/mekb-turtle/discord-ai-bot) - interact with Ollama as a chatbot on Discord.
- [Raycast Ollama](https://github.com/MassimilianoPasquini97/raycast_ollama) - Raycast extension to use Ollama for local llama inference on Raycast.
- [Simple HTML UI for Ollama](https://github.com/rtcfirefly/ollama-ui)
- [Emacs client](https://github.com/zweifisch/ollama) for Ollama
````
````go
@@ -9,10 +9,18 @@ import (
	"io"
	"net/http"
	"net/url"
	"os"
	"strings"
)

const DefaultHost = "localhost:11434"

var (
	envHost = os.Getenv("OLLAMA_HOST")
)

type Client struct {
	base url.URL
	Base    url.URL
	HTTP    http.Client
	Headers http.Header
}
@@ -33,16 +41,34 @@ func checkError(resp *http.Response, body []byte) error {
	return apiError
}

func NewClient(hosts ...string) *Client {
	host := "127.0.0.1:11434"
	if len(hosts) > 0 {
		host = hosts[0]
// Host returns the default host to use for the client. It is determined in the following order:
// 1. The OLLAMA_HOST environment variable
// 2. The default host (localhost:11434)
func Host() string {
	if envHost != "" {
		return envHost
	}
	return DefaultHost
}

// FromEnv creates a new client using Host() as the host. An error is returns
// if the host is invalid.
func FromEnv() (*Client, error) {
	h := Host()
	if !strings.HasPrefix(h, "http://") && !strings.HasPrefix(h, "https://") {
		h = "http://" + h
	}

	return &Client{
		base: url.URL{Scheme: "http", Host: host},
		HTTP: http.Client{},
	u, err := url.Parse(h)
	if err != nil {
		return nil, fmt.Errorf("could not parse host: %w", err)
	}

	if u.Port() == "" {
		u.Host += ":11434"
	}

	return &Client{Base: *u, HTTP: http.Client{}}, nil
}

func (c *Client) do(ctx context.Context, method, path string, reqData, respData any) error {
@@ -57,7 +83,7 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
		reqBody = bytes.NewReader(data)
	}

	url := c.base.JoinPath(path).String()
	url := c.Base.JoinPath(path).String()

	req, err := http.NewRequestWithContext(ctx, method, url, reqBody)
	if err != nil {
@@ -105,7 +131,7 @@ func (c *Client) stream(ctx context.Context, method, path string, data any, fn f
		buf = bytes.NewBuffer(bts)
	}

	request, err := http.NewRequestWithContext(ctx, method, c.base.JoinPath(path).String(), buf)
	request, err := http.NewRequestWithContext(ctx, method, c.Base.JoinPath(path).String(), buf)
	if err != nil {
		return err
	}
````
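For reference, a minimal usage sketch of the new host resolution (not part of this changeset; it assumes the `github.com/jmorganca/ollama/api` import path used in `cmd/cmd.go` below, and `ollama.internal` is a made-up host for illustration):

```go
package main

import (
	"fmt"
	"os"

	"github.com/jmorganca/ollama/api"
)

func main() {
	// A bare host is given an http:// scheme, and a host without a port gets :11434.
	os.Setenv("OLLAMA_HOST", "ollama.internal") // hypothetical host, for illustration only
	client, err := api.FromEnv()
	if err != nil {
		fmt.Println("invalid OLLAMA_HOST:", err)
		return
	}
	fmt.Println(client.Base.String()) // http://ollama.internal:11434
}
```

With `OLLAMA_HOST` unset, `FromEnv` falls back to `DefaultHost` (`localhost:11434`).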
**api/types.go** (28 changed lines)
````go
@@ -216,19 +216,25 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
		if opt, ok := jsonOpts[key]; ok {
			field := valueOpts.FieldByName(opt.Name)
			if field.IsValid() && field.CanSet() {
				if val == nil {
					continue
				}

				switch field.Kind() {
				case reflect.Int:
					// when JSON unmarshals numbers, it uses float64 by default, not int
					val, ok := val.(float64)
					if !ok {
						log.Printf("could not convert model parmeter %v to int, skipped", key)
						continue
					switch t := val.(type) {
					case int64:
						field.SetInt(t)
					case float64:
						// when JSON unmarshals numbers, it uses float64, not int
						field.SetInt(int64(t))
					default:
						log.Printf("could not convert model parameter %v to int, skipped", key)
					}
					field.SetInt(int64(val))
				case reflect.Bool:
					val, ok := val.(bool)
					if !ok {
						log.Printf("could not convert model parmeter %v to bool, skipped", key)
						log.Printf("could not convert model parameter %v to bool, skipped", key)
						continue
					}
					field.SetBool(val)
@@ -236,14 +242,14 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
					// JSON unmarshals to float64
					val, ok := val.(float64)
					if !ok {
						log.Printf("could not convert model parmeter %v to float32, skipped", key)
						log.Printf("could not convert model parameter %v to float32, skipped", key)
						continue
					}
					field.SetFloat(val)
				case reflect.String:
					val, ok := val.(string)
					if !ok {
						log.Printf("could not convert model parmeter %v to string, skipped", key)
						log.Printf("could not convert model parameter %v to string, skipped", key)
						continue
					}
					field.SetString(val)
@@ -251,7 +257,7 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
					// JSON unmarshals to []interface{}, not []string
					val, ok := val.([]interface{})
					if !ok {
						log.Printf("could not convert model parmeter %v to slice, skipped", key)
						log.Printf("could not convert model parameter %v to slice, skipped", key)
						continue
					}
					// convert []interface{} to []string
@@ -259,7 +265,7 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
					for i, item := range val {
						str, ok := item.(string)
						if !ok {
							log.Printf("could not convert model parmeter %v to slice of strings, skipped", key)
							log.Printf("could not convert model parameter %v to slice of strings, skipped", key)
							continue
						}
						slice[i] = str
````
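A small standalone illustration of why the new switch accepts both `int64` and `float64` (my sketch, not part of the diff): Go's `encoding/json` decodes every JSON number held in an `interface{}` as `float64`, while values set programmatically can still be native integers.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var m map[string]interface{}
	// JSON numbers decode to float64, even for an integer option like num_ctx.
	if err := json.Unmarshal([]byte(`{"num_ctx": 4096}`), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", m["num_ctx"]) // float64

	// A value set directly from Go code can be int64, which the new int64 case handles.
	m["repeat_last_n"] = int64(64)
	fmt.Printf("%T\n", m["repeat_last_n"]) // int64
}
```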
**cmd/cmd.go** (125 changed lines)
````go
@@ -3,6 +3,9 @@ package cmd
import (
	"bufio"
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
@@ -11,6 +14,7 @@ import (
	"net/http"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"runtime"
	"strings"
@@ -20,6 +24,7 @@ import (
	"github.com/dustin/go-humanize"
	"github.com/olekukonko/tablewriter"
	"github.com/spf13/cobra"
	"golang.org/x/crypto/ssh"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
@@ -34,7 +39,10 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
		return err
	}

	client := api.NewClient()
	client, err := api.FromEnv()
	if err != nil {
		return err
	}

	var spinner *Spinner

@@ -70,6 +78,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
			spinner = NewSpinner(resp.Status)
			go spinner.Spin(100 * time.Millisecond)
		}

		return nil
	}

@@ -79,6 +88,9 @@ func CreateHandler(cmd *cobra.Command, args []string) error {

	if spinner != nil {
		spinner.Stop()
		if spinner.description != "success" {
			return errors.New("unexpected end to create model")
		}
	}

	return nil
@@ -112,7 +124,10 @@ func RunHandler(cmd *cobra.Command, args []string) error {
}

func PushHandler(cmd *cobra.Command, args []string) error {
	client := api.NewClient()
	client, err := api.FromEnv()
	if err != nil {
		return err
	}

	insecure, err := cmd.Flags().GetBool("insecure")
	if err != nil {
@@ -144,11 +159,19 @@ func PushHandler(cmd *cobra.Command, args []string) error {
	if err := client.Push(context.Background(), &request, fn); err != nil {
		return err
	}

	if bar != nil && !bar.IsFinished() {
		return errors.New("unexpected end to push model")
	}

	return nil
}

func ListHandler(cmd *cobra.Command, args []string) error {
	client := api.NewClient()
	client, err := api.FromEnv()
	if err != nil {
		return err
	}

	models, err := client.List(context.Background())
	if err != nil {
@@ -178,7 +201,10 @@ func ListHandler(cmd *cobra.Command, args []string) error {
}

func DeleteHandler(cmd *cobra.Command, args []string) error {
	client := api.NewClient()
	client, err := api.FromEnv()
	if err != nil {
		return err
	}

	req := api.DeleteRequest{Name: args[0]}
	if err := client.Delete(context.Background(), &req); err != nil {
@@ -189,7 +215,10 @@ func DeleteHandler(cmd *cobra.Command, args []string) error {
}

func CopyHandler(cmd *cobra.Command, args []string) error {
	client := api.NewClient()
	client, err := api.FromEnv()
	if err != nil {
		return err
	}

	req := api.CopyRequest{Source: args[0], Destination: args[1]}
	if err := client.Copy(context.Background(), &req); err != nil {
@@ -209,7 +238,10 @@ func PullHandler(cmd *cobra.Command, args []string) error {
}

func pull(model string, insecure bool) error {
	client := api.NewClient()
	client, err := api.FromEnv()
	if err != nil {
		return err
	}

	var currentDigest string
	var bar *progressbar.ProgressBar
@@ -230,12 +262,18 @@ func pull(model string, insecure bool) error {
			currentDigest = ""
			fmt.Println(resp.Status)
		}

		return nil
	}

	if err := client.Pull(context.Background(), &request, fn); err != nil {
		return err
	}

	if bar != nil && !bar.IsFinished() {
		return errors.New("unexpected end to pull model")
	}

	return nil
}

@@ -256,7 +294,10 @@ type generateContextKey string

func generate(cmd *cobra.Command, model, prompt string) error {
	if len(strings.TrimSpace(prompt)) > 0 {
		client := api.NewClient()
		client, err := api.FromEnv()
		if err != nil {
			return err
		}

		spinner := NewSpinner("")
		go spinner.Spin(60 * time.Millisecond)
@@ -299,6 +340,10 @@ func generate(cmd *cobra.Command, model, prompt string) error {
		fmt.Println()
		fmt.Println()

		if !latest.Done {
			return errors.New("unexpected end of response")
		}

		verbose, err := cmd.Flags().GetBool("verbose")
		if err != nil {
			return err
@@ -540,6 +585,11 @@ func RunServer(cmd *cobra.Command, _ []string) error {
		port = p
	}

	err := initializeKeypair()
	if err != nil {
		return err
	}

	ln, err := net.Listen("tcp", fmt.Sprintf("%s:%s", host, port))
	if err != nil {
		return err
@@ -553,6 +603,55 @@ func RunServer(cmd *cobra.Command, _ []string) error {
	return server.Serve(ln, origins)
}

func initializeKeypair() error {
	home, err := os.UserHomeDir()
	if err != nil {
		return err
	}

	privKeyPath := filepath.Join(home, ".ollama", "id_ed25519")
	pubKeyPath := filepath.Join(home, ".ollama", "id_ed25519.pub")

	_, err = os.Stat(privKeyPath)
	if os.IsNotExist(err) {
		fmt.Printf("Couldn't find '%s'. Generating new private key.\n", privKeyPath)
		_, privKey, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			return err
		}

		privKeyBytes, err := format.OpenSSHPrivateKey(privKey, "")
		if err != nil {
			return err
		}

		err = os.MkdirAll(path.Dir(privKeyPath), 0o700)
		if err != nil {
			return fmt.Errorf("could not create directory %w", err)
		}

		err = os.WriteFile(privKeyPath, pem.EncodeToMemory(privKeyBytes), 0600)
		if err != nil {
			return err
		}

		sshPrivateKey, err := ssh.NewSignerFromKey(privKey)
		if err != nil {
			return err
		}

		pubKeyData := ssh.MarshalAuthorizedKey(sshPrivateKey.PublicKey())

		err = os.WriteFile(pubKeyPath, pubKeyData, 0644)
		if err != nil {
			return err
		}

		fmt.Printf("Your new public key is: \n\n%s\n", string(pubKeyData))
	}
	return nil
}

func startMacApp(client *api.Client) error {
	exe, err := os.Executable()
	if err != nil {
@@ -585,7 +684,10 @@ func startMacApp(client *api.Client) error {
}

func checkServerHeartbeat(_ *cobra.Command, _ []string) error {
	client := api.NewClient()
	client, err := api.FromEnv()
	if err != nil {
		return err
	}
	if err := client.Heartbeat(context.Background()); err != nil {
		if !strings.Contains(err.Error(), "connection refused") {
			return err
@@ -605,9 +707,10 @@ func NewCLI() *cobra.Command {
	log.SetFlags(log.LstdFlags | log.Lshortfile)

	rootCmd := &cobra.Command{
		Use:          "ollama",
		Short:        "Large language model runner",
		SilenceUsage: true,
		Use:           "ollama",
		Short:         "Large language model runner",
		SilenceUsage:  true,
		SilenceErrors: true,
		CompletionOptions: cobra.CompletionOptions{
			DisableDefaultCmd: true,
		},
````
````
@@ -3,3 +3,4 @@
- [Modelfile](./modelfile.md)
- [How to develop Ollama](./development.md)
- [API](./api.md)
- [Tutorials](./tutorials.md)
````
**docs/api.md** (37 changed lines)
````
@@ -8,6 +8,7 @@
- [Copy a model](#copy-a-model)
- [Delete a model](#delete-a-model)
- [Pull a model](#pull-a-model)
- [Generate embeddings](#generate-embeddings)

## Conventions

@@ -37,6 +38,7 @@ Advanced parameters:
- `options`: additional model parameters listed in the documentation for the [Modelfile](./modelfile.md#valid-parameters-and-values) such as `temperature`
- `system`: system prompt to (overrides what is defined in the `Modelfile`)
- `template`: the full prompt or prompt template (overrides what is defined in the `Modelfile`)
- `context`: the context parameter returned from a previous request to `/generate`, this can be used to keep a short conversational memory

### Request

@@ -70,6 +72,7 @@ The final response in the stream also includes additional data about the generat
- `prompt_eval_duration`: time spent in nanoseconds evaluating the prompt
- `eval_count`: number of tokens the response
- `eval_duration`: time in nanoseconds spent generating the response
- `context`: an encoding of the conversation used in this response, this can be sent in the next request to keep a conversational memory

To calculate how fast the response is generated in tokens per second (token/s), divide `eval_count` / `eval_duration`.

@@ -77,6 +80,7 @@ To calculate how fast the response is generated in tokens per second (token/s),
{
  "model": "llama2:7b",
  "created_at": "2023-08-04T19:22:45.499127Z",
  "context": [1, 2, 3],
  "done": true,
  "total_duration": 5589157167,
  "load_duration": 3013701500,
@@ -220,3 +224,36 @@ curl -X POST http://localhost:11434/api/pull -d '{
  "total": 2142590208
}
```

## Generate Embeddings

```
POST /api/embeddings
```

Generate embeddings from a model

### Parameters

- `model`: name of model to generate embeddings from
- `prompt`: text to generate embeddings for

### Request

```
curl -X POST http://localhost:11434/api/embeddings -d '{
  "model": "llama2:7b",
  "prompt": "Here is an article about llamas..."
}'
```

### Response

```json
{
  "embeddings": [
    0.5670403838157654, 0.009260174818336964, 0.23178744316101074, -0.2916173040866852, -0.8924556970596313,
    0.8785552978515625, -0.34576427936553955, 0.5742510557174683, -0.04222835972905159, -0.137906014919281
  ]
}
```
````
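To make the new `context` parameter concrete, here is a hedged Go sketch (mine, not from this changeset) that chains two `/api/generate` calls. The field names (`model`, `prompt`, `context`, `response`, `done`) follow the documentation above, and the newline-delimited streaming format matches how the `dockerit` example below reads responses.

```go
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// Minimal request/response shapes for the documented /api/generate endpoint.
type genReq struct {
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Context []int  `json:"context,omitempty"`
}

type genResp struct {
	Response string `json:"response"`
	Done     bool   `json:"done"`
	Context  []int  `json:"context"`
}

// generate streams one response and returns the final context value.
func generate(req genReq) ([]int, error) {
	body, _ := json.Marshal(req)
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	var last genResp
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		if err := json.Unmarshal(scanner.Bytes(), &last); err != nil {
			return nil, err
		}
		fmt.Print(last.Response)
	}
	return last.Context, scanner.Err()
}

func main() {
	// First turn: no context yet.
	ctx, err := generate(genReq{Model: "llama2:7b", Prompt: "Name a Greek hero."})
	if err != nil {
		panic(err)
	}
	fmt.Println()
	// Second turn: pass the returned context back to keep a short conversational memory.
	if _, err := generate(genReq{Model: "llama2:7b", Prompt: "Where was he from?", Context: ctx}); err != nil {
		panic(err)
	}
}
```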
````
@@ -12,11 +12,13 @@ A model file is the blueprint to create and share models with Ollama.
- [FROM (Required)](#from-required)
  - [Build from llama2](#build-from-llama2)
  - [Build from a bin file](#build-from-a-bin-file)
- [EMBED](#embed)
- [PARAMETER](#parameter)
  - [Valid Parameters and Values](#valid-parameters-and-values)
- [TEMPLATE](#template)
  - [Template Variables](#template-variables)
- [SYSTEM](#system)
- [ADAPTER](#adapter)
- [LICENSE](#license)
- [Notes](#notes)

@@ -35,6 +37,7 @@ INSTRUCTION arguments
| [`PARAMETER`](#parameter) | Sets the parameters for how Ollama will run the model. |
| [`TEMPLATE`](#template) | The full prompt template to be sent to the model. |
| [`SYSTEM`](#system) | Specifies the system prompt that will be set in the template. |
| [`ADAPTER`](#adapter) | Defines the (Q)LoRA adapters to apply to the model. |
| [`LICENSE`](#license) | Specifies the legal license. |

## Examples
@@ -88,6 +91,16 @@ FROM ./ollama-model.bin

This bin file location should be specified as an absolute path or relative to the Modelfile location.

### EMBED

The EMBED instruction is used to add embeddings of files to a model. This is useful for adding custom data that the model can reference when generating an answer. Note that currently only text files are supported, formatted with each line as one embedding.
```
FROM <model name>:<tag>
EMBED <file path>.txt
EMBED <different file path>.txt
EMBED <path to directory>/*.txt
```

### PARAMETER

The `PARAMETER` instruction defines a parameter that can be set when the model is run.
@@ -104,6 +117,7 @@ PARAMETER <parameter> <parametervalue>
| mirostat_eta | Influences how quickly the algorithm responds to feedback from the generated text. A lower learning rate will result in slower adjustments, while a higher learning rate will make the algorithm more responsive. (Default: 0.1) | float | mirostat_eta 0.1 |
| mirostat_tau | Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0) | float | mirostat_tau 5.0 |
| num_ctx | Sets the size of the context window used to generate the next token. (Default: 2048) | int | num_ctx 4096 |
| num_gqa | The number of GQA groups in the transformer layer. Required for some models, for example it is 8 for llama2:70b | int | num_gqa 1 |
| num_gpu | The number of GPUs to use. On macOS it defaults to 1 to enable metal support, 0 to disable. | int | num_gpu 1 |
| num_thread | Sets the number of threads to use during computation. By default, Ollama will detect this for optimal performance. It is recommended to set this value to the number of physical CPU cores your system has (as opposed to the logical number of cores). | int | num_thread 8 |
| repeat_last_n | Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx) | int | repeat_last_n 64 |
@@ -150,6 +164,14 @@ The `SYSTEM` instruction specifies the system prompt to be used in the template,
SYSTEM """<system message>"""
```

### ADAPTER

The `ADAPTER` instruction specifies the LoRA adapter to apply to the base model. The value of this instruction should be an absolute path or a path relative to the Modelfile and the file must be in a GGML file format. The adapter should be tuned from the base model otherwise the behaviour is undefined.

```
ADAPTER ./ollama-lora.bin
```

### LICENSE

The `LICENSE` instruction allows you to specify the legal license under which the model used with this Modelfile is shared or distributed.
@@ -163,4 +185,4 @@ LICENSE """
## Notes

- the **modelfile is not case sensitive**. In the examples, we use uppercase for instructions to make it easier to distinguish it from arguments.
- Instructions can be in any order. In the examples, we start with FROM instruction to keep it easily readable.
- Instructions can be in any order. In the examples, we start with FROM instruction to keep it easily readable.
````
**docs/tutorials.md** (new file, 8 lines)
````
@@ -0,0 +1,8 @@
# Tutorials

Here is a list of ways you can use Ollama with other tools to build interesting applications.

- [Using LangChain with Ollama in JavaScript](./tutorials/langchainjs.md)
- [Using LangChain with Ollama in Python](./tutorials/langchainpy.md)

Also be sure to check out the [examples](../examples) directory for more ways to use Ollama.
````
**docs/tutorials/langchainjs.md** (new file, 73 lines)
````
@@ -0,0 +1,73 @@
# Using LangChain with Ollama using JavaScript

In this tutorial, we are going to use JavaScript with LangChain and Ollama to learn about something just a touch more recent. In August 2023, there was a series of wildfires on Maui. There is no way an LLM trained before that time can know about this, since their training data would not include anything as recent as that. So we can find the [Wikipedia article about the fires](https://en.wikipedia.org/wiki/2023_Hawaii_wildfires) and ask questions about the contents.

To get started, let's just use **LangChain** to ask a simple question to a model. To do this with JavaScript, we need to install **LangChain**:

```bash
npm install langchain
```

Now we can start building out our JavaScript:

```javascript
import { Ollama } from "langchain/llms/ollama";

const ollama = new Ollama({
  baseUrl: "http://localhost:11434",
  model: "llama2",
});

const answer = await ollama.call(`why is the sky blue?`);

console.log(answer);
```

That will get us the same thing as if we ran `ollama run llama2 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's build that part of the app.

```javascript
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";

const loader = new CheerioWebBaseLoader("https://en.wikipedia.org/wiki/2023_Hawaii_wildfires");
const data = loader.load();
```

That will load the document. Although this page is smaller than the Odyssey, it is certainly bigger than the context size for most LLMs. So we are going to need to split into smaller pieces, and then select just the pieces relevant to our question. This is a great use for a vector datastore. In this example, we will use the **MemoryVectorStore** that is part of **LangChain**. But there is one more thing we need to get the content into the datastore. We have to run an embeddings process that converts the tokens in the text into a series of vectors. And for that, we are going to use **Tensorflow**. There is a lot of stuff going on in this one. First, install the **Tensorflow** components that we need.

```javascript
npm install @tensorflow/tfjs-core@3.6.0 @tensorflow/tfjs-converter@3.6.0 @tensorflow-models/universal-sentence-encoder@1.3.3 @tensorflow/tfjs-node@4.10.0
```

If you just install those components without the version numbers, it will install the latest versions, but there are conflicts within **Tensorflow**, so you need to install the compatible versions.

```javascript
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import "@tensorflow/tfjs-node";
import { TensorFlowEmbeddings } from "langchain/embeddings/tensorflow";

// Split the text into 500 character chunks. And overlap each chunk by 20 characters
const textSplitter = new RecursiveCharacterTextSplitter({
  chunkSize: 500,
  chunkOverlap: 20
});
const splitDocs = await textSplitter.splitDocuments(data);

// Then use the TensorFlow Embedding to store these chunks in the datastore
const vectorStore = await MemoryVectorStore.fromDocuments(splitDocs, new TensorFlowEmbeddings());
```

To connect the datastore to a question asked to a LLM, we need to use the concept at the heart of **LangChain**: the chain. Chains are a way to connect a number of activities together to accomplish a particular tasks. There are a number of chain types available, but for this tutorial we are using the **RetrievalQAChain**.

```javascript
import { RetrievalQAChain } from "langchain/chains";

const retriever = vectorStore.asRetriever();
const chain = RetrievalQAChain.fromLLM(ollama, retriever);
const result = await chain.call({query: "When was Hawaii's request for a major disaster declaration approved?"});
console.log(result.text)
```

So we created a retriever, which is a way to return the chunks that match a query from a datastore. And then connect the retriever and the model via a chain. Finally, we send a query to the chain, which results in an answer using our document as a source. The answer it returned was correct, August 10, 2023.

And that is a simple introduction to what you can do with **LangChain** and **Ollama.**
````
**docs/tutorials/langchainpy.md** (new file, 81 lines)
````
@@ -0,0 +1,81 @@
# Using LangChain with Ollama in Python

Let's imagine we are studying the classics, such as **the Odyssey** by **Homer**. We might have a question about Neleus and his family. If you ask llama2 for that info, you may get something like:

> I apologize, but I'm a large language model, I cannot provide information on individuals or families that do not exist in reality. Neleus is not a real person or character, and therefore does not have a family or any other personal details. My apologies for any confusion. Is there anything else I can help you with?

This sounds like a typical censored response, but even llama2-uncensored gives a mediocre answer:

> Neleus was a legendary king of Pylos and the father of Nestor, one of the Argonauts. His mother was Clymene, a sea nymph, while his father was Neptune, the god of the sea.

So let's figure out how we can use **LangChain** with Ollama to ask our question to the actual document, the Odyssey by Homer, using Python.

Let's start by asking a simple question that we can get an answer to from the **Llama2** model using **Ollama**. First, we need to install the **LangChain** package:

`pip install langchain`

Then we can create a model and ask the question:

```python
from langchain.llms import Ollama
ollama = Ollama(base_url='http://localhost:11434',
model="llama2")
print(ollama("why is the sky blue"))
```

Notice that we are defining the model and the base URL for Ollama.

Now let's load a document to ask questions against. I'll load up the Odyssey by Homer, which you can find at Project Gutenberg. We will need **WebBaseLoader** which is part of **LangChain** and loads text from any webpage. On my machine, I also needed to install **bs4** to get that to work, so run `pip install bs4`.

```python
from langchain.document_loaders import WebBaseLoader
loader = WebBaseLoader("https://www.gutenberg.org/files/1727/1727-h/1727-h.htm")
data = loader.load()
```

This file is pretty big. Just the preface is 3000 tokens. Which means the full document won't fit into the context for the model. So we need to split it up into smaller pieces.

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
```

It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. For now, we don't have embeddings built in to Ollama, though we will be adding that soon, so for now, we can use the GPT4All library for that. We will use ChromaDB in this example for a vector database. `pip install GPT4All chromadb`

```python
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
```

Now let's ask a question from the document. **Who was Neleus, and who is in his family?** Neleus is a character in the Odyssey, and the answer can be found in our text.

```python
question="Who is Neleus and who is in Neleus' family?"
docs = vectorstore.similarity_search(question)
len(docs)
```

This will output the number of matches for chunks of data similar to the search.

The next thing is to send the question and the relevant parts of the docs to the model to see if we can get a good answer. But we are stitching two parts of the process together, and that is called a chain. This means we need to define a chain:

```python
from langchain.chains import RetrievalQA
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
qachain({"query": question})
```

The answer received from this chain was:

> Neleus is a character in Homer's "Odyssey" and is mentioned in the context of Penelope's suitors. Neleus is the father of Chloris, who is married to Neleus and bears him several children, including Nestor, Chromius, Periclymenus, and Pero. Amphinomus, the son of Nisus, is also mentioned as a suitor of Penelope and is known for his good natural disposition and agreeable conversation.

It's not a perfect answer, as it implies Neleus married his daughter when actually Chloris "was the youngest daughter to Amphion son of Iasus and king of Minyan Orchomenus, and was Queen in Pylos".

I updated the chunk_overlap for the text splitter to 20 and tried again and got a much better answer:

> Neleus is a character in Homer's epic poem "The Odyssey." He is the husband of Chloris, who is the youngest daughter of Amphion son of Iasus and king of Minyan Orchomenus. Neleus has several children with Chloris, including Nestor, Chromius, Periclymenus, and Pero.

And that is a much better answer.
````
````
@@ -1,6 +1,6 @@
# Examples

This directory contains examples that can be created and run with `ollama`.
This directory contains different examples of using Ollama

To create a model:
````
**examples/dockerit/Modelfile** (new file, 20 lines)
````
@@ -0,0 +1,20 @@
FROM llama2
SYSTEM """
You are an experience Devops engineer focused on docker. When given specifications for a particular need or application you know the best way to host that within a docker container. For instance if someone tells you they want an nginx server to host files located at /web you will answer as follows

---start
FROM nginx:alpine
COPY /myweb /usr/share/nginx/html
EXPOSE 80
---end

Notice that the answer you should give is just the contents of the dockerfile with no explanation and there are three dashes and the word start at the beginning and 3 dashes and the word end. The full output can be piped into a file and run as is. Here is another example. The user will ask to launch a Postgres server with a password of abc123. And the response should be

---start
FROM postgres:latest
ENV POSTGRES_PASSWORD=abc123
EXPOSE 5432
---end

Again it's just the contents of the dockerfile an nothing else.
"""
````
**examples/dockerit/README.md** (new file, 15 lines)
````
@@ -0,0 +1,15 @@
# DockerIt

DockerIt is a tool to help you build and run your application in a Docker container. It consists of a model that defines the system prompt and model weights to use, along with a python script to then build the container and run the image automatically.

## Caveats

This is an simple example. It's assuming the Dockerfile content generated is going to work. In many cases, even with simple web servers, it fails when trying to copy files that don't exist. It's simply an example of what you could possibly do.

## Example Usage

```bash
> python3 ./dockerit.py "simple postgres server with admin password set to 123"
Enter the name of the image: matttest
Container named happy_keller started with id: 7c201bb6c30f02b356ddbc8e2a5af9d7d7d7b8c228519c9a501d15c0bd9d6b3e
```
````
**examples/dockerit/dockerit.py** (new file, 17 lines)
````python
@@ -0,0 +1,17 @@
import requests, json, docker, io, sys
inputDescription = " ".join(sys.argv[1:])
imageName = input("Enter the name of the image: ")
client = docker.from_env()
s = requests.Session()
output=""
with s.post('http://localhost:11434/api/generate', json={'model': 'dockerit', 'prompt': inputDescription}, stream=True) as r:
    for line in r.iter_lines():
        if line:
            j = json.loads(line)
            if "response" in j:
                output = output + j["response"]
output = output[output.find("---start")+9:output.find("---end")-1]
f = io.BytesIO(bytes(output, 'utf-8'))
client.images.build(fileobj=f, tag=imageName)
container = client.containers.run(imageName, detach=True)
print("Container named", container.name, " started with id: ", container.id)
````
**examples/dockerit/requirements.txt** (new file, 1 line)
````
@@ -0,0 +1 @@
docker
````
**examples/langchain-document/README.md** (new file, 21 lines)
````
@@ -0,0 +1,21 @@
# LangChain Document QA

This example provides an interface for asking questions to a PDF document.

## Setup

```
pip install -r requirements.txt
```

## Run

```
python main.py
```

A prompt will appear, where questions may be asked:

```
Query: How many locations does WeWork have?
```
````
**examples/langchain-document/main.py** (new file, 61 lines)
````python
@@ -0,0 +1,61 @@
from langchain.document_loaders import OnlinePDFLoader
from langchain.vectorstores import Chroma
from langchain.embeddings import GPT4AllEmbeddings
from langchain import PromptTemplate
from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import RetrievalQA
import sys
import os

class SuppressStdout:
    def __enter__(self):
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        sys.stdout = open(os.devnull, 'w')
        sys.stderr = open(os.devnull, 'w')

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr

# load the pdf and split it into chunks
loader = OnlinePDFLoader("https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf")
data = loader.load()

from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)

with SuppressStdout():
    vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())

while True:
    query = input("\nQuery: ")
    if query == "exit":
        break
    if query.strip() == "":
        continue

    # Prompt
    template = """Use the following pieces of context to answer the question at the end.
    If you don't know the answer, just say that you don't know, don't try to make up an answer.
    Use three sentences maximum and keep the answer as concise as possible.
    {context}
    Question: {question}
    Helpful Answer:"""
    QA_CHAIN_PROMPT = PromptTemplate(
        input_variables=["context", "question"],
        template=template,
    )

    llm = Ollama(model="llama2:13b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=vectorstore.as_retriever(),
        chain_type_kwargs={"prompt": QA_CHAIN_PROMPT},
    )

    result = qa_chain({"query": query})
````
**examples/langchain-document/requirements.txt** (new file, 109 lines)
````
@@ -0,0 +1,109 @@
absl-py==1.4.0
aiohttp==3.8.5
aiosignal==1.3.1
anyio==3.7.1
astunparse==1.6.3
async-timeout==4.0.3
attrs==23.1.0
backoff==2.2.1
beautifulsoup4==4.12.2
bs4==0.0.1
cachetools==5.3.1
certifi==2023.7.22
cffi==1.15.1
chardet==5.2.0
charset-normalizer==3.2.0
Chroma==0.2.0
chroma-hnswlib==0.7.2
chromadb==0.4.5
click==8.1.6
coloredlogs==15.0.1
cryptography==41.0.3
dataclasses-json==0.5.14
fastapi==0.99.1
filetype==1.2.0
flatbuffers==23.5.26
frozenlist==1.4.0
gast==0.4.0
google-auth==2.22.0
google-auth-oauthlib==1.0.0
google-pasta==0.2.0
gpt4all==1.0.8
grpcio==1.57.0
h11==0.14.0
h5py==3.9.0
httptools==0.6.0
humanfriendly==10.0
idna==3.4
importlib-resources==6.0.1
joblib==1.3.2
keras==2.13.1
langchain==0.0.261
langsmith==0.0.21
libclang==16.0.6
lxml==4.9.3
Markdown==3.4.4
MarkupSafe==2.1.3
marshmallow==3.20.1
monotonic==1.6
mpmath==1.3.0
multidict==6.0.4
mypy-extensions==1.0.0
nltk==3.8.1
numexpr==2.8.5
numpy==1.24.3
oauthlib==3.2.2
onnxruntime==1.15.1
openapi-schema-pydantic==1.2.4
opt-einsum==3.3.0
overrides==7.4.0
packaging==23.1
pdf2image==1.16.3
pdfminer==20191125
pdfminer.six==20221105
Pillow==10.0.0
posthog==3.0.1
protobuf==4.24.0
pulsar-client==3.2.0
pyasn1==0.5.0
pyasn1-modules==0.3.0
pycparser==2.21
pycryptodome==3.18.0
pydantic==1.10.12
PyPika==0.48.9
python-dateutil==2.8.2
python-dotenv==1.0.0
python-magic==0.4.27
PyYAML==6.0.1
regex==2023.8.8
requests==2.31.0
requests-oauthlib==1.3.1
rsa==4.9
six==1.16.0
sniffio==1.3.0
soupsieve==2.4.1
SQLAlchemy==2.0.19
starlette==0.27.0
sympy==1.12
tabulate==0.9.0
tenacity==8.2.2
tensorboard==2.13.0
tensorboard-data-server==0.7.1
tensorflow==2.13.0
tensorflow-estimator==2.13.0
tensorflow-hub==0.14.0
tensorflow-macos==2.13.0
termcolor==2.3.0
tokenizers==0.13.3
tqdm==4.66.1
typing-inspect==0.9.0
typing_extensions==4.5.0
unstructured==0.9.2
urllib3==1.26.16
uvicorn==0.23.2
uvloop==0.17.0
watchfiles==0.19.0
websockets==11.0.3
Werkzeug==2.3.6
wrapt==1.15.0
yarl==1.9.2
````
**examples/langchain-web-summary/README.md** (new file, 15 lines)
````
@@ -0,0 +1,15 @@
# LangChain Web Summarization

This example summarizes a website

## Setup

```
pip install -r requirements.txt
```

## Run

```
python main.py
```
````
**examples/langchain-web-summary/main.py** (new file, 12 lines)
````python
@@ -0,0 +1,12 @@
from langchain.llms import Ollama
from langchain.document_loaders import WebBaseLoader
from langchain.chains.summarize import load_summarize_chain

loader = WebBaseLoader("https://ollama.ai/blog/run-llama2-uncensored-locally")
docs = loader.load()

llm = Ollama(model="llama2")
chain = load_summarize_chain(llm, chain_type="stuff")

result = chain.run(docs)
print(result)
````
**examples/langchain-web-summary/requirements.txt** (new file, 2 lines)
````
@@ -0,0 +1,2 @@
langchain==0.0.259
bs4==0.0.1
````
**examples/langchain/README.md** (new file, 21 lines)
````
@@ -0,0 +1,21 @@
# LangChain

This example is a basic "hello world" of using LangChain with Ollama.

## Setup

```
pip install -r requirements.txt
```

## Run

```
python main.py
```

Running this example will print the response for "hello":

```
Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat?
```
````
**examples/langchain/main.py** (new file, 4 lines)
````python
@@ -0,0 +1,4 @@
from langchain.llms import Ollama
llm = Ollama(model="llama2")
res = llm.predict("hello")
print (res)
````
**examples/langchain/requirements.txt** (new file, 1 line)
````
@@ -0,0 +1 @@
langchain==0.0.259
````
**examples/privategpt/.gitignore** (new file, vendored, 170 lines): the standard GitHub Python `.gitignore` template, preceded by entries for the example's local artifacts (`.DS_STORE`, `models/`, `.chroma/`, `db/`).
**examples/privategpt/LICENSE** (new file, 201 lines): the full text of the Apache License, Version 2.0 (January 2004, http://www.apache.org/licenses/).
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
91
examples/privategpt/README.md
Normal file
@@ -0,0 +1,91 @@
# PrivateGPT with Llama 2 uncensored

https://github.com/jmorganca/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b

> Note: this example is a slightly modified version of PrivateGPT using models such as Llama 2 Uncensored. All credit for PrivateGPT goes to Iván Martínez, its creator; you can find his GitHub repo [here](https://github.com/imartinez/privateGPT).

### Setup

Set up a virtual environment (optional):

```
python3 -m venv .venv
source .venv/bin/activate
```

Install the Python dependencies:

```shell
pip install -r requirements.txt
```

Pull the model you'd like to use:

```
ollama pull llama2-uncensored
```

### Getting WeWork's latest quarterly earnings report (10-Q)

```
mkdir source_documents
curl https://d18rn0p25nwr6d.cloudfront.net/CIK-0001813756/975b3e9b-268e-4798-a9e4-2a9a7c92dc10.pdf -o source_documents/wework.pdf
```

### Ingesting files

```shell
python ingest.py
```

Output should look like this:

```shell
Creating new vectorstore
Loading documents from source_documents
Loading new documents: 100%|██████████████████████| 1/1 [00:01<00:00,  1.73s/it]
Loaded 1 new documents from source_documents
Split into 90 chunks of text (max. 500 tokens each)
Creating embeddings. May take some minutes...
Using embedded DuckDB with persistence: data will be stored in: db
Ingestion complete! You can now run privateGPT.py to query your documents
```

### Ask questions

```shell
python privateGPT.py

Enter a query: How many locations does WeWork have?

> Answer (took 17.7 s.):
As of June 2023, WeWork has 777 locations worldwide, including 610 Consolidated Locations (as defined in the section entitled Key Performance Indicators).
```

### Try a different model:

```
ollama pull llama2:13b
MODEL=llama2:13b python privateGPT.py
```

## Adding more files

Put any and all of your files into the `source_documents` directory.

The supported extensions are:

- `.csv`: CSV,
- `.docx`: Word Document,
- `.doc`: Word Document,
- `.enex`: EverNote,
- `.eml`: Email,
- `.epub`: EPub,
- `.html`: HTML File,
- `.md`: Markdown,
- `.msg`: Outlook Message,
- `.odt`: Open Document Text,
- `.pdf`: Portable Document Format (PDF),
- `.pptx`: PowerPoint Document,
- `.ppt`: PowerPoint Document,
- `.txt`: Text file (UTF-8),
12
examples/privategpt/constants.py
Normal file
@@ -0,0 +1,12 @@
import os
from chromadb.config import Settings

# Define the folder for storing database
PERSIST_DIRECTORY = os.environ.get('PERSIST_DIRECTORY', 'db')

# Define the Chroma settings
CHROMA_SETTINGS = Settings(
    chroma_db_impl='duckdb+parquet',
    persist_directory=PERSIST_DIRECTORY,
    anonymized_telemetry=False
)
examples/privategpt/ingest.py
Executable file
161
examples/privategpt/ingest.py
Executable file
@@ -0,0 +1,161 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import glob
|
||||
from typing import List
|
||||
from multiprocessing import Pool
|
||||
from tqdm import tqdm
|
||||
|
||||
from langchain.document_loaders import (
|
||||
CSVLoader,
|
||||
EverNoteLoader,
|
||||
PyMuPDFLoader,
|
||||
TextLoader,
|
||||
UnstructuredEmailLoader,
|
||||
UnstructuredEPubLoader,
|
||||
UnstructuredHTMLLoader,
|
||||
UnstructuredMarkdownLoader,
|
||||
UnstructuredODTLoader,
|
||||
UnstructuredPowerPointLoader,
|
||||
UnstructuredWordDocumentLoader,
|
||||
)
|
||||
|
||||
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
||||
from langchain.vectorstores import Chroma
|
||||
from langchain.embeddings import HuggingFaceEmbeddings
|
||||
from langchain.docstore.document import Document
|
||||
from constants import CHROMA_SETTINGS
|
||||
|
||||
|
||||
# Load environment variables
|
||||
persist_directory = os.environ.get('PERSIST_DIRECTORY', 'db')
|
||||
source_directory = os.environ.get('SOURCE_DIRECTORY', 'source_documents')
|
||||
embeddings_model_name = os.environ.get('EMBEDDINGS_MODEL_NAME', 'all-MiniLM-L6-v2')
|
||||
chunk_size = 500
|
||||
chunk_overlap = 50
|
||||
|
||||
# Custom document loaders
|
||||
class MyElmLoader(UnstructuredEmailLoader):
|
||||
"""Wrapper to fallback to text/plain when default does not work"""
|
||||
|
||||
def load(self) -> List[Document]:
|
||||
"""Wrapper adding fallback for elm without html"""
|
||||
try:
|
||||
try:
|
||||
doc = UnstructuredEmailLoader.load(self)
|
||||
except ValueError as e:
|
||||
if 'text/html content not found in email' in str(e):
|
||||
# Try plain text
|
||||
self.unstructured_kwargs["content_source"]="text/plain"
|
||||
doc = UnstructuredEmailLoader.load(self)
|
||||
else:
|
||||
raise
|
||||
except Exception as e:
|
||||
# Add file_path to exception message
|
||||
raise type(e)(f"{self.file_path}: {e}") from e
|
||||
|
||||
return doc
|
||||
|
||||
|
||||
# Map file extensions to document loaders and their arguments
|
||||
LOADER_MAPPING = {
|
||||
".csv": (CSVLoader, {}),
|
||||
# ".docx": (Docx2txtLoader, {}),
|
||||
".doc": (UnstructuredWordDocumentLoader, {}),
|
||||
".docx": (UnstructuredWordDocumentLoader, {}),
|
||||
".enex": (EverNoteLoader, {}),
|
||||
".eml": (MyElmLoader, {}),
|
||||
".epub": (UnstructuredEPubLoader, {}),
|
||||
".html": (UnstructuredHTMLLoader, {}),
|
||||
".md": (UnstructuredMarkdownLoader, {}),
|
||||
".odt": (UnstructuredODTLoader, {}),
|
||||
".pdf": (PyMuPDFLoader, {}),
|
||||
".ppt": (UnstructuredPowerPointLoader, {}),
|
||||
".pptx": (UnstructuredPowerPointLoader, {}),
|
||||
".txt": (TextLoader, {"encoding": "utf8"}),
|
||||
# Add more mappings for other file extensions and loaders as needed
|
||||
}
|
||||
|
||||
|
||||
def load_single_document(file_path: str) -> List[Document]:
|
||||
ext = "." + file_path.rsplit(".", 1)[-1]
|
||||
if ext in LOADER_MAPPING:
|
||||
loader_class, loader_args = LOADER_MAPPING[ext]
|
||||
loader = loader_class(file_path, **loader_args)
|
||||
return loader.load()
|
||||
|
||||
raise ValueError(f"Unsupported file extension '{ext}'")
|
||||
|
||||
def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
|
||||
"""
|
||||
Loads all documents from the source documents directory, ignoring specified files
|
||||
"""
|
||||
all_files = []
|
||||
for ext in LOADER_MAPPING:
|
||||
all_files.extend(
|
||||
glob.glob(os.path.join(source_dir, f"**/*{ext}"), recursive=True)
|
||||
)
|
||||
filtered_files = [file_path for file_path in all_files if file_path not in ignored_files]
|
||||
|
||||
with Pool(processes=os.cpu_count()) as pool:
|
||||
results = []
|
||||
with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
|
||||
for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
|
||||
results.extend(docs)
|
||||
pbar.update()
|
||||
|
||||
return results
|
||||
|
||||
def process_documents(ignored_files: List[str] = []) -> List[Document]:
|
||||
"""
|
||||
Load documents and split in chunks
|
||||
"""
|
||||
print(f"Loading documents from {source_directory}")
|
||||
documents = load_documents(source_directory, ignored_files)
|
||||
if not documents:
|
||||
print("No new documents to load")
|
||||
exit(0)
|
||||
print(f"Loaded {len(documents)} new documents from {source_directory}")
|
||||
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
|
||||
texts = text_splitter.split_documents(documents)
|
||||
print(f"Split into {len(texts)} chunks of text (max. {chunk_size} tokens each)")
|
||||
return texts
|
||||
|
||||
def does_vectorstore_exist(persist_directory: str) -> bool:
|
||||
"""
|
||||
Checks if vectorstore exists
|
||||
"""
|
||||
if os.path.exists(os.path.join(persist_directory, 'index')):
|
||||
if os.path.exists(os.path.join(persist_directory, 'chroma-collections.parquet')) and os.path.exists(os.path.join(persist_directory, 'chroma-embeddings.parquet')):
|
||||
list_index_files = glob.glob(os.path.join(persist_directory, 'index/*.bin'))
|
||||
list_index_files += glob.glob(os.path.join(persist_directory, 'index/*.pkl'))
|
||||
# At least 3 documents are needed in a working vectorstore
|
||||
if len(list_index_files) > 3:
|
||||
return True
|
||||
return False
|
||||
|
||||
def main():
|
||||
# Create embeddings
|
||||
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
|
||||
|
||||
if does_vectorstore_exist(persist_directory):
|
||||
# Update and store locally vectorstore
|
||||
print(f"Appending to existing vectorstore at {persist_directory}")
|
||||
db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
|
||||
collection = db.get()
|
||||
texts = process_documents([metadata['source'] for metadata in collection['metadatas']])
|
||||
print(f"Creating embeddings. May take some minutes...")
|
||||
db.add_documents(texts)
|
||||
else:
|
||||
# Create and store locally vectorstore
|
||||
print("Creating new vectorstore")
|
||||
texts = process_documents()
|
||||
print(f"Creating embeddings. May take some minutes...")
|
||||
db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)
|
||||
db.persist()
|
||||
db = None
|
||||
|
||||
print(f"Ingestion complete! You can now run privateGPT.py to query your documents")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
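
The `# Add more mappings ...` comment in `LOADER_MAPPING` above is the intended extension point for new file types. Here is a minimal sketch of registering one more plain-text extension without editing `ingest.py` itself; the `.log` extension and the wrapper file name are purely illustrative, and the sketch assumes it is run from `examples/privategpt` so that `ingest` is importable:

```python
#!/usr/bin/env python3
# ingest_extra.py -- illustrative wrapper around ingest.py (not part of the diff above)
from langchain.document_loaders import TextLoader

import ingest  # reuses ingest.py's LOADER_MAPPING and main()

# Treat ".log" files like ".txt": plain UTF-8 text fed through the same splitter.
ingest.LOADER_MAPPING[".log"] = (TextLoader, {"encoding": "utf8"})

if __name__ == "__main__":
    ingest.main()  # same flow as `python ingest.py`, now aware of .log files
```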
3833
examples/privategpt/poetry.lock
generated
Normal file
File diff suppressed because it is too large
71
examples/privategpt/privateGPT.py
Executable file
@@ -0,0 +1,71 @@
#!/usr/bin/env python3
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from langchain.llms import Ollama
import os
import argparse
import time

model = os.environ.get("MODEL", "llama2-uncensored")
# For embeddings model, the example uses a sentence-transformers model
# https://www.sbert.net/docs/pretrained_models.html
# "The all-mpnet-base-v2 model provides the best quality, while all-MiniLM-L6-v2 is 5 times faster and still offers good quality."
embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME", "all-MiniLM-L6-v2")
persist_directory = os.environ.get("PERSIST_DIRECTORY", "db")
target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS', 4))

from constants import CHROMA_SETTINGS


def main():
    # Parse the command line arguments
    args = parse_arguments()
    embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
    db = Chroma(persist_directory=persist_directory, embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
    retriever = db.as_retriever(search_kwargs={"k": target_source_chunks})
    # activate/deactivate the streaming StdOut callback for LLMs
    callbacks = [] if args.mute_stream else [StreamingStdOutCallbackHandler()]

    llm = Ollama(model=model, callbacks=callbacks)

    qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=not args.hide_source)
    # Interactive questions and answers
    while True:
        query = input("\nEnter a query: ")
        if query == "exit":
            break
        if query.strip() == "":
            continue

        # Get the answer from the chain
        start = time.time()
        res = qa(query)
        answer, docs = res['result'], [] if args.hide_source else res['source_documents']
        end = time.time()

        # Print the result
        print("\n\n> Question:")
        print(query)
        print(answer)

        # Print the relevant sources used for the answer
        for document in docs:
            print("\n> " + document.metadata["source"] + ":")
            print(document.page_content)


def parse_arguments():
    parser = argparse.ArgumentParser(description='privateGPT: Ask questions to your documents without an internet connection, '
                                                 'using the power of LLMs.')
    parser.add_argument("--hide-source", "-S", action='store_true',
                        help='Use this flag to disable printing of source documents used for answers.')

    parser.add_argument("--mute-stream", "-M",
                        action='store_true',
                        help='Use this flag to disable the streaming StdOut callback for LLMs.')

    return parser.parse_args()


if __name__ == "__main__":
    main()
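
`privateGPT.py` wires the embeddings, the Chroma vectorstore, and the Ollama LLM together inside an interactive loop. For clarity, here is a minimal sketch of the same chain used non-interactively to answer a single question; it assumes the `db` vectorstore has already been built by `ingest.py`, and the model name, directory, and `k=4` simply mirror the defaults in the file above:

```python
#!/usr/bin/env python3
# One-shot query using the same components as privateGPT.py (illustrative sketch).
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import Ollama
from langchain.vectorstores import Chroma

from constants import CHROMA_SETTINGS

embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma(persist_directory="db", embedding_function=embeddings,
            client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": 4})

qa = RetrievalQA.from_chain_type(llm=Ollama(model="llama2-uncensored"),
                                 chain_type="stuff", retriever=retriever)

print(qa("How many locations does WeWork have?")["result"])
```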
26
examples/privategpt/pyproject.toml
Normal file
@@ -0,0 +1,26 @@
[tool.poetry]
name = "privategpt"
version = "0.1.0"
description = ""
authors = ["Ivan Martinez <ivanmartit@gmail.com>"]
license = "Apache Version 2.0"
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.10"
langchain = "0.0.261"
gpt4all = "^1.0.3"
chromadb = "^0.3.26"
PyMuPDF = "^1.22.5"
python-dotenv = "^1.0.0"
unstructured = "^0.8.0"
extract-msg = "^0.41.5"
tabulate = "^0.9.0"
pandoc = "^2.3"
pypandoc = "^1.11"
tqdm = "^4.65.0"
sentence-transformers = "^2.2.2"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
2002
examples/privategpt/requirements.txt
Normal file
File diff suppressed because it is too large
38
examples/python/client.py
Normal file
@@ -0,0 +1,38 @@
import json
import requests

# NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
model = 'llama2'  # TODO: update this for whatever model you wish to use


def generate(prompt, context):
    r = requests.post('http://localhost:11434/api/generate',
                      json={
                          'model': model,
                          'prompt': prompt,
                          'context': context,
                      },
                      stream=True)
    r.raise_for_status()

    for line in r.iter_lines():
        body = json.loads(line)
        response_part = body.get('response', '')
        # the response streams one token at a time, print that as we receive it
        print(response_part, end='', flush=True)

        if 'error' in body:
            raise Exception(body['error'])

        if body.get('done', False):
            return body['context']


def main():
    context = []  # the context stores a conversation history, you can use this to make the model more context aware
    while True:
        user_input = input("Enter a prompt: ")
        print()
        context = generate(user_input, context)
        print()


if __name__ == "__main__":
    main()
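
Because `generate()` returns the `context` array from the final streamed response, conversation state can also be carried across calls outside the interactive loop. A minimal sketch, assuming it is saved next to `client.py`; the prompts are illustrative:

```python
# Illustrative follow-up to client.py: two chained, non-interactive generations.
import client  # safe to import: client.py only runs main() under __main__

context = client.generate("Why is the sky blue?", [])  # fresh conversation
print()
context = client.generate("Summarize that in one sentence.", context)  # reuses history
print()
```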
183
format/openssh.go
Normal file
@@ -0,0 +1,183 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Code originally from https://go-review.googlesource.com/c/crypto/+/218620

// TODO: replace with upstream once the above change is merged and released.

package format

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"encoding/binary"
	"encoding/pem"
	"fmt"
	"math/big"

	"golang.org/x/crypto/ssh"
)

const privateKeyAuthMagic = "openssh-key-v1\x00"

type openSSHEncryptedPrivateKey struct {
	CipherName string
	KDFName    string
	KDFOptions string
	KeysCount  uint32
	PubKey     []byte
	KeyBlocks  []byte
}

type openSSHPrivateKey struct {
	Check1  uint32
	Check2  uint32
	Keytype string
	Rest    []byte `ssh:"rest"`
}

type openSSHRSAPrivateKey struct {
	N       *big.Int
	E       *big.Int
	D       *big.Int
	Iqmp    *big.Int
	P       *big.Int
	Q       *big.Int
	Comment string
	Pad     []byte `ssh:"rest"`
}

type openSSHECDSAPrivateKey struct {
	Curve   string
	Pub     []byte
	D       *big.Int
	Comment string
	Pad     []byte `ssh:"rest"`
}

type openSSHEd25519PrivateKey struct {
	Pub     []byte
	Priv    []byte
	Comment string
	Pad     []byte `ssh:"rest"`
}

func OpenSSHPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) {
	var check uint32
	if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil {
		return nil, err
	}

	var pk1 openSSHPrivateKey
	pk1.Check1 = check
	pk1.Check2 = check

	var w openSSHEncryptedPrivateKey
	w.KeysCount = 1

	if k, ok := key.(*ed25519.PrivateKey); ok {
		key = *k
	}

	switch k := key.(type) {
	case *rsa.PrivateKey:
		e := new(big.Int).SetInt64(int64(k.E))

		key := openSSHRSAPrivateKey{
			N:       k.N,
			E:       e,
			D:       k.D,
			Iqmp:    k.Precomputed.Qinv,
			P:       k.Primes[0],
			Q:       k.Primes[1],
			Comment: comment,
		}

		pk1.Keytype = ssh.KeyAlgoRSA
		pk1.Rest = ssh.Marshal(key)

		w.PubKey = ssh.Marshal(struct {
			KeyType string
			E       *big.Int
			N       *big.Int
		}{
			ssh.KeyAlgoRSA, e, k.N,
		})
	case *ecdsa.PrivateKey:
		var curve, keytype string
		switch name := k.Curve.Params().Name; name {
		case "P-256":
			curve = "nistp256"
			keytype = ssh.KeyAlgoECDSA256
		case "P-384":
			curve = "nistp384"
			keytype = ssh.KeyAlgoECDSA384
		case "P-521":
			curve = "nistp521"
			keytype = ssh.KeyAlgoECDSA521
		default:
			return nil, fmt.Errorf("ssh: unknown curve %q", name)
		}

		pub := elliptic.Marshal(k.Curve, k.X, k.Y)

		key := openSSHECDSAPrivateKey{
			Curve:   curve,
			Pub:     pub,
			D:       k.D,
			Comment: comment,
		}

		pk1.Keytype = keytype
		pk1.Rest = ssh.Marshal(key)

		w.PubKey = ssh.Marshal(struct {
			KeyType string
			Curve   string
			Pub     []byte
		}{
			keytype, curve, pub,
		})
	case ed25519.PrivateKey:
		pub, priv := k[32:], k
		key := openSSHEd25519PrivateKey{
			Pub:     pub,
			Priv:    priv,
			Comment: comment,
		}

		pk1.Keytype = ssh.KeyAlgoED25519
		pk1.Rest = ssh.Marshal(key)

		w.PubKey = ssh.Marshal(struct {
			KeyType string
			Pub     []byte
		}{
			ssh.KeyAlgoED25519, pub,
		})
	default:
		return nil, fmt.Errorf("ssh: unknown key type %T", k)
	}

	w.KeyBlocks = openSSHPadding(ssh.Marshal(pk1), 8)

	w.CipherName, w.KDFName, w.KDFOptions = "none", "none", ""

	return &pem.Block{
		Type:  "OPENSSH PRIVATE KEY",
		Bytes: append([]byte(privateKeyAuthMagic), ssh.Marshal(w)...),
	}, nil
}

func openSSHPadding(block []byte, blocksize int) []byte {
	for i, j := 0, len(block); (j+i)%blocksize != 0; i++ {
		block = append(block, byte(i+1))
	}

	return block
}
1
go.mod
@@ -32,6 +32,7 @@ require (
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect

2
go.sum
@@ -78,6 +78,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
@@ -420,6 +420,14 @@ static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node)
|
||||
if (parent == NULL) {
|
||||
break;
|
||||
}
|
||||
|
||||
// if the node's data is external, then we cannot re-use it
|
||||
if ((char *) parent->data < (char *) alloc->data ||
|
||||
(char *) parent->data >= ((char *) alloc->data + alloc->size)) {
|
||||
AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
|
||||
continue;
|
||||
}
|
||||
|
||||
struct hash_node * p_hn = hash_get(ht, parent);
|
||||
if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) {
|
||||
if (ggml_is_view(parent)) {
|
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,7 +1,7 @@
|
||||
//go:build darwin
|
||||
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,7 +1,7 @@
|
||||
//go:build darwin
|
||||
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
@@ -35,6 +35,11 @@
|
||||
#import <Metal/Metal.h>
|
||||
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>
|
||||
|
||||
#undef MIN
|
||||
#undef MAX
|
||||
#define MIN(a, b) ((a) < (b) ? (a) : (b))
|
||||
#define MAX(a, b) ((a) > (b) ? (a) : (b))
|
||||
|
||||
#ifdef GGML_METAL_NDEBUG
|
||||
#define metal_printf(...)
|
||||
#else
|
||||
@@ -43,6 +48,8 @@
|
||||
|
||||
#define UNUSED(x) (void)(x)
|
||||
|
||||
#define GGML_MAX_CONCUR (2*GGML_MAX_NODES)
|
||||
|
||||
struct ggml_metal_buffer {
|
||||
const char * name;
|
||||
|
||||
@@ -64,7 +71,7 @@ struct ggml_metal_context {
|
||||
int n_buffers;
|
||||
struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
|
||||
|
||||
int concur_list[GGML_MAX_NODES];
|
||||
int concur_list[GGML_MAX_CONCUR];
|
||||
int concur_list_len;
|
||||
|
||||
// custom kernels
|
||||
@@ -147,7 +154,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
|
||||
ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
|
||||
if (error) {
|
||||
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
|
||||
exit(1);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
#else
|
||||
@@ -165,7 +172,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
|
||||
NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
|
||||
if (error) {
|
||||
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
|
||||
exit(1);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef GGML_QKK_64
|
||||
@@ -177,7 +184,7 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
|
||||
#endif
|
||||
if (error) {
|
||||
fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
|
||||
exit(1);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@@ -398,15 +405,15 @@ void ggml_metal_graph_find_concurrency(
|
||||
struct ggml_metal_context * ctx,
|
||||
struct ggml_cgraph * gf) {
|
||||
int search_depth = gf->n_nodes; //we only find concurrency in this range to avoid wasting too much time
|
||||
int nodes_unused[GGML_MAX_NODES];
|
||||
int nodes_unused[GGML_MAX_CONCUR];
|
||||
|
||||
for (int i = 0; i < GGML_MAX_NODES; i++) {ctx->concur_list[i] = 0;}
|
||||
for (int i = 0; i < gf->n_nodes; i++) {nodes_unused[i] = 1;}
|
||||
for (int i = 0; i < GGML_MAX_CONCUR; i++) { ctx->concur_list[i] = 0; }
|
||||
for (int i = 0; i < gf->n_nodes; i++) { nodes_unused[i] = 1; }
|
||||
ctx->concur_list_len = 0;
|
||||
|
||||
int n_left = gf->n_nodes;
|
||||
int n_start = 0; // all nodes before n_start at nodes_unused array have been sorted and store back to ctx->concur_list
|
||||
int level_pos = 0; // at ctx->concur_list, the last layer (level) ends at level_pos
|
||||
int n_left = gf->n_nodes;
|
||||
int n_start = 0; // all nodes before n_start at nodes_unused array have been sorted and store back to ctx->concur_list
|
||||
int level_pos = 0; // at ctx->concur_list, the last layer (level) ends at level_pos
|
||||
|
||||
while (n_left > 0) {
|
||||
// number of nodes at a layer (that can be issued concurrently)
|
||||
@@ -414,28 +421,40 @@ void ggml_metal_graph_find_concurrency(
|
||||
for (int i = n_start; i < ((n_start + search_depth > gf->n_nodes) ? gf->n_nodes : n_start + search_depth); i++) {
|
||||
if (nodes_unused[i]) {
|
||||
// if the requirements for gf->nodes[i] are satisfied
|
||||
int exe_flag=1;
|
||||
int exe_flag = 1;
|
||||
|
||||
// scan all srcs
|
||||
for (int src_ind = 0; src_ind < GGML_MAX_SRC; src_ind++) {
|
||||
struct ggml_tensor * src_cur = gf->nodes[i]->src[src_ind];
|
||||
if (src_cur) {
|
||||
// if is leaf nodes it's satisfied.
|
||||
if (src_cur->op == GGML_OP_NONE && src_cur->grad == NULL) {continue;}
|
||||
// TODO: ggml_is_leaf()
|
||||
if (src_cur->op == GGML_OP_NONE && src_cur->grad == NULL) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// otherwise this src should be the output from previous nodes.
|
||||
int is_found = 0;
|
||||
|
||||
// scan 2*search_depth back because we inserted barrier.
|
||||
for (int j = ((level_pos - 2*search_depth) < 0 ? 0 : (level_pos - 2*search_depth)); j < level_pos; j++) {
|
||||
if (gf->nodes[ctx->concur_list[j]] == src_cur) {is_found = 1; break;}
|
||||
//for (int j = ((level_pos - 2*search_depth) < 0 ? 0 : (level_pos - 2*search_depth)); j < level_pos; j++) {
|
||||
for (int j = MAX(0, level_pos - 2*search_depth); j < level_pos; j++) {
|
||||
if (ctx->concur_list[j] >= 0 && gf->nodes[ctx->concur_list[j]] == src_cur) {
|
||||
is_found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (is_found == 0) {
|
||||
exe_flag = 0;
|
||||
break;
|
||||
}
|
||||
if (is_found == 0) {exe_flag = 0; break;}
|
||||
}
|
||||
}
|
||||
if (exe_flag) {
|
||||
// check if nodes[i]'s data will be overwritten by a node before nodes[i].
|
||||
// if node[5] and node[3] write to the same memory region, then we can't issue node[5] before node[3]
|
||||
int64_t data_start = (int64_t) gf->nodes[i]->data;
|
||||
int64_t length = (int64_t) ggml_nbytes(gf->nodes[i]);
|
||||
int64_t length = (int64_t) ggml_nbytes(gf->nodes[i]);
|
||||
for (int j = n_start; j < i; j++) {
|
||||
if (nodes_unused[j] && gf->nodes[j]->op != GGML_OP_RESHAPE \
|
||||
&& gf->nodes[j]->op != GGML_OP_VIEW \
|
||||
@@ -444,9 +463,9 @@ void ggml_metal_graph_find_concurrency(
|
||||
if (((int64_t)gf->nodes[j]->data) >= data_start + length || \
|
||||
((int64_t)gf->nodes[j]->data) + (int64_t) ggml_nbytes(gf->nodes[j]) <= data_start) {
|
||||
continue;
|
||||
} else {
|
||||
exe_flag = 0;
|
||||
}
|
||||
|
||||
exe_flag = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -463,11 +482,13 @@ void ggml_metal_graph_find_concurrency(
|
||||
ctx->concur_list[level_pos + concurrency] = -1;
|
||||
ctx->concur_list_len++;
|
||||
// jump all sorted nodes at nodes_bak
|
||||
while (!nodes_unused[n_start]) {n_start++;}
|
||||
while (!nodes_unused[n_start]) {
|
||||
n_start++;
|
||||
}
|
||||
level_pos += concurrency + 1;
|
||||
}
|
||||
|
||||
if (ctx->concur_list_len > GGML_MAX_NODES) {
|
||||
if (ctx->concur_list_len > GGML_MAX_CONCUR) {
|
||||
fprintf(stderr, "%s: too many elements for metal ctx->concur_list!\n", __func__);
|
||||
}
|
||||
}
|
||||
@@ -481,7 +502,7 @@ void ggml_metal_graph_compute(
|
||||
// else fallback to serial dispatch
|
||||
MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;
|
||||
|
||||
const bool has_concur = ctx->concur_list_len && ctx->concur_list_len <= GGML_MAX_NODES;
|
||||
const bool has_concur = ctx->concur_list_len && ctx->concur_list_len <= GGML_MAX_CONCUR;
|
||||
|
||||
const int n_nodes = has_concur ? ctx->concur_list_len : gf->n_nodes;
|
||||
edesc.dispatchType = has_concur ? MTLDispatchTypeConcurrent : MTLDispatchTypeSerial;
|
@@ -1,7 +1,7 @@
|
||||
//go:build darwin
|
||||
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,7 +1,7 @@
|
||||
//go:build mpi
|
||||
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,7 +1,7 @@
|
||||
//go:build mpi
|
||||
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,7 +1,7 @@
|
||||
//go:build opencl
|
||||
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,7 +1,7 @@
|
||||
//go:build opencl
|
||||
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
@@ -221,8 +221,8 @@ typedef void * thread_ret_t;
|
||||
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
|
||||
#define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr)
|
||||
#else
|
||||
inline static void* ggml_aligned_malloc(size_t size) {
|
||||
void* aligned_memory = NULL;
|
||||
inline static void * ggml_aligned_malloc(size_t size) {
|
||||
void * aligned_memory = NULL;
|
||||
#ifdef GGML_USE_METAL
|
||||
int result = posix_memalign(&aligned_memory, getpagesize(), size);
|
||||
#else
|
||||
@@ -3837,7 +3837,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
|
||||
"CROSS_ENTROPY_LOSS_BACK",
|
||||
};
|
||||
|
||||
static_assert(GGML_OP_COUNT == 59, "GGML_OP_COUNT != 59");
|
||||
static_assert(GGML_OP_COUNT == 62, "GGML_OP_COUNT != 62");
|
||||
|
||||
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
|
||||
"none",
|
||||
@@ -3909,7 +3909,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
|
||||
"cross_entropy_loss_back(x,y)",
|
||||
};
|
||||
|
||||
static_assert(GGML_OP_COUNT == 59, "GGML_OP_COUNT != 59");
|
||||
static_assert(GGML_OP_COUNT == 62, "GGML_OP_COUNT != 62");
|
||||
|
||||
static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
|
||||
|
||||
@@ -4136,7 +4136,7 @@ size_t ggml_nbytes(const struct ggml_tensor * tensor) {
|
||||
//
|
||||
// is enough, but just in case, adding the second part
|
||||
|
||||
return MAX(tensor->ne[3]*tensor->nb[3], (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]);
|
||||
return GGML_PAD(MAX(tensor->ne[3]*tensor->nb[3], (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type]), GGML_MEM_ALIGN);
|
||||
}
|
||||
|
||||
size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
|
||||
@@ -4279,7 +4279,7 @@ static inline bool ggml_is_padded_1d(const struct ggml_tensor * tensor) {
|
||||
tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
|
||||
}
|
||||
|
||||
static inline bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
|
||||
bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
|
||||
static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
|
||||
|
||||
return
|
||||
@@ -4628,7 +4628,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(
|
||||
/*.ne =*/ { 1, 1, 1, 1 },
|
||||
/*.nb =*/ { 0, 0, 0, 0 },
|
||||
/*.op =*/ GGML_OP_NONE,
|
||||
/*.op_params =*/ {0},
|
||||
/*.op_params =*/ { 0 },
|
||||
/*.is_param =*/ false,
|
||||
/*.grad =*/ NULL,
|
||||
/*.src =*/ { NULL },
|
||||
@@ -4660,6 +4660,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(
|
||||
}
|
||||
|
||||
static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
|
||||
GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
|
||||
assert(params_size <= GGML_MAX_OP_PARAMS);
|
||||
memcpy(tensor->op_params, params, params_size);
|
||||
}
|
||||
@@ -6465,7 +6466,7 @@ struct ggml_tensor * ggml_permute(
|
||||
result->src[0] = a;
|
||||
|
||||
int32_t params[] = { axis0, axis1, axis2, axis3 };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
return result;
|
||||
}
|
||||
@@ -6591,7 +6592,7 @@ static struct ggml_tensor * ggml_diag_mask_inf_impl(
|
||||
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
|
||||
|
||||
int32_t params[] = { n_past, inplace ? 1 : 0 };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_DIAG_MASK_INF;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6631,7 +6632,7 @@ static struct ggml_tensor * ggml_diag_mask_zero_impl(
|
||||
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
|
||||
|
||||
int32_t params[] = { n_past, inplace ? 1 : 0 };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_DIAG_MASK_ZERO;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6747,9 +6748,9 @@ static struct ggml_tensor * ggml_rope_impl(
|
||||
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
|
||||
|
||||
int32_t params[6] = { n_past, n_dims, mode, n_ctx };
|
||||
memcpy(params + 4, &freq_base, sizeof(float));
|
||||
memcpy(params + 4, &freq_base, sizeof(float));
|
||||
memcpy(params + 5, &freq_scale, sizeof(float));
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_ROPE;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6823,7 +6824,7 @@ struct ggml_tensor * ggml_rope_back(
|
||||
struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
|
||||
|
||||
int32_t params[] = { n_past, n_dims, mode, n_ctx };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_ROPE_BACK;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6854,7 +6855,7 @@ struct ggml_tensor * ggml_alibi(
|
||||
|
||||
int32_t op_params[3] = { n_past, n_head };
|
||||
memcpy(op_params + 2, &bias_max, sizeof(float));
|
||||
ggml_set_op_params(result, &op_params, sizeof(op_params));
|
||||
ggml_set_op_params(result, op_params, sizeof(op_params));
|
||||
|
||||
result->op = GGML_OP_ALIBI;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6881,7 +6882,7 @@ struct ggml_tensor * ggml_clamp(
|
||||
struct ggml_tensor * result = ggml_view_tensor(ctx, a);
|
||||
|
||||
float params[] = { min, max };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_CLAMP;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6916,10 +6917,10 @@ GGML_API struct ggml_tensor * ggml_conv_1d(
|
||||
ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
|
||||
a->ne[2], 1, 1,
|
||||
};
|
||||
struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
|
||||
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
|
||||
|
||||
int32_t params[] = { s0, p0, d0 };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_CONV_1D;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6931,10 +6932,10 @@ GGML_API struct ggml_tensor * ggml_conv_1d(
|
||||
|
||||
// ggml_conv_2d
|
||||
|
||||
struct ggml_tensor* ggml_conv_2d(
|
||||
struct ggml_context* ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
struct ggml_tensor * ggml_conv_2d(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
int s0,
|
||||
int s1,
|
||||
int p0,
|
||||
@@ -6955,10 +6956,10 @@ struct ggml_tensor* ggml_conv_2d(
|
||||
ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1),
|
||||
a->ne[3], b->ne[3],
|
||||
};
|
||||
struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
|
||||
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
|
||||
|
||||
int32_t params[] = { s0, s1, p0, p1, d0, d1 };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_CONV_2D;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -6971,7 +6972,7 @@ struct ggml_tensor* ggml_conv_2d(
|
||||
|
||||
// ggml_conv_1d_ph
|
||||
|
||||
struct ggml_tensor* ggml_conv_1d_ph(
|
||||
struct ggml_tensor * ggml_conv_1d_ph(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
@@ -6989,7 +6990,7 @@ static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) {
|
||||
|
||||
// ggml_pool_1d
|
||||
|
||||
struct ggml_tensor* ggml_pool_1d(
|
||||
struct ggml_tensor * ggml_pool_1d(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
enum ggml_op_pool op,
|
||||
@@ -7008,10 +7009,10 @@ struct ggml_tensor* ggml_pool_1d(
|
||||
ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
|
||||
a->ne[1],
|
||||
};
|
||||
struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
|
||||
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
|
||||
|
||||
int32_t params[] = { op, k0, s0, p0 };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_POOL_1D;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -7022,7 +7023,7 @@ struct ggml_tensor* ggml_pool_1d(
|
||||
|
||||
// ggml_pool_2d
|
||||
|
||||
struct ggml_tensor* ggml_pool_2d(
|
||||
struct ggml_tensor * ggml_pool_2d(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
enum ggml_op_pool op,
|
||||
@@ -7045,10 +7046,10 @@ struct ggml_tensor* ggml_pool_2d(
|
||||
ggml_calc_pool_output_size(a->ne[1], k1, s1, p1),
|
||||
a->ne[2],
|
||||
};
|
||||
struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
|
||||
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
|
||||
|
||||
int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_POOL_2D;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -7216,7 +7217,7 @@ struct ggml_tensor * ggml_win_part(
|
||||
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
|
||||
|
||||
int32_t params[] = { npx, npy, w };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_WIN_PART;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -7246,7 +7247,7 @@ struct ggml_tensor * ggml_win_unpart(
|
||||
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 3, ne);
|
||||
|
||||
int32_t params[] = { w };
|
||||
ggml_set_op_params(result, ¶ms, sizeof(params));
|
||||
ggml_set_op_params(result, params, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_WIN_UNPART;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
@@ -7375,7 +7376,7 @@ struct ggml_tensor * ggml_map_binary_inplace_f32(
|
||||
return ggml_map_binary_impl_f32(ctx, a, b, fun, true);
|
||||
}
|
||||
|
||||
// ggml_map_custom1
|
||||
// ggml_map_custom1_f32
|
||||
|
||||
static struct ggml_tensor * ggml_map_custom1_impl_f32(
|
||||
struct ggml_context * ctx,
|
||||
@@ -7392,7 +7393,7 @@ static struct ggml_tensor * ggml_map_custom1_impl_f32(
|
||||
|
||||
ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
|
||||
|
||||
result->op = GGML_OP_MAP_CUSTOM1;
|
||||
result->op = GGML_OP_MAP_CUSTOM1_F32;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
result->src[0] = a;
|
||||
|
||||
@@ -7413,7 +7414,7 @@ struct ggml_tensor * ggml_map_custom1_inplace_f32(
|
||||
return ggml_map_custom1_impl_f32(ctx, a, fun, true);
|
||||
}
|
||||
|
||||
// ggml_map_custom2
|
||||
// ggml_map_custom2_f32
|
||||
|
||||
static struct ggml_tensor * ggml_map_custom2_impl_f32(
|
||||
struct ggml_context * ctx,
|
||||
@@ -7431,7 +7432,7 @@ static struct ggml_tensor * ggml_map_custom2_impl_f32(
|
||||
|
||||
ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
|
||||
|
||||
result->op = GGML_OP_MAP_CUSTOM2;
|
||||
result->op = GGML_OP_MAP_CUSTOM2_F32;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
result->src[0] = a;
|
||||
result->src[1] = b;
|
||||
@@ -7455,7 +7456,7 @@ struct ggml_tensor * ggml_map_custom2_inplace_f32(
|
||||
return ggml_map_custom2_impl_f32(ctx, a, b, fun, true);
|
||||
}
|
||||
|
||||
// ggml_map_custom3
|
||||
// ggml_map_custom3_f32
|
||||
|
||||
static struct ggml_tensor * ggml_map_custom3_impl_f32(
|
||||
struct ggml_context * ctx,
|
||||
@@ -7474,7 +7475,7 @@ static struct ggml_tensor * ggml_map_custom3_impl_f32(
|
||||
|
||||
ggml_set_op_params(result, (const void *) &fun, sizeof(fun));
|
||||
|
||||
result->op = GGML_OP_MAP_CUSTOM3;
|
||||
result->op = GGML_OP_MAP_CUSTOM3_F32;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
result->src[0] = a;
|
||||
result->src[1] = b;
|
||||
@@ -7501,6 +7502,190 @@ struct ggml_tensor * ggml_map_custom3_inplace_f32(
|
||||
return ggml_map_custom3_impl_f32(ctx, a, b, c, fun, true);
|
||||
}
|
||||
|
||||
// ggml_map_custom1
|
||||
struct ggml_map_custom1_op_params {
|
||||
ggml_custom1_op_t fun;
|
||||
int n_tasks;
|
||||
void * userdata;
|
||||
};
|
||||
|
||||
static struct ggml_tensor * ggml_map_custom1_impl(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
const ggml_custom1_op_t fun,
|
||||
int n_tasks,
|
||||
void * userdata,
|
||||
bool inplace) {
|
||||
GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);
|
||||
|
||||
bool is_node = false;
|
||||
|
||||
if (!inplace && a->grad) {
|
||||
is_node = true;
|
||||
}
|
||||
|
||||
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
|
||||
|
||||
struct ggml_map_custom1_op_params params = {
|
||||
/*.fun =*/ fun,
|
||||
/*.n_tasks =*/ n_tasks,
|
||||
/*.userdata =*/ userdata
|
||||
};
|
||||
ggml_set_op_params(result, (const void *) ¶ms, sizeof(params));
|
||||
|
||||
result->op = GGML_OP_MAP_CUSTOM1;
|
||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
||||
result->src[0] = a;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
struct ggml_tensor * ggml_map_custom1(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
const ggml_custom1_op_t fun,
|
||||
int n_tasks,
|
||||
void * userdata) {
|
||||
return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, false);
|
||||
}
|
||||
|
||||
struct ggml_tensor * ggml_map_custom1_inplace(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
const ggml_custom1_op_t fun,
|
||||
int n_tasks,
|
||||
void * userdata) {
|
||||
return ggml_map_custom1_impl(ctx, a, fun, n_tasks, userdata, true);
|
||||
}
|
||||
|
||||
// ggml_map_custom2
|
||||
|
||||
struct ggml_map_custom2_op_params {
|
||||
ggml_custom2_op_t fun;
|
||||
int n_tasks;
|
||||
void * userdata;
|
||||
};
|
||||
|
||||
static struct ggml_tensor * ggml_map_custom2_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata,
        bool inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom2_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op = GGML_OP_MAP_CUSTOM2;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;

    return result;
}

struct ggml_tensor * ggml_map_custom2(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom2_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        const ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom2_impl(ctx, a, b, fun, n_tasks, userdata, true);
}

// ggml_map_custom3

struct ggml_map_custom3_op_params {
    ggml_custom3_op_t fun;
    int n_tasks;
    void * userdata;
};

static struct ggml_tensor * ggml_map_custom3_impl(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata,
        bool inplace) {
    GGML_ASSERT(n_tasks == GGML_N_TASKS_MAX || n_tasks > 0);

    bool is_node = false;

    if (!inplace && (a->grad || b->grad || c->grad)) {
        is_node = true;
    }

    struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);

    struct ggml_map_custom3_op_params params = {
        /*.fun      =*/ fun,
        /*.n_tasks  =*/ n_tasks,
        /*.userdata =*/ userdata
    };
    ggml_set_op_params(result, (const void *) &params, sizeof(params));

    result->op = GGML_OP_MAP_CUSTOM3;
    result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
    result->src[0] = a;
    result->src[1] = b;
    result->src[2] = c;

    return result;
}

struct ggml_tensor * ggml_map_custom3(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, false);
}

struct ggml_tensor * ggml_map_custom3_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        const ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata) {
    return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
}

// ggml_cross_entropy_loss

struct ggml_tensor * ggml_cross_entropy_loss(
@@ -9309,8 +9494,8 @@ static void ggml_compute_forward_sum_rows_f32(
|
||||
for (int64_t i3 = 0; i3 < ne03; i3++) {
|
||||
for (int64_t i2 = 0; i2 < ne02; i2++) {
|
||||
for (int64_t i1 = 0; i1 < ne01; i1++) {
|
||||
float* src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
|
||||
float* dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
|
||||
float * src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
|
||||
float * dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
|
||||
float row_sum = 0;
|
||||
ggml_vec_sum_f32(ne00, &row_sum, src_row);
|
||||
dst_row[0] = row_sum;
|
||||
@@ -10572,72 +10757,96 @@ static void ggml_compute_forward_mul_mat(
|
||||
return;
|
||||
}
|
||||
|
||||
// parallelize by src0 rows
|
||||
const int64_t dr = (ne01 + nth - 1)/nth;
|
||||
|
||||
const int64_t ir10 = dr*ith;
|
||||
const int64_t ir11 = MIN(ir10 + dr, ne01);
|
||||
|
||||
// src1 rows
|
||||
const int64_t nr1 = ne11*ne12*ne13;
|
||||
|
||||
const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
|
||||
const size_t row_size = ne10*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];
|
||||
|
||||
for (int64_t ir1 = 0; ir1 < nr1; ++ir1) {
|
||||
const int64_t i13 = (ir1/(ne12*ne11));
|
||||
const int64_t i12 = (ir1 - i13*ne12*ne11)/ne11;
|
||||
const int64_t i11 = (ir1 - i13*ne12*ne11 - i12*ne11);
|
||||
const int64_t nr0 = ne01; // src0 rows
|
||||
const int64_t nr1 = ne11*ne12*ne13; // src1 rows
|
||||
|
||||
const int64_t ir0 = (ir1/ne11)%(ne02*ne03);
|
||||
const int64_t i03 = (ir0/(ne02));
|
||||
// Hack for "Falcon multi-query-attention key stutter" / alternative to ggml_repeat2.
|
||||
// See https://github.com/ggerganov/llama.cpp/issues/1602#issuecomment-1606087470:
|
||||
// GG: this is likely the correct way to broadcast, though need some more thought
|
||||
// therefore leaving the comments to remind us for now
|
||||
const int64_t i02 = (i12 / (ne12 / ne02));
|
||||
// Original from PR/224 (and also essential/correct for non-broadcast matmuls in Falcon)
|
||||
// const int64_t i02 = (ir0 - i03*ne02);
|
||||
//printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1);
|
||||
|
||||
const int64_t i1 = i11;
|
||||
const int64_t i2 = i12;
|
||||
const int64_t i3 = i13;
|
||||
// distribute the thread work across the inner or outer loop based on which one is larger
|
||||
|
||||
const char * src0_row = (const char *) src0->data + ( 0 + i02*nb02 + i03*nb03 );
|
||||
const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
|
||||
const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
|
||||
|
||||
// desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
|
||||
// if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
|
||||
// the original src1 data pointer, so we should index using the indices directly
|
||||
// TODO: this is a bit of a hack, we should probably have a better way to handle this
|
||||
const char * src1_col = (const char *) wdata +
|
||||
(src1_cont || src1->type != vec_dot_type
|
||||
? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
|
||||
: (i11*nb11 + i12*nb12 + i13*nb13));
|
||||
const int64_t ith0 = ith % nth0;
|
||||
const int64_t ith1 = ith / nth0;
|
||||
|
||||
float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
|
||||
const int64_t dr0 = (nr0 + nth0 - 1)/nth0;
|
||||
const int64_t dr1 = (nr1 + nth1 - 1)/nth1;
|
||||
|
||||
for (int64_t ir = ir10; ir < ir11; ++ir) {
|
||||
vec_dot(ne00, &dst_col[ir], src0_row + ir*nb01, src1_col);
|
||||
}
|
||||
const int64_t ir010 = dr0*ith0;
|
||||
const int64_t ir011 = MIN(ir010 + dr0, nr0);
|
||||
|
||||
const int64_t ir110 = dr1*ith1;
|
||||
const int64_t ir111 = MIN(ir110 + dr1, nr1);
|
||||
|
||||
//printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111);
|
||||
|
||||
// threads with no work simply yield (not sure if it helps)
|
||||
if (ir010 >= ir011 || ir110 >= ir111) {
|
||||
sched_yield();
|
||||
return;
|
||||
}
|
||||
|
||||
//int64_t t1 = ggml_time_us();
|
||||
//static int64_t acc = 0;
|
||||
//acc += t1 - t0;
|
||||
//if (t1 - t0 > 10) {
|
||||
// printf("\n");
|
||||
// printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
|
||||
// printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
|
||||
// printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
|
||||
assert(ne12 % ne02 == 0);
|
||||
assert(ne13 % ne03 == 0);
|
||||
|
||||
// printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
|
||||
//}
|
||||
// broadcast factors
|
||||
const int64_t r2 = ne12/ne02;
|
||||
const int64_t r3 = ne13/ne03;
|
||||
|
||||
// block-tiling attempt
|
||||
const int64_t blck_0 = 16;
|
||||
const int64_t blck_1 = 16;
|
||||
|
||||
// attempt to reduce false-sharing (does not seem to make a difference)
|
||||
float tmp[16];
|
||||
|
||||
for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) {
|
||||
for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) {
|
||||
for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ++ir1) {
|
||||
const int64_t i13 = (ir1/(ne12*ne11));
|
||||
const int64_t i12 = (ir1 - i13*ne12*ne11)/ne11;
|
||||
const int64_t i11 = (ir1 - i13*ne12*ne11 - i12*ne11);
|
||||
|
||||
// broadcast src0 into src1
|
||||
const int64_t i03 = i13/r3;
|
||||
const int64_t i02 = i12/r2;
|
||||
|
||||
const int64_t i1 = i11;
|
||||
const int64_t i2 = i12;
|
||||
const int64_t i3 = i13;
|
||||
|
||||
const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03);
|
||||
|
||||
// desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
|
||||
// if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
|
||||
// the original src1 data pointer, so we should index using the indices directly
|
||||
// TODO: this is a bit of a hack, we should probably have a better way to handle this
|
||||
const char * src1_col = (const char *) wdata +
|
||||
(src1_cont || src1->type != vec_dot_type
|
||||
? (i11 + i12*ne11 + i13*ne12*ne11)*row_size
|
||||
: (i11*nb11 + i12*nb12 + i13*nb13));
|
||||
|
||||
float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3));
|
||||
|
||||
//for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
|
||||
// vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
|
||||
//}
|
||||
|
||||
for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
|
||||
vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
|
||||
}
|
||||
memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
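The rewritten ggml_compute_forward_mul_mat above replaces the old "parallelize by src0 rows only" scheme with a 2D split: all threads go to whichever of the src0 or src1 row counts is larger, and each thread derives its own row ranges from its index. The following standalone sketch only reproduces that index arithmetic (the sizes are made up and it does not depend on ggml):

// Standalone sketch of the thread-partitioning arithmetic used by the new
// mul_mat code above. Only the index math mirrors the diff; the sizes are invented.
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
    const int nth = 4;   // total threads
    const int nr0 = 64;  // src0 rows (ne01)
    const int nr1 = 3;   // src1 rows (ne11*ne12*ne13)

    // put all threads on the larger dimension, as in the diff
    const int nth0 = nr0 > nr1 ? nth : 1;
    const int nth1 = nr0 > nr1 ? 1 : nth;

    for (int ith = 0; ith < nth; ++ith) {
        const int ith0 = ith % nth0;
        const int ith1 = ith / nth0;

        const int dr0 = (nr0 + nth0 - 1)/nth0;  // src0 rows per thread
        const int dr1 = (nr1 + nth1 - 1)/nth1;  // src1 rows per thread

        const int ir010 = dr0*ith0, ir011 = MIN(ir010 + dr0, nr0);
        const int ir110 = dr1*ith1, ir111 = MIN(ir110 + dr1, nr1);

        printf("thread %d: src0 rows [%d,%d), src1 rows [%d,%d)\n",
               ith, ir010, ir011, ir110, ir111);
    }
    return 0;
}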
|
||||
|
||||
|
||||
// ggml_compute_forward_out_prod
|
||||
|
||||
|
||||
static void ggml_compute_forward_out_prod_f32(
|
||||
const struct ggml_compute_params * params,
|
||||
const struct ggml_tensor * src0,
|
||||
@@ -12920,7 +13129,7 @@ static void ggml_compute_forward_pool_1d(
|
||||
const struct ggml_tensor * src0,
|
||||
struct ggml_tensor * dst) {
|
||||
|
||||
const int32_t* opts = (const int32_t*)dst->op_params;
|
||||
const int32_t * opts = (const int32_t *)dst->op_params;
|
||||
enum ggml_op_pool op = opts[0];
|
||||
const int k0 = opts[1];
|
||||
const int s0 = opts[2];
|
||||
@@ -14253,24 +14462,6 @@ static void ggml_compute_forward_map_custom1_f32(
|
||||
fun(dst, a);
|
||||
}
|
||||
|
||||
|
||||
static void ggml_compute_forward_map_custom1(
|
||||
const struct ggml_compute_params * params,
|
||||
const struct ggml_tensor * a,
|
||||
struct ggml_tensor * dst,
|
||||
const ggml_custom1_op_f32_t fun) {
|
||||
switch (a->type) {
|
||||
case GGML_TYPE_F32:
|
||||
{
|
||||
ggml_compute_forward_map_custom1_f32(params, a, dst, fun);
|
||||
} break;
|
||||
default:
|
||||
{
|
||||
GGML_ASSERT(false);
|
||||
} break;
|
||||
}
|
||||
}
|
||||
|
||||
// ggml_compute_forward_map_custom2
|
||||
|
||||
static void ggml_compute_forward_map_custom2_f32(
|
||||
@@ -14289,24 +14480,6 @@ static void ggml_compute_forward_map_custom2_f32(
|
||||
}
|
||||
|
||||
|
||||
static void ggml_compute_forward_map_custom2(
|
||||
const struct ggml_compute_params * params,
|
||||
const struct ggml_tensor * a,
|
||||
const struct ggml_tensor * b,
|
||||
struct ggml_tensor * dst,
|
||||
const ggml_custom2_op_f32_t fun) {
|
||||
switch (a->type) {
|
||||
case GGML_TYPE_F32:
|
||||
{
|
||||
ggml_compute_forward_map_custom2_f32(params, a, b, dst, fun);
|
||||
} break;
|
||||
default:
|
||||
{
|
||||
GGML_ASSERT(false);
|
||||
} break;
|
||||
}
|
||||
}
|
||||
|
||||
// ggml_compute_forward_map_custom3
|
||||
|
||||
static void ggml_compute_forward_map_custom3_f32(
|
||||
@@ -14325,24 +14498,52 @@ static void ggml_compute_forward_map_custom3_f32(
|
||||
fun(dst, a, b, c);
|
||||
}
|
||||
|
||||
// ggml_compute_forward_map_custom1

static void ggml_compute_forward_map_custom1(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
              struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) dst->op_params;

    p->fun(dst, a, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom2

static void ggml_compute_forward_map_custom2(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
              struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) dst->op_params;

    p->fun(dst, a, b, params->ith, params->nth, p->userdata);
}

// ggml_compute_forward_map_custom3

static void ggml_compute_forward_map_custom3(
        const struct ggml_compute_params * params,
        const struct ggml_tensor * a,
        const struct ggml_tensor * b,
        const struct ggml_tensor * c,
        struct ggml_tensor * dst,
        const ggml_custom3_op_f32_t fun) {
    switch (a->type) {
        case GGML_TYPE_F32:
            {
                ggml_compute_forward_map_custom3_f32(params, a, b, c, dst, fun);
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
              struct ggml_tensor * dst) {
    if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
        return;
    }

    struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) dst->op_params;

    p->fun(dst, a, b, c, params->ith, params->nth, p->userdata);
}
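With the new GGML_OP_MAP_CUSTOM* forward paths above, ggml no longer dispatches on tensor type or loops over elements itself; it hands the whole source and destination tensors to the user callback together with the thread index `ith` and thread count `nth`, so the callback does its own partitioning. A minimal sketch of such a callback (the function name and the scaling operation are illustrative, not part of this diff; it assumes contiguous F32 tensors of equal shape):

#include "ggml.h"

// Illustrative ggml_custom1_op_t callback: scales every element of `a` by the
// float pointed to by `userdata`, splitting rows across the worker threads.
static void scale_rows_custom(struct ggml_tensor * dst, const struct ggml_tensor * a,
                              int ith, int nth, void * userdata) {
    const float scale = *(const float *) userdata;

    const int64_t nr  = ggml_nrows(a);          // total rows
    const int64_t dr  = (nr + nth - 1) / nth;   // rows per thread
    const int64_t ir0 = dr * ith;
    const int64_t ir1 = ir0 + dr < nr ? ir0 + dr : nr;

    for (int64_t ir = ir0; ir < ir1; ++ir) {
        const float * src_row = (const float *) ((const char *) a->data   + ir * a->nb[1]);
        float       * dst_row = (float       *) ((      char *) dst->data + ir * dst->nb[1]);
        for (int64_t i = 0; i < a->ne[0]; ++i) {
            dst_row[i] = scale * src_row[i];
        }
    }
}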
|
||||
|
||||
// ggml_compute_forward_cross_entropy_loss
|
||||
@@ -14864,25 +15065,40 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
|
||||
ggml_compute_forward_map_binary(params, tensor->src[0], tensor->src[1], tensor, fun);
|
||||
}
|
||||
break;
|
||||
case GGML_OP_MAP_CUSTOM1:
|
||||
case GGML_OP_MAP_CUSTOM1_F32:
|
||||
{
|
||||
ggml_custom1_op_f32_t fun;
|
||||
memcpy(&fun, tensor->op_params, sizeof(fun));
|
||||
ggml_compute_forward_map_custom1(params, tensor->src[0], tensor, fun);
|
||||
ggml_compute_forward_map_custom1_f32(params, tensor->src[0], tensor, fun);
|
||||
}
|
||||
break;
|
||||
case GGML_OP_MAP_CUSTOM2_F32:
|
||||
{
|
||||
ggml_custom2_op_f32_t fun;
|
||||
memcpy(&fun, tensor->op_params, sizeof(fun));
|
||||
ggml_compute_forward_map_custom2_f32(params, tensor->src[0], tensor->src[1], tensor, fun);
|
||||
}
|
||||
break;
|
||||
case GGML_OP_MAP_CUSTOM3_F32:
|
||||
{
|
||||
ggml_custom3_op_f32_t fun;
|
||||
memcpy(&fun, tensor->op_params, sizeof(fun));
|
||||
ggml_compute_forward_map_custom3_f32(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
|
||||
}
|
||||
break;
|
||||
case GGML_OP_MAP_CUSTOM1:
|
||||
{
|
||||
ggml_compute_forward_map_custom1(params, tensor->src[0], tensor);
|
||||
}
|
||||
break;
|
||||
case GGML_OP_MAP_CUSTOM2:
|
||||
{
|
||||
ggml_custom2_op_f32_t fun;
|
||||
memcpy(&fun, tensor->op_params, sizeof(fun));
|
||||
ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor, fun);
|
||||
ggml_compute_forward_map_custom2(params, tensor->src[0], tensor->src[1], tensor);
|
||||
}
|
||||
break;
|
||||
case GGML_OP_MAP_CUSTOM3:
|
||||
{
|
||||
ggml_custom3_op_f32_t fun;
|
||||
memcpy(&fun, tensor->op_params, sizeof(fun));
|
||||
ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor, fun);
|
||||
ggml_compute_forward_map_custom3(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
|
||||
}
|
||||
break;
|
||||
case GGML_OP_CROSS_ENTROPY_LOSS:
|
||||
@@ -15690,6 +15906,9 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
|
||||
} break;
|
||||
case GGML_OP_MAP_UNARY:
|
||||
case GGML_OP_MAP_BINARY:
|
||||
case GGML_OP_MAP_CUSTOM1_F32:
|
||||
case GGML_OP_MAP_CUSTOM2_F32:
|
||||
case GGML_OP_MAP_CUSTOM3_F32:
|
||||
case GGML_OP_MAP_CUSTOM1:
|
||||
case GGML_OP_MAP_CUSTOM2:
|
||||
case GGML_OP_MAP_CUSTOM3:
|
||||
@@ -16475,12 +16694,39 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
|
||||
case GGML_OP_WIN_UNPART:
|
||||
case GGML_OP_MAP_UNARY:
|
||||
case GGML_OP_MAP_BINARY:
|
||||
case GGML_OP_MAP_CUSTOM1:
|
||||
case GGML_OP_MAP_CUSTOM2:
|
||||
case GGML_OP_MAP_CUSTOM3:
|
||||
case GGML_OP_MAP_CUSTOM1_F32:
|
||||
case GGML_OP_MAP_CUSTOM2_F32:
|
||||
case GGML_OP_MAP_CUSTOM3_F32:
|
||||
{
|
||||
n_tasks = 1;
|
||||
} break;
|
||||
case GGML_OP_MAP_CUSTOM1:
|
||||
{
|
||||
struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
|
||||
if (p->n_tasks == GGML_N_TASKS_MAX) {
|
||||
n_tasks = n_threads;
|
||||
} else {
|
||||
n_tasks = MIN(p->n_tasks, n_threads);
|
||||
}
|
||||
} break;
|
||||
case GGML_OP_MAP_CUSTOM2:
|
||||
{
|
||||
struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
|
||||
if (p->n_tasks == GGML_N_TASKS_MAX) {
|
||||
n_tasks = n_threads;
|
||||
} else {
|
||||
n_tasks = MIN(p->n_tasks, n_threads);
|
||||
}
|
||||
} break;
|
||||
case GGML_OP_MAP_CUSTOM3:
|
||||
{
|
||||
struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
|
||||
if (p->n_tasks == GGML_N_TASKS_MAX) {
|
||||
n_tasks = n_threads;
|
||||
} else {
|
||||
n_tasks = MIN(p->n_tasks, n_threads);
|
||||
}
|
||||
} break;
|
||||
case GGML_OP_CROSS_ENTROPY_LOSS:
|
||||
{
|
||||
n_tasks = n_threads;
|
182  llm/ggml.go  Normal file
@@ -0,0 +1,182 @@
|
||||
package llm
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type ModelFamily string
|
||||
|
||||
type ModelType uint32
|
||||
|
||||
const (
|
||||
ModelType3B ModelType = 26
|
||||
ModelType7B ModelType = 32
|
||||
ModelType13B ModelType = 40
|
||||
ModelType30B ModelType = 60
|
||||
ModelType65B ModelType = 80
|
||||
)
|
||||
|
||||
func (mt ModelType) String() string {
|
||||
switch mt {
|
||||
case ModelType3B:
|
||||
return "3B"
|
||||
case ModelType7B:
|
||||
return "7B"
|
||||
case ModelType13B:
|
||||
return "13B"
|
||||
case ModelType30B:
|
||||
return "30B"
|
||||
case ModelType65B:
|
||||
return "65B"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
type FileType interface {
|
||||
String() string
|
||||
}
|
||||
|
||||
type GGML struct {
|
||||
magic uint32
|
||||
container
|
||||
model
|
||||
}
|
||||
|
||||
type model interface {
|
||||
ModelFamily() ModelFamily
|
||||
ModelType() ModelType
|
||||
FileType() FileType
|
||||
}
|
||||
|
||||
type container interface {
|
||||
Name() string
|
||||
Decode(io.Reader) error
|
||||
}
|
||||
|
||||
type containerGGML struct {
|
||||
}
|
||||
|
||||
func (c *containerGGML) Name() string {
|
||||
return "ggml"
|
||||
}
|
||||
|
||||
func (c *containerGGML) Decode(r io.Reader) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type containerGGMF struct {
|
||||
version uint32
|
||||
}
|
||||
|
||||
func (c *containerGGMF) Name() string {
|
||||
return "ggmf"
|
||||
}
|
||||
|
||||
func (c *containerGGMF) Decode(r io.Reader) error {
|
||||
var version uint32
|
||||
binary.Read(r, binary.LittleEndian, &version)
|
||||
|
||||
switch version {
|
||||
case 1:
|
||||
default:
|
||||
return errors.New("invalid version")
|
||||
}
|
||||
|
||||
c.version = version
|
||||
return nil
|
||||
}
|
||||
|
||||
type containerGGJT struct {
|
||||
version uint32
|
||||
}
|
||||
|
||||
func (c *containerGGJT) Name() string {
|
||||
return "ggjt"
|
||||
}
|
||||
|
||||
func (c *containerGGJT) Decode(r io.Reader) error {
|
||||
var version uint32
|
||||
binary.Read(r, binary.LittleEndian, &version)
|
||||
|
||||
switch version {
|
||||
case 1, 2, 3:
|
||||
default:
|
||||
return errors.New("invalid version")
|
||||
}
|
||||
|
||||
c.version = version
|
||||
return nil
|
||||
}
|
||||
|
||||
type containerLORA struct {
|
||||
version uint32
|
||||
}
|
||||
|
||||
func (c *containerLORA) Name() string {
|
||||
return "ggla"
|
||||
}
|
||||
|
||||
func (c *containerLORA) Decode(r io.Reader) error {
|
||||
var version uint32
|
||||
binary.Read(r, binary.LittleEndian, &version)
|
||||
|
||||
switch version {
|
||||
case 1:
|
||||
default:
|
||||
return errors.New("invalid version")
|
||||
}
|
||||
|
||||
c.version = version
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
	// Magic constant for `ggml` files (unversioned).
	FILE_MAGIC_GGML = 0x67676d6c
	// Magic constant for `ggml` files (versioned, ggmf).
	FILE_MAGIC_GGMF = 0x67676d66
	// Magic constant for `ggml` files (versioned, ggjt).
	FILE_MAGIC_GGJT = 0x67676a74
	// Magic constant for `ggla` files (LoRA adapter).
	FILE_MAGIC_GGLA = 0x67676C61
)

func DecodeGGML(r io.ReadSeeker, hint ModelFamily) (*GGML, error) {
	var ggml GGML
	binary.Read(r, binary.LittleEndian, &ggml.magic)

	switch ggml.magic {
	case FILE_MAGIC_GGML:
		ggml.container = &containerGGML{}
	case FILE_MAGIC_GGMF:
		ggml.container = &containerGGMF{}
	case FILE_MAGIC_GGJT:
		ggml.container = &containerGGJT{}
	case FILE_MAGIC_GGLA:
		ggml.container = &containerLORA{}
	default:
		return nil, errors.New("invalid file magic")
	}

	if err := ggml.Decode(r); err != nil {
		return nil, err
	}

	// different model types may have different layouts for hyperparameters
	switch hint {
	case ModelFamilyLlama:
		var llama llamaModel
		binary.Read(r, binary.LittleEndian, &llama.hyperparameters)
		ggml.model = &llama
		// TODO: sanity check hyperparameters
	default:
		return nil, fmt.Errorf("unsupported model type: %s", hint)
	}

	// final model type
	return &ggml, nil
}
|
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
@@ -209,6 +209,15 @@
# define GGML_API
#endif

// TODO: support for clang
#ifdef __GNUC__
# define GGML_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
#elif defined(_MSC_VER)
# define GGML_DEPRECATED(func, hint) __declspec(deprecated(hint)) func
#else
# define GGML_DEPRECATED(func, hint) func
#endif
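This macro is what lets the older f32 map/custom entry points further down in the header stay callable while steering users toward the new API: on GCC/Clang it attaches the `deprecated` attribute with the hint text, on MSVC it uses `__declspec(deprecated(...))`, and elsewhere it is a no-op. A self-contained sketch of the GCC/Clang branch (the function names here are invented for illustration):

// Toy example of the GCC/Clang branch of GGML_DEPRECATED above.
#define MY_DEPRECATED(func, hint) func __attribute__((deprecated(hint)))

MY_DEPRECATED(int legacy_add(int a, int b), "use fast_add instead");

int legacy_add(int a, int b) { return a + b; }

int main(void) {
    // With -Wdeprecated-declarations (default in GCC/Clang), this call warns:
    // 'legacy_add' is deprecated: use fast_add instead
    return legacy_add(1, 2);
}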
|
||||
|
||||
#include <stdint.h>
|
||||
#include <stddef.h>
|
||||
#include <stdbool.h>
|
||||
@@ -400,6 +409,10 @@ extern "C" {
|
||||
GGML_OP_MAP_UNARY,
|
||||
GGML_OP_MAP_BINARY,
|
||||
|
||||
GGML_OP_MAP_CUSTOM1_F32,
|
||||
GGML_OP_MAP_CUSTOM2_F32,
|
||||
GGML_OP_MAP_CUSTOM3_F32,
|
||||
|
||||
GGML_OP_MAP_CUSTOM1,
|
||||
GGML_OP_MAP_CUSTOM2,
|
||||
GGML_OP_MAP_CUSTOM3,
|
||||
@@ -596,6 +609,8 @@ extern "C" {
|
||||
GGML_API bool ggml_is_contiguous(const struct ggml_tensor * tensor);
|
||||
GGML_API bool ggml_is_permuted (const struct ggml_tensor * tensor);
|
||||
|
||||
GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1);
|
||||
|
||||
// use this to compute the memory overhead of a tensor
|
||||
GGML_API size_t ggml_tensor_overhead(void);
|
||||
|
||||
@@ -1266,7 +1281,7 @@ extern "C" {
|
||||
|
||||
// conv_1d with padding = half
|
||||
// alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
|
||||
GGML_API struct ggml_tensor* ggml_conv_1d_ph(
|
||||
GGML_API struct ggml_tensor * ggml_conv_1d_ph(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
@@ -1279,7 +1294,7 @@ extern "C" {
|
||||
GGML_OP_POOL_COUNT,
|
||||
};
|
||||
|
||||
GGML_API struct ggml_tensor* ggml_pool_1d(
|
||||
GGML_API struct ggml_tensor * ggml_pool_1d(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
enum ggml_op_pool op,
|
||||
@@ -1287,7 +1302,7 @@ extern "C" {
|
||||
int s0, // stride
|
||||
int p0); // padding
|
||||
|
||||
GGML_API struct ggml_tensor* ggml_pool_2d(
|
||||
GGML_API struct ggml_tensor * ggml_pool_2d(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
enum ggml_op_pool op,
|
||||
@@ -1341,15 +1356,6 @@ extern "C" {
|
||||
int h0,
|
||||
int w);
|
||||
|
||||
// custom operators
|
||||
|
||||
typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
|
||||
typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);
|
||||
|
||||
typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
|
||||
typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
|
||||
typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_unary(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
@@ -1360,63 +1366,138 @@ extern "C" {
|
||||
struct ggml_tensor * a,
|
||||
enum ggml_unary_op op);
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_unary_f32(
|
||||
// custom operators
|
||||
|
||||
typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
|
||||
typedef void (*ggml_binary_op_f32_t)(const int, float *, const float *, const float *);
|
||||
|
||||
typedef void (*ggml_custom1_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *);
|
||||
typedef void (*ggml_custom2_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
|
||||
typedef void (*ggml_custom3_op_f32_t)(struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *, const struct ggml_tensor *);
|
||||
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
ggml_unary_op_f32_t fun);
|
||||
ggml_unary_op_f32_t fun),
|
||||
"use ggml_map_custom1 instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_unary_inplace_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
ggml_unary_op_f32_t fun);
|
||||
ggml_unary_op_f32_t fun),
|
||||
"use ggml_map_custom1_inplace instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_binary_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
ggml_binary_op_f32_t fun);
|
||||
ggml_binary_op_f32_t fun),
|
||||
"use ggml_map_custom2 instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_binary_inplace_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
ggml_binary_op_f32_t fun);
|
||||
ggml_binary_op_f32_t fun),
|
||||
"use ggml_map_custom2_inplace instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_custom1_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
ggml_custom1_op_f32_t fun);
|
||||
ggml_custom1_op_f32_t fun),
|
||||
"use ggml_map_custom1 instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom1_inplace_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
ggml_custom1_op_f32_t fun);
|
||||
ggml_custom1_op_f32_t fun),
|
||||
"use ggml_map_custom1_inplace instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_custom2_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
ggml_custom2_op_f32_t fun);
|
||||
ggml_custom2_op_f32_t fun),
|
||||
"use ggml_map_custom2 instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom2_inplace_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
ggml_custom2_op_f32_t fun);
|
||||
ggml_custom2_op_f32_t fun),
|
||||
"use ggml_map_custom2_inplace instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_custom3_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
struct ggml_tensor * c,
|
||||
ggml_custom3_op_f32_t fun);
|
||||
ggml_custom3_op_f32_t fun),
|
||||
"use ggml_map_custom3 instead");
|
||||
|
||||
GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
|
||||
GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_map_custom3_inplace_f32(
|
||||
struct ggml_context * ctx,
|
||||
struct ggml_tensor * a,
|
||||
struct ggml_tensor * b,
|
||||
struct ggml_tensor * c,
|
||||
ggml_custom3_op_f32_t fun);
|
||||
ggml_custom3_op_f32_t fun),
|
||||
"use ggml_map_custom3_inplace instead");
|
||||
|
||||
// custom operators v2

typedef void (*ggml_custom1_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, int ith, int nth, void * userdata);
typedef void (*ggml_custom2_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, int ith, int nth, void * userdata);
typedef void (*ggml_custom3_op_t)(struct ggml_tensor * dst, const struct ggml_tensor * a, const struct ggml_tensor * b, const struct ggml_tensor * c, int ith, int nth, void * userdata);

#define GGML_N_TASKS_MAX -1

GGML_API struct ggml_tensor * ggml_map_custom1(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_custom1_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom1_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        ggml_custom1_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom2(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom2_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        ggml_custom2_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom3(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata);

GGML_API struct ggml_tensor * ggml_map_custom3_inplace(
        struct ggml_context * ctx,
        struct ggml_tensor * a,
        struct ggml_tensor * b,
        struct ggml_tensor * c,
        ggml_custom3_op_t fun,
        int n_tasks,
        void * userdata);
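These declarations are the graph-building side of the new API: the callback receives `ith`/`nth` plus an opaque `userdata` pointer, and `n_tasks` tells the scheduler how many threads to assign to the node (GGML_N_TASKS_MAX means "use all available threads", as handled in ggml_graph_plan earlier in this diff). A hedged usage sketch follows; the callback, buffer size, and thread count are illustrative and not taken from the diff:

#include "ggml.h"

// Illustrative callback: negates each value of `a` into `dst`.
// Assumes contiguous F32 tensors; elements are split across threads by ith/nth.
static void negate_op(struct ggml_tensor * dst, const struct ggml_tensor * a,
                      int ith, int nth, void * userdata) {
    (void) userdata;
    const int64_t n   = ggml_nelements(a);
    const int64_t per = (n + nth - 1) / nth;
    const int64_t i0  = per * ith;
    const int64_t i1  = i0 + per < n ? i0 + per : n;
    const float * x = (const float *) a->data;
    float       * y = (float *) dst->data;
    for (int64_t i = i0; i < i1; ++i) y[i] = -x[i];
}

int main(void) {
    struct ggml_init_params ip = { /*.mem_size =*/ 16*1024*1024, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ false };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    ggml_set_f32(a, 1.0f);

    // GGML_N_TASKS_MAX: let the plan run the op on every available thread.
    struct ggml_tensor * out = ggml_map_custom1(ctx, a, negate_op, GGML_N_TASKS_MAX, NULL);

    struct ggml_cgraph gf = ggml_build_forward(out);
    ggml_graph_compute_with_ctx(ctx, &gf, /*n_threads=*/ 4);

    ggml_free(ctx);
    return 0;
}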
|
||||
|
||||
// loss function
|
||||
|
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
@@ -175,6 +175,46 @@ struct llama_file {
|
||||
}
|
||||
};
|
||||
|
||||
// llama_context_data
|
||||
struct llama_data_context {
|
||||
virtual void write(const void * src, size_t size) = 0;
|
||||
virtual size_t get_size_written() = 0;
|
||||
virtual ~llama_data_context() = default;
|
||||
};
|
||||
|
||||
struct llama_data_buffer_context : llama_data_context {
|
||||
uint8_t* ptr;
|
||||
size_t size_written = 0;
|
||||
|
||||
llama_data_buffer_context(uint8_t * p) : ptr(p) {}
|
||||
|
||||
void write(const void * src, size_t size) override {
|
||||
memcpy(ptr, src, size);
|
||||
ptr += size;
|
||||
size_written += size;
|
||||
}
|
||||
|
||||
size_t get_size_written() override {
|
||||
return size_written;
|
||||
}
|
||||
};
|
||||
|
||||
struct llama_data_file_context : llama_data_context {
|
||||
llama_file* file;
|
||||
size_t size_written = 0;
|
||||
|
||||
llama_data_file_context(llama_file * f) : file(f) {}
|
||||
|
||||
void write(const void * src, size_t size) override {
|
||||
file->write_raw(src, size);
|
||||
size_written += size;
|
||||
}
|
||||
|
||||
size_t get_size_written() override {
|
||||
return size_written;
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(_WIN32)
|
||||
static std::string llama_format_win_err(DWORD err) {
|
||||
LPSTR buf;
|
||||
@@ -205,7 +245,7 @@ struct llama_mmap {
|
||||
// prefetch/readahead impairs performance on NUMA systems
|
||||
if (numa) { prefetch = 0; }
|
||||
#ifdef __linux__
|
||||
if (prefetch) { flags |= MAP_POPULATE; }
|
||||
if (prefetch >= file->size) { flags |= MAP_POPULATE; }
|
||||
#endif
|
||||
addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
|
||||
if (addr == MAP_FAILED) {
|
||||
@@ -257,20 +297,29 @@ struct llama_mmap {
|
||||
throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
|
||||
}
|
||||
|
||||
#if _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||
if (prefetch) {
|
||||
// Advise the kernel to preload the mapped memory
|
||||
WIN32_MEMORY_RANGE_ENTRY range;
|
||||
range.VirtualAddress = addr;
|
||||
range.NumberOfBytes = (SIZE_T)size;
|
||||
if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
|
||||
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
// The PrefetchVirtualMemory API is only present on Windows 8 and above, so we
|
||||
// will dynamically load it using GetProcAddress.
|
||||
BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
|
||||
HMODULE hKernel32;
|
||||
|
||||
// This call is guaranteed to succeed.
|
||||
hKernel32 = GetModuleHandleW(L"kernel32.dll");
|
||||
|
||||
// This call may fail if on a pre-Win8 system.
|
||||
pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
|
||||
|
||||
if (pPrefetchVirtualMemory) {
|
||||
// Advise the kernel to preload the mapped memory.
|
||||
WIN32_MEMORY_RANGE_ENTRY range;
|
||||
range.VirtualAddress = addr;
|
||||
range.NumberOfBytes = (SIZE_T)size;
|
||||
if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
|
||||
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
#pragma message("warning: You are building for pre-Windows 8; prefetch not supported")
|
||||
#endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8
|
||||
}
|
||||
|
||||
~llama_mmap() {
|
@@ -1,5 +1,5 @@
|
||||
/**
|
||||
* llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
|
||||
* llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
|
||||
*
|
||||
* MIT License
|
||||
*
|
||||
@@ -82,6 +82,13 @@
|
||||
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||
#endif
|
||||
|
||||
static void llama_log_internal(llama_log_level level, const char* format, ...);
|
||||
static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);
|
||||
#define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
|
||||
#define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
|
||||
#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
|
||||
|
||||
|
||||
#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL)
|
||||
#include "ggml-alloc.h"
|
||||
#define LLAMA_USE_ALLOCATOR
|
||||
@@ -175,7 +182,7 @@ static const std::map<e_model, size_t> & MEM_REQ_EVAL()
|
||||
}
|
||||
|
||||
// amount of VRAM needed per batch size to hold temporary results
|
||||
// the values for 3b and 65b are not derived from testing but instead chosen conservatively
|
||||
// the values for 3b are not derived from testing but instead chosen conservatively
|
||||
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_BASE()
|
||||
{
|
||||
static std::map<e_model, size_t> k_sizes = {
|
||||
@@ -183,14 +190,14 @@ static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_BASE()
|
||||
{ MODEL_7B, 512ull * kB },
|
||||
{ MODEL_13B, 640ull * kB },
|
||||
{ MODEL_30B, 768ull * kB },
|
||||
{ MODEL_65B, 1536ull * kB },
|
||||
{ MODEL_70B, 1536ull * kB }, // TODO (likely can be reduced)
|
||||
{ MODEL_65B, 1280ull * kB },
|
||||
{ MODEL_70B, 1280ull * kB },
|
||||
};
|
||||
return k_sizes;
|
||||
}
|
||||
|
||||
// amount of VRAM needed per batch size and context to hold temporary results
|
||||
// the values for 3b and 65b are not derived from testing but instead chosen conservatively
|
||||
// the values for 3b are not derived from testing but instead chosen conservatively
|
||||
static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT()
|
||||
{
|
||||
static std::map<e_model, size_t> k_sizes = {
|
||||
@@ -198,8 +205,8 @@ static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT()
|
||||
{ MODEL_7B, 128ull },
|
||||
{ MODEL_13B, 160ull },
|
||||
{ MODEL_30B, 208ull },
|
||||
{ MODEL_65B, 416ull },
|
||||
{ MODEL_70B, 416ull }, // TODO (likely can be reduced)
|
||||
{ MODEL_65B, 256ull },
|
||||
{ MODEL_70B, 256ull },
|
||||
};
|
||||
return k_sizes;
|
||||
}
|
||||
@@ -464,6 +471,14 @@ struct llama_context {
|
||||
}
|
||||
};
|
||||
|
||||
struct llama_state {
|
||||
// We save the log callback globally
|
||||
llama_log_callback log_callback = llama_log_callback_default;
|
||||
void * log_callback_user_data = nullptr;
|
||||
};
|
||||
// global state
|
||||
static llama_state g_state;
|
||||
|
||||
template <typename T>
|
||||
static T checked_mul(T a, T b) {
|
||||
T ret = a * b;
|
||||
@@ -530,7 +545,7 @@ struct llama_file_loader {
|
||||
|
||||
llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map)
|
||||
: file(fname, "rb") {
|
||||
fprintf(stderr, "llama.cpp: loading model from %s\n", fname);
|
||||
LLAMA_LOG_INFO("llama.cpp: loading model from %s\n", fname);
|
||||
read_magic();
|
||||
read_hparams();
|
||||
read_vocab();
|
||||
@@ -645,7 +660,7 @@ struct llama_file_saver {
|
||||
llama_file_loader * any_file_loader;
|
||||
llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
|
||||
: file(fname, "wb"), any_file_loader(any_file_loader) {
|
||||
fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
|
||||
LLAMA_LOG_INFO("llama.cpp: saving model to %s\n", fname);
|
||||
write_magic();
|
||||
write_hparams(new_ftype);
|
||||
write_vocab();
|
||||
@@ -666,7 +681,7 @@ struct llama_file_saver {
|
||||
}
|
||||
void write_vocab() {
|
||||
if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
|
||||
fprintf(stderr, "llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
|
||||
LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
|
||||
}
|
||||
uint32_t n_vocab = any_file_loader->hparams.n_vocab;
|
||||
for (uint32_t i = 0; i < n_vocab; i++) {
|
||||
@@ -773,12 +788,12 @@ struct llama_model_loader {
|
||||
|
||||
void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
|
||||
size_t data_size = 0;
|
||||
size_t prefetch_size = 0;
|
||||
size_t prefetch_size = file_loader->file.size;
|
||||
size_t lock_size = 0;
|
||||
for (const llama_load_tensor & lt : tensors_map.tensors) {
|
||||
data_size += lt.size;
|
||||
if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
|
||||
prefetch_size += lt.size;
|
||||
if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) {
|
||||
prefetch_size -= lt.size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -857,7 +872,7 @@ struct llama_model_loader {
|
||||
uint8_t byte = lt.data[i];
|
||||
sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
|
||||
}
|
||||
fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
|
||||
LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
|
||||
llama_format_tensor_shape(lt.ne).c_str(), lt.size);
|
||||
}
|
||||
|
||||
@@ -890,7 +905,7 @@ static bool kv_cache_init(
|
||||
cache.ctx = ggml_init(params);
|
||||
|
||||
if (!cache.ctx) {
|
||||
fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
|
||||
LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -1102,7 +1117,7 @@ static void llama_model_load_internal(
|
||||
LLAMA_ASSERT(hparams.n_head % n_gqa == 0);
|
||||
hparams.n_head_kv = hparams.n_head / n_gqa;
|
||||
if (model.type == e_model::MODEL_65B && n_gqa == 8) {
|
||||
fprintf(stderr, "%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
|
||||
LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa);
|
||||
model.type = e_model::MODEL_70B;
|
||||
hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model
|
||||
}
|
||||
@@ -1118,22 +1133,22 @@ static void llama_model_load_internal(
|
||||
//const uint32_t n_ff = 28672;
|
||||
|
||||
{
|
||||
fprintf(stderr, "%s: format = %s\n", __func__, llama_file_version_name(file_version));
|
||||
fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab);
|
||||
fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx);
|
||||
fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd);
|
||||
fprintf(stderr, "%s: n_mult = %u\n", __func__, hparams.n_mult);
|
||||
fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head);
|
||||
fprintf(stderr, "%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
|
||||
fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer);
|
||||
fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
|
||||
fprintf(stderr, "%s: n_gqa = %u\n", __func__, hparams.n_gqa());
|
||||
fprintf(stderr, "%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
|
||||
fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff);
|
||||
fprintf(stderr, "%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
|
||||
fprintf(stderr, "%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
|
||||
fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
|
||||
fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type));
|
||||
LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(file_version));
|
||||
LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
|
||||
LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
|
||||
LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
|
||||
LLAMA_LOG_INFO("%s: n_mult = %u\n", __func__, hparams.n_mult);
|
||||
LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
|
||||
LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
|
||||
LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
|
||||
LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
|
||||
LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
|
||||
LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps);
|
||||
LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff);
|
||||
LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
|
||||
LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
|
||||
LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
|
||||
LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type));
|
||||
}
|
||||
|
||||
if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
|
||||
@@ -1161,7 +1176,7 @@ static void llama_model_load_internal(
|
||||
size_t ctx_size;
|
||||
size_t mmapped_size;
|
||||
ml->calc_sizes(&ctx_size, &mmapped_size);
|
||||
fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
|
||||
LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
|
||||
|
||||
// create the ggml context
|
||||
{
|
||||
@@ -1186,13 +1201,13 @@ static void llama_model_load_internal(
|
||||
(void) main_gpu;
|
||||
(void) mul_mat_q;
|
||||
#if defined(GGML_USE_CUBLAS)
|
||||
fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__);
|
||||
LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration\n", __func__);
|
||||
ggml_cuda_set_main_device(main_gpu);
|
||||
ggml_cuda_set_mul_mat_q(mul_mat_q);
|
||||
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
|
||||
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
|
||||
#elif defined(GGML_USE_CLBLAST)
|
||||
fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__);
|
||||
LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
|
||||
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
|
||||
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
|
||||
#else
|
||||
@@ -1297,14 +1312,14 @@ static void llama_model_load_internal(
|
||||
const size_t mem_required_state =
|
||||
scale*hparams.kv_size();
|
||||
|
||||
fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
|
||||
LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
|
||||
mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
|
||||
|
||||
(void) vram_scratch;
|
||||
(void) n_batch;
|
||||
#ifdef GGML_USE_CUBLAS
|
||||
if (low_vram) {
|
||||
fprintf(stderr, "%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
|
||||
LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
|
||||
ggml_cuda_set_scratch_size(0); // disable scratch
|
||||
} else {
|
||||
const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type);
|
||||
@@ -1312,7 +1327,7 @@ static void llama_model_load_internal(
|
||||
vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context);
|
||||
ggml_cuda_set_scratch_size(vram_scratch);
|
||||
if (n_gpu_layers > 0) {
|
||||
fprintf(stderr, "%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
|
||||
LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n",
|
||||
__func__, vram_scratch_base / kB, vram_scratch_per_context,
|
||||
(vram_scratch + MB - 1) / MB); // round up
|
||||
}
|
||||
@@ -1322,9 +1337,9 @@ static void llama_model_load_internal(
|
||||
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
|
||||
const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
|
||||
|
||||
fprintf(stderr, "%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
|
||||
LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
|
||||
if (n_gpu_layers > (int) hparams.n_layer) {
|
||||
fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__);
|
||||
LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
|
||||
}
|
||||
size_t vram_kv_cache = 0;
|
||||
|
||||
@@ -1333,17 +1348,17 @@ static void llama_model_load_internal(
|
||||
const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
|
||||
if (n_gpu_layers > (int) hparams.n_layer + 1) {
|
||||
if (low_vram) {
|
||||
fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
|
||||
LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
|
||||
} else {
|
||||
fprintf(stderr, "%s: offloading v cache to GPU\n", __func__);
|
||||
LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
|
||||
vram_kv_cache += hparams.kv_size() / 2;
|
||||
}
|
||||
}
|
||||
if (n_gpu_layers > (int) hparams.n_layer + 2) {
|
||||
if (low_vram) {
|
||||
fprintf(stderr, "%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
|
||||
LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
|
||||
} else {
|
||||
fprintf(stderr, "%s: offloading k cache to GPU\n", __func__);
|
||||
LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
|
||||
vram_kv_cache += hparams.kv_size() / 2;
|
||||
}
|
||||
}
|
||||
@@ -1352,9 +1367,9 @@ static void llama_model_load_internal(
|
||||
const int max_offloadable_layers = hparams.n_layer + 1;
|
||||
#endif // GGML_USE_CUBLAS
|
||||
|
||||
fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n",
|
||||
LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
|
||||
__func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
|
||||
fprintf(stderr, "%s: total VRAM used: %zu MB\n",
|
||||
LLAMA_LOG_INFO("%s: total VRAM used: %zu MB\n",
|
||||
__func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
|
||||
#else
|
||||
(void) n_gpu_layers;
|
||||
@@ -1413,7 +1428,7 @@ static bool llama_model_load(
|
||||
use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data);
|
||||
return true;
|
||||
} catch (const std::exception & err) {
|
||||
fprintf(stderr, "error loading model: %s\n", err.what());
|
||||
LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -1777,7 +1792,7 @@ static struct ggml_cgraph * llama_build_graph(
|
||||
}
|
||||
|
||||
#if 0
|
||||
printf("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
|
||||
LLAMA_LOG_INFO("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__,
|
||||
ggml_used_mem(ctx0)/1024.0/1024.0,
|
||||
lctx.get_buf_max_mem(0)/1024.0/1024.0,
|
||||
lctx.get_buf_max_mem(1)/1024.0/1024.0,
|
||||
@@ -1838,7 +1853,7 @@ static bool llama_eval_internal(
|
||||
ggml_allocr_alloc_graph(lctx.alloc, gf);
|
||||
#endif
|
||||
|
||||
// fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
|
||||
// LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
|
||||
|
||||
// for big prompts, if BLAS is enabled, it is better to use only one thread
|
||||
// otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
|
||||
@@ -2025,7 +2040,7 @@ struct llama_tokenizer {
|
||||
left_sym.n += right_sym.n;
|
||||
right_sym.n = 0;
|
||||
|
||||
//printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
|
||||
//LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
|
||||
|
||||
// remove the right sym from the chain
|
||||
left_sym.next = right_sym.next;
|
||||
@@ -3033,7 +3048,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
tensor.data = read_data.addr;
|
||||
model_loader->load_data_for(tensor);
|
||||
|
||||
printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
|
||||
LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ",
|
||||
++idx, model_loader->tensors_map.tensors.size(),
|
||||
tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
|
||||
ggml_type_name(tensor.type));
|
||||
@@ -3055,7 +3070,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
new_type = tensor.type;
|
||||
new_data = tensor.data;
|
||||
new_size = tensor.size;
|
||||
printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
|
||||
LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
|
||||
} else {
|
||||
new_type = quantized_type;
|
||||
#ifdef GGML_USE_K_QUANTS
|
||||
@@ -3090,17 +3105,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
int nx = tensor.ne.at(0);
|
||||
int ny = tensor.ne.at(1);
|
||||
if (nx % QK_K != 0 || ny % QK_K != 0) {
|
||||
fprintf(stderr, "\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
|
||||
LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K);
|
||||
convert_incompatible_tensor = true;
|
||||
}
|
||||
}
|
||||
if (convert_incompatible_tensor) {
|
||||
if (tensor.name == "output.weight") {
|
||||
new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
|
||||
fprintf(stderr, "F16 will be used for this tensor instead.\n");
|
||||
LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
|
||||
} else if (tensor.name == "tok_embeddings.weight") {
|
||||
new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
|
||||
fprintf(stderr, "Q4_0 will be used for this tensor instead.\n");
|
||||
LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
|
||||
} else {
|
||||
throw std::runtime_error("Unsupported tensor size encountered\n");
|
||||
}
|
||||
@@ -3120,7 +3135,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
f32_data = (float *) f32_conv_buf.addr;
|
||||
}
|
||||
|
||||
printf("quantizing to %s .. ", ggml_type_name(new_type));
|
||||
LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
|
||||
fflush(stdout);
|
||||
|
||||
work.resize(nelements * 4); // upper bound on size
|
||||
@@ -3170,7 +3185,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
}
|
||||
}
|
||||
|
||||
printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
|
||||
LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
|
||||
int64_t tot_count = 0;
|
||||
for (size_t i = 0; i < hist_cur.size(); i++) {
|
||||
hist_all[i] += hist_cur[i];
|
||||
@@ -3179,18 +3194,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
|
||||
if (tot_count > 0) {
|
||||
for (size_t i = 0; i < hist_cur.size(); i++) {
|
||||
printf("%5.3f ", hist_cur[i] / float(nelements));
|
||||
LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
|
||||
}
|
||||
}
|
||||
printf("\n");
|
||||
LLAMA_LOG_INFO("\n");
|
||||
}
|
||||
total_size_org += tensor.size;
|
||||
total_size_new += new_size;
|
||||
file_saver.write_tensor(tensor, new_type, new_data, new_size);
|
||||
}
|
||||
|
||||
printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
|
||||
printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
|
||||
LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
|
||||
LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
|
||||
|
||||
{
|
||||
int64_t sum_all = 0;
|
||||
@@ -3199,11 +3214,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
|
||||
}
|
||||
|
||||
if (sum_all > 0) {
|
||||
printf("%s: hist: ", __func__);
|
||||
LLAMA_LOG_INFO("%s: hist: ", __func__);
|
||||
for (size_t i = 0; i < hist_all.size(); i++) {
|
||||
printf("%5.3f ", hist_all[i] / float(sum_all));
|
||||
LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
|
||||
}
|
||||
printf("\n");
|
||||
LLAMA_LOG_INFO("\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3227,8 +3242,8 @@ struct llama_model * llama_load_model_from_file(
|
||||
params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,params.low_vram,
|
||||
memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback,
|
||||
params.progress_callback_user_data)) {
|
||||
LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
|
||||
delete model;
|
||||
fprintf(stderr, "%s: failed to load model\n", __func__);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@@ -3261,10 +3276,9 @@ struct llama_context * llama_new_context_with_model(
|
||||
unsigned percentage = (unsigned) (100 * progress);
|
||||
while (percentage > *cur_percentage_p) {
|
||||
*cur_percentage_p = percentage;
|
||||
fprintf(stderr, ".");
|
||||
fflush(stderr);
|
||||
LLAMA_LOG_INFO(".");
|
||||
if (percentage >= 100) {
|
||||
fprintf(stderr, "\n");
|
||||
LLAMA_LOG_INFO("\n");
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -3278,14 +3292,14 @@ struct llama_context * llama_new_context_with_model(
|
||||
// reserve memory for context buffers
|
||||
if (!params.vocab_only) {
|
||||
if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
|
||||
fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
|
||||
LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__);
|
||||
llama_free(ctx);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
{
|
||||
const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
|
||||
fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
|
||||
LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
|
||||
}
|
||||
|
||||
const auto & hparams = ctx->model.hparams;
|
||||
@@ -3319,14 +3333,14 @@ struct llama_context * llama_new_context_with_model(
|
||||
// measure memory requirements for the graph
|
||||
size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
|
||||
|
||||
fprintf(stderr, "%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
|
||||
LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
|
||||
|
||||
// debug - for comparison with scratch buffer
|
||||
//size_t prev_req =
|
||||
// MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) +
|
||||
// MEM_REQ_SCRATCH1().at(ctx->model.type) +
|
||||
// MEM_REQ_EVAL().at(ctx->model.type);
|
||||
//fprintf(stderr, "%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);
|
||||
//LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0);
|
||||
|
||||
// recreate allocator with exact memory requirements
|
||||
ggml_allocr_free(ctx->alloc);
|
||||
@@ -3349,6 +3363,12 @@ struct llama_context * llama_new_context_with_model(
|
||||
// this allocates all Metal resources and memory buffers
|
||||
ctx->ctx_metal = ggml_metal_init(1);
|
||||
|
||||
if (!ctx->ctx_metal) {
|
||||
LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
|
||||
llama_free(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void * data_ptr = NULL;
|
||||
size_t data_size = 0;
|
||||
|
||||
@@ -3362,13 +3382,13 @@ struct llama_context * llama_new_context_with_model(
|
||||
|
||||
const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);
|
||||
|
||||
fprintf(stderr, "%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
|
||||
LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
|
||||
|
||||
#define LLAMA_METAL_CHECK_BUF(result) \
|
||||
if (!(result)) { \
|
||||
fprintf(stderr, "%s: failed to add buffer\n", __func__); \
|
||||
llama_free(ctx); \
|
||||
return NULL; \
|
||||
#define LLAMA_METAL_CHECK_BUF(result) \
|
||||
if (!(result)) { \
|
||||
LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
|
||||
llama_free(ctx); \
|
||||
return NULL; \
|
||||
}
|
||||
|
||||
LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
|
||||
@@ -3422,19 +3442,19 @@ int llama_model_quantize(
|
||||
llama_model_quantize_internal(fname_inp, fname_out, params);
|
||||
return 0;
|
||||
} catch (const std::exception & err) {
|
||||
fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.what());
|
||||
LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
|
||||
fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
|
||||
LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
|
||||
|
||||
const int64_t t_start_lora_us = ggml_time_us();
|
||||
|
||||
auto fin = std::ifstream(path_lora, std::ios::binary);
|
||||
if (!fin) {
|
||||
fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
|
||||
LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -3443,14 +3463,14 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
uint32_t magic;
|
||||
fin.read((char *) &magic, sizeof(magic));
|
||||
if (magic != LLAMA_FILE_MAGIC_GGLA) {
|
||||
fprintf(stderr, "%s: bad file magic\n", __func__);
|
||||
LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
uint32_t format_version;
|
||||
fin.read((char *) &format_version, sizeof(format_version));
|
||||
|
||||
if (format_version != 1) {
|
||||
fprintf(stderr, "%s: unsupported file version\n", __func__ );
|
||||
LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
@@ -3461,7 +3481,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
fin.read((char *) &lora_alpha, sizeof(lora_alpha));
|
||||
float scaling = (float)lora_alpha / (float)lora_r;
|
||||
|
||||
fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
|
||||
LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
|
||||
|
||||
|
||||
// create a temporary ggml context to store the lora tensors
|
||||
@@ -3487,7 +3507,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
ggml_context * base_ctx = NULL;
|
||||
llama_buffer base_buf;
|
||||
if (path_base_model) {
|
||||
fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model);
|
||||
LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
|
||||
model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
|
||||
|
||||
size_t ctx_size;
|
||||
@@ -3544,17 +3564,17 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
const std::string lora_suffix = ".lora";
|
||||
size_t pos = name.rfind(lora_suffix);
|
||||
if (pos == std::string::npos) {
|
||||
fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
|
||||
LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
|
||||
return 1;
|
||||
}
|
||||
|
||||
std::string lora_type = name.substr(pos + lora_suffix.length());
|
||||
std::string base_name = name;
|
||||
base_name.erase(pos);
|
||||
// fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
|
||||
// LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
|
||||
|
||||
if (model_tensors.find(base_name) == model_tensors.end()) {
|
||||
fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
|
||||
LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -3565,7 +3585,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
case 1: wtype = GGML_TYPE_F16; break;
|
||||
default:
|
||||
{
|
||||
fprintf(stderr, "%s: invalid tensor data type '%d'\n",
|
||||
LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
|
||||
__func__, ftype);
|
||||
return false;
|
||||
}
|
||||
@@ -3575,7 +3595,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
|
||||
}
|
||||
else {
|
||||
fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims);
|
||||
LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
|
||||
return 1;
|
||||
}
|
||||
ggml_set_name(lora_tensor, "lora_tensor");
|
||||
@@ -3613,7 +3633,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
if (model_loader) {
|
||||
// load from base model
|
||||
if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
|
||||
fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
|
||||
LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
|
||||
return 1;
|
||||
}
|
||||
size_t idx = model_loader->tensors_map.name_to_idx[base_name];
|
||||
@@ -3629,8 +3649,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
|
||||
if (ggml_is_quantized(base_t->type)) {
|
||||
if (!warned) {
|
||||
fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
|
||||
"use a f16 or f32 base model with --lora-base\n", __func__);
|
||||
LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
|
||||
"use a f16 or f32 base model with --lora-base\n", __func__);
|
||||
warned = true;
|
||||
}
|
||||
}
|
||||
@@ -3644,8 +3664,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
ggml_set_name(loraB, "loraB");
|
||||
|
||||
if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
|
||||
fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
|
||||
" are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
|
||||
LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
|
||||
" are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -3690,7 +3710,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
|
||||
n_tensors++;
|
||||
if (n_tensors % 4 == 0) {
|
||||
fprintf(stderr, ".");
|
||||
LLAMA_LOG_INFO(".");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3702,7 +3722,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
|
||||
}
|
||||
|
||||
const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
|
||||
fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
|
||||
LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -3711,7 +3731,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
|
||||
try {
|
||||
return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
|
||||
} catch (const std::exception & err) {
|
||||
fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
|
||||
LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
@@ -3720,7 +3740,7 @@ int llama_model_apply_lora_from_file(const struct llama_model * model, const cha
|
||||
try {
|
||||
return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
|
||||
} catch (const std::exception & err) {
|
||||
fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
|
||||
LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
@@ -3769,10 +3789,20 @@ size_t llama_get_state_size(const struct llama_context * ctx) {
|
||||
return s_total;
|
||||
}
|
||||
|
||||
// Copies the state to the specified destination address
|
||||
size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
|
||||
uint8_t * out = dst;
|
||||
|
||||
/** copy state data into either a buffer or file depending on the passed in context
|
||||
*
|
||||
* file context:
|
||||
* llama_file file("/path", "wb");
|
||||
* llama_data_file_context data_ctx(&file);
|
||||
* llama_copy_state_data(ctx, &data_ctx);
|
||||
*
|
||||
* buffer context:
|
||||
* std::vector<uint8_t> buf(max_size, 0);
|
||||
* llama_data_buffer_context data_ctx(&buf.data());
|
||||
* llama_copy_state_data(ctx, &data_ctx);
|
||||
*
|
||||
*/
|
||||
void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
|
||||
// copy rng
|
||||
{
|
||||
std::stringstream rng_ss;
|
||||
@@ -3784,8 +3814,8 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
|
||||
memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
|
||||
memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());
|
||||
|
||||
memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size);
|
||||
memcpy(out, &rng_buf[0], LLAMA_MAX_RNG_STATE); out += LLAMA_MAX_RNG_STATE;
|
||||
data_ctx->write(&rng_size, sizeof(rng_size));
|
||||
data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
|
||||
}
|
||||
|
||||
// copy logits
|
||||
@@ -3793,25 +3823,29 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
|
||||
const size_t logits_cap = ctx->logits.capacity();
|
||||
const size_t logits_size = ctx->logits.size();
|
||||
|
||||
memcpy(out, &logits_cap, sizeof(logits_cap)); out += sizeof(logits_cap);
|
||||
memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size);
|
||||
data_ctx->write(&logits_cap, sizeof(logits_cap));
|
||||
data_ctx->write(&logits_size, sizeof(logits_size));
|
||||
|
||||
if (logits_size) {
|
||||
memcpy(out, ctx->logits.data(), logits_size * sizeof(float));
|
||||
data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
|
||||
}
|
||||
|
||||
out += logits_cap * sizeof(float);
|
||||
// If there is a gap between the size and the capacity, write padding
|
||||
size_t padding_size = (logits_cap - logits_size) * sizeof(float);
|
||||
if (padding_size > 0) {
|
||||
std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
|
||||
data_ctx->write(padding.data(), padding_size);
|
||||
}
|
||||
}
|
||||
|
||||
// copy embeddings
|
||||
{
|
||||
const size_t embedding_size = ctx->embedding.size();
|
||||
|
||||
memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size);
|
||||
data_ctx->write(&embedding_size, sizeof(embedding_size));
|
||||
|
||||
if (embedding_size) {
|
||||
memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float));
|
||||
out += embedding_size * sizeof(float);
|
||||
data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3826,8 +3860,8 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
|
||||
const size_t kv_size = kv_self.buf.size;
|
||||
const int kv_ntok = llama_get_kv_cache_token_count(ctx);
|
||||
|
||||
memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size);
|
||||
memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok);
|
||||
data_ctx->write(&kv_size, sizeof(kv_size));
|
||||
data_ctx->write(&kv_ntok, sizeof(kv_ntok));
|
||||
|
||||
if (kv_size) {
|
||||
const size_t elt_size = ggml_element_size(kv_self.k);
|
||||
@@ -3836,12 +3870,12 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
|
||||
ggml_cgraph gf{};
|
||||
|
||||
ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
|
||||
kout3d->data = out;
|
||||
out += ggml_nbytes(kout3d);
|
||||
std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
|
||||
kout3d->data = kout3d_data.data();
|
||||
|
||||
ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
|
||||
vout3d->data = out;
|
||||
out += ggml_nbytes(vout3d);
|
||||
std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
|
||||
vout3d->data = vout3d_data.data();
|
||||
|
||||
ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
|
||||
n_embd, kv_ntok, n_layer,
|
||||
@@ -3856,15 +3890,20 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
|
||||
ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
|
||||
|
||||
ggml_free(cpy_ctx);
|
||||
|
||||
// our data is now in the kout3d_data and vout3d_data buffers
|
||||
// write them to file
|
||||
data_ctx->write(kout3d_data.data(), kout3d_data.size());
|
||||
data_ctx->write(vout3d_data.data(), vout3d_data.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const size_t written = out - dst;
|
||||
const size_t max_size = llama_get_state_size(ctx);
|
||||
size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
|
||||
llama_data_buffer_context data_ctx(dst);
|
||||
llama_copy_state_data_internal(ctx, &data_ctx);
|
||||
|
||||
LLAMA_ASSERT(written <= max_size);
|
||||
|
||||
return written;
|
||||
return data_ctx.get_size_written();
|
||||
}
|
||||
|
||||
// Sets the state reading from the specified source address
|
||||
@@ -3983,7 +4022,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
|
||||
const uint32_t version = file.read_u32();
|
||||
|
||||
if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
|
||||
fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
|
||||
LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -3991,7 +4030,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
|
||||
file.read_raw(&session_hparams, sizeof(llama_hparams));
|
||||
|
||||
if (session_hparams != ctx->model.hparams) {
|
||||
fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__);
|
||||
LLAMA_LOG_INFO("%s : model hparams didn't match from session file!\n", __func__);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -4001,7 +4040,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
|
||||
const uint32_t n_token_count = file.read_u32();
|
||||
|
||||
if (n_token_count > n_token_capacity) {
|
||||
fprintf(stderr, "%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
|
||||
LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -4015,7 +4054,7 @@ static bool llama_load_session_file_internal(struct llama_context * ctx, const c
|
||||
const size_t n_state_size_max = llama_get_state_size(ctx);
|
||||
|
||||
if (n_state_size_cur > n_state_size_max) {
|
||||
fprintf(stderr, "%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
|
||||
LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -4032,7 +4071,7 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
|
||||
try {
|
||||
return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
|
||||
} catch (const std::exception & err) {
|
||||
fprintf(stderr, "error loading session file: %s\n", err.what());
|
||||
LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@@ -4049,15 +4088,9 @@ bool llama_save_session_file(struct llama_context * ctx, const char * path_sessi
|
||||
file.write_u32((uint32_t) n_token_count);
|
||||
file.write_raw(tokens, sizeof(llama_token) * n_token_count);
|
||||
|
||||
// save the context state
|
||||
{
|
||||
const size_t n_state_size_max = llama_get_state_size(ctx);
|
||||
|
||||
std::vector<uint8_t> state_data(n_state_size_max);
|
||||
const size_t n_state_size_cur = llama_copy_state_data(ctx, state_data.data());
|
||||
|
||||
file.write_raw(state_data.data(), n_state_size_cur);
|
||||
}
|
||||
// save the context state using stream saving
|
||||
llama_data_file_context data_ctx(&file);
|
||||
llama_copy_state_data_internal(ctx, &data_ctx);
|
||||
|
||||
return true;
|
||||
}
|
||||
@@ -4069,7 +4102,7 @@ int llama_eval(
|
||||
int n_past,
|
||||
int n_threads) {
|
||||
if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
|
||||
fprintf(stderr, "%s: failed to eval\n", __func__);
|
||||
LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -4091,7 +4124,7 @@ int llama_eval_embd(
|
||||
int n_past,
|
||||
int n_threads) {
|
||||
if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
|
||||
fprintf(stderr, "%s: failed to eval\n", __func__);
|
||||
LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -4112,7 +4145,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) {
|
||||
const std::vector<llama_token> tmp(n_batch, llama_token_bos());
|
||||
|
||||
if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
|
||||
fprintf(stderr, "%s: failed to eval\n", __func__);
|
||||
LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -4128,7 +4161,7 @@ int llama_tokenize_with_model(
|
||||
auto res = llama_tokenize(model->vocab, text, add_bos);
|
||||
|
||||
if (n_max_tokens < (int) res.size()) {
|
||||
fprintf(stderr, "%s: too many tokens\n", __func__);
|
||||
LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
|
||||
return -((int) res.size());
|
||||
}
|
||||
|
||||
@@ -4245,15 +4278,15 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
|
||||
void llama_print_timings(struct llama_context * ctx) {
|
||||
const llama_timings timings = llama_get_timings(ctx);
|
||||
|
||||
fprintf(stderr, "\n");
|
||||
fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
|
||||
fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
|
||||
LLAMA_LOG_INFO("\n");
|
||||
LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
|
||||
LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
|
||||
__func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
|
||||
fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
|
||||
LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
|
||||
__func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
|
||||
fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
|
||||
LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
|
||||
__func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
|
||||
fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
|
||||
LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
|
||||
}
|
||||
|
||||
void llama_reset_timings(struct llama_context * ctx) {
|
||||
@@ -4289,3 +4322,44 @@ const char * llama_print_system_info(void) {
const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
    return ctx->model.tensors_by_name;
}


void llama_log_set(llama_log_callback log_callback, void * user_data) {
    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
    g_state.log_callback_user_data = user_data;
}

#if defined(_MSC_VER) && !defined(vsnprintf)
#define vsnprintf _vsnprintf
#endif

static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
    va_list args_copy;
    va_copy(args_copy, args);
    char buffer[128];
    int len = vsnprintf(buffer, 128, format, args);
    if (len < 128) {
        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
    } else {
        char* buffer2 = new char[len+1];
        vsnprintf(buffer2, len+1, format, args_copy);
        buffer2[len] = 0;
        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
        delete[] buffer2;
    }
    va_end(args_copy);
}

static void llama_log_internal(llama_log_level level, const char * format, ...) {
    va_list args;
    va_start(args, format);
    llama_log_internal_v(level, format, args);
    va_end(args);
}

static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
    (void) level;
    (void) user_data;
    fputs(text, stderr);
    fflush(stderr);
}
@@ -1,8 +1,9 @@
package llama
package llm

/*
#cgo CPPFLAGS: -O3 -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
#cgo CXXFLAGS: -std=gnu++11
#cgo CFLAGS: -Ofast -std=c11 -fPIC
#cgo CPPFLAGS: -Ofast -Wall -Wextra -Wno-unused-function -Wno-unused-variable -DNDEBUG -DGGML_USE_K_QUANTS
#cgo CXXFLAGS: -std=c++11 -fPIC
#cgo darwin CPPFLAGS: -DGGML_USE_ACCELERATE
#cgo darwin,arm64 CPPFLAGS: -DGGML_USE_METAL -DGGML_METAL_NDEBUG
#cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
@@ -105,7 +106,105 @@ import (
//go:embed ggml-metal.metal
var fs embed.FS

type LLM struct {
|
||||
const ModelFamilyLlama ModelFamily = "llama"
|
||||
|
||||
type llamaModel struct {
|
||||
hyperparameters llamaHyperparameters
|
||||
}
|
||||
|
||||
func (llm *llamaModel) ModelFamily() ModelFamily {
|
||||
return ModelFamilyLlama
|
||||
}
|
||||
|
||||
func (llm *llamaModel) ModelType() ModelType {
|
||||
return ModelType30B
|
||||
}
|
||||
|
||||
func (llm *llamaModel) FileType() FileType {
|
||||
return llm.hyperparameters.FileType
|
||||
}
|
||||
|
||||
type llamaHyperparameters struct {
|
||||
// NumVocab is the size of the model's vocabulary.
|
||||
NumVocab uint32
|
||||
|
||||
// NumEmbd is the size of the model's embedding layer.
|
||||
NumEmbd uint32
|
||||
NumMult uint32
|
||||
NumHead uint32
|
||||
|
||||
// NumLayer is the number of layers in the model.
|
||||
NumLayer uint32
|
||||
NumRot uint32
|
||||
|
||||
// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
|
||||
FileType llamaFileType
|
||||
}
|
||||
|
||||
type llamaFileType uint32
|
||||
|
||||
const (
|
||||
llamaFileTypeF32 llamaFileType = iota
|
||||
llamaFileTypeF16
|
||||
llamaFileTypeQ4_0
|
||||
llamaFileTypeQ4_1
|
||||
llamaFileTypeQ4_1_F16
|
||||
llamaFileTypeQ8_0 llamaFileType = iota + 2
|
||||
llamaFileTypeQ5_0
|
||||
llamaFileTypeQ5_1
|
||||
llamaFileTypeQ2_K
|
||||
llamaFileTypeQ3_K_S
|
||||
llamaFileTypeQ3_K_M
|
||||
llamaFileTypeQ3_K_L
|
||||
llamaFileTypeQ4_K_S
|
||||
llamaFileTypeQ4_K_M
|
||||
llamaFileTypeQ5_K_S
|
||||
llamaFileTypeQ5_K_M
|
||||
llamaFileTypeQ6_K
|
||||
)
|
||||
|
||||
func (ft llamaFileType) String() string {
|
||||
switch ft {
|
||||
case llamaFileTypeF32:
|
||||
return "F32"
|
||||
case llamaFileTypeF16:
|
||||
return "F16"
|
||||
case llamaFileTypeQ4_0:
|
||||
return "Q4_0"
|
||||
case llamaFileTypeQ4_1:
|
||||
return "Q4_1"
|
||||
case llamaFileTypeQ4_1_F16:
|
||||
return "Q4_1_F16"
|
||||
case llamaFileTypeQ8_0:
|
||||
return "Q8_0"
|
||||
case llamaFileTypeQ5_0:
|
||||
return "Q5_0"
|
||||
case llamaFileTypeQ5_1:
|
||||
return "Q5_1"
|
||||
case llamaFileTypeQ2_K:
|
||||
return "Q2_K"
|
||||
case llamaFileTypeQ3_K_S:
|
||||
return "Q3_K_S"
|
||||
case llamaFileTypeQ3_K_M:
|
||||
return "Q3_K_M"
|
||||
case llamaFileTypeQ3_K_L:
|
||||
return "Q3_K_L"
|
||||
case llamaFileTypeQ4_K_S:
|
||||
return "Q4_K_S"
|
||||
case llamaFileTypeQ4_K_M:
|
||||
return "Q4_K_M"
|
||||
case llamaFileTypeQ5_K_S:
|
||||
return "Q5_K_S"
|
||||
case llamaFileTypeQ5_K_M:
|
||||
return "Q5_K_M"
|
||||
case llamaFileTypeQ6_K:
|
||||
return "Q6_K"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
}
|
||||
|
||||
type llama struct {
|
||||
params *C.struct_llama_context_params
|
||||
model *C.struct_llama_model
|
||||
ctx *C.struct_llama_context
|
||||
@@ -120,12 +219,12 @@ type LLM struct {
|
||||
api.Options
|
||||
}
|
||||
|
||||
func New(model string, opts api.Options) (*LLM, error) {
|
||||
func newLlama(model string, adapters []string, opts api.Options) (*llama, error) {
|
||||
if _, err := os.Stat(model); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
llm := LLM{Options: opts}
|
||||
llm := llama{Options: opts}
|
||||
|
||||
C.llama_backend_init(C.bool(llm.UseNUMA))
|
||||
|
||||
@@ -145,6 +244,12 @@ func New(model string, opts api.Options) (*LLM, error) {
|
||||
params.embedding = C.bool(llm.EmbeddingOnly)
|
||||
params.rope_freq_base = C.float(llm.RopeFrequencyBase)
|
||||
params.rope_freq_scale = C.float(llm.RopeFrequencyScale)
|
||||
|
||||
if len(adapters) > 0 && llm.UseMMap {
|
||||
log.Printf("must disable mmap to use lora adapters")
|
||||
params.use_mmap = C.bool(false)
|
||||
}
|
||||
|
||||
llm.params = ¶ms
|
||||
|
||||
cModel := C.CString(model)
|
||||
@@ -160,6 +265,15 @@ func New(model string, opts api.Options) (*LLM, error) {
|
||||
return nil, errors.New("failed to create context")
|
||||
}
|
||||
|
||||
for _, adapter := range adapters {
|
||||
cAdapter := C.CString(adapter)
|
||||
defer C.free(unsafe.Pointer(cAdapter))
|
||||
|
||||
if retval := C.llama_model_apply_lora_from_file(llm.model, cAdapter, nil, C.int(llm.NumThread)); retval != 0 {
|
||||
return nil, fmt.Errorf("failed to load adapter %s", adapter)
|
||||
}
|
||||
}
|
||||
|
||||
// warm up the model
|
||||
bos := []C.llama_token{C.llama_token_bos()}
|
||||
C.llama_eval(llm.ctx, unsafe.SliceData(bos), C.int(len(bos)), 0, C.int(opts.NumThread))
|
||||
@@ -168,7 +282,7 @@ func New(model string, opts api.Options) (*LLM, error) {
|
||||
return &llm, nil
|
||||
}
|
||||
|
||||
func (llm *LLM) Close() {
|
||||
func (llm *llama) Close() {
|
||||
llm.gc = true
|
||||
|
||||
llm.mu.Lock()
|
||||
@@ -180,17 +294,16 @@ func (llm *LLM) Close() {
|
||||
C.llama_print_timings(llm.ctx)
|
||||
}
|
||||
|
||||
func (llm *llama) SetOptions(opts api.Options) {
|
||||
llm.Options = opts
|
||||
}
|
||||
|
||||
var errNeedMoreData = errors.New("need more data")
|
||||
|
||||
func (llm *LLM) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
|
||||
func (llm *llama) Predict(ctx []int, prompt string, fn func(api.GenerateResponse)) error {
|
||||
C.llama_reset_timings(llm.ctx)
|
||||
|
||||
tokens := make([]C.llama_token, len(ctx))
|
||||
for i := range tokens {
|
||||
tokens[i] = C.llama_token(ctx[i])
|
||||
}
|
||||
|
||||
llm.marshalPrompt(tokens, prompt)
|
||||
llm.marshalPrompt(ctx, prompt)
|
||||
|
||||
C.llama_set_rng_seed(llm.ctx, C.uint(llm.Seed))
|
||||
|
||||
@@ -205,7 +318,7 @@ func (llm *LLM) Predict(ctx []int, prompt string, fn func(api.GenerateResponse))
|
||||
return err
|
||||
}
|
||||
|
||||
b.WriteString(llm.Decode(token))
|
||||
b.WriteString(llm.Decode(int(token)))
|
||||
|
||||
if err := llm.checkStopConditions(b); err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
@@ -243,7 +356,7 @@ func (llm *LLM) Predict(ctx []int, prompt string, fn func(api.GenerateResponse))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (llm *LLM) checkStopConditions(b bytes.Buffer) error {
|
||||
func (llm *llama) checkStopConditions(b bytes.Buffer) error {
|
||||
for _, stopCondition := range llm.Stop {
|
||||
if stopCondition == strings.TrimSpace(b.String()) {
|
||||
return io.EOF
|
||||
@@ -255,12 +368,17 @@ func (llm *LLM) checkStopConditions(b bytes.Buffer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (llm *LLM) marshalPrompt(ctx []C.llama_token, prompt string) []C.llama_token {
|
||||
func (llm *llama) marshalPrompt(ctx []int, prompt string) []C.llama_token {
|
||||
tokens := append(ctx, llm.Encode(prompt)...)
|
||||
if llm.NumKeep < 0 {
|
||||
llm.NumKeep = len(tokens)
|
||||
}
|
||||
|
||||
cTokens := make([]C.llama_token, len(tokens))
|
||||
for i := range tokens {
|
||||
cTokens[i] = C.llama_token(tokens[i])
|
||||
}
|
||||
|
||||
// min(llm.NumCtx - 4, llm.NumKeep)
|
||||
if llm.NumCtx-4 < llm.NumKeep {
|
||||
llm.NumKeep = llm.NumCtx - 4
|
||||
@@ -269,25 +387,25 @@ func (llm *LLM) marshalPrompt(ctx []C.llama_token, prompt string) []C.llama_toke
|
||||
if len(tokens) >= llm.NumCtx {
|
||||
// truncate input
|
||||
numLeft := (llm.NumCtx - llm.NumKeep) / 2
|
||||
truncated := tokens[:llm.NumKeep]
|
||||
erasedBlocks := (len(tokens) - llm.NumKeep - numLeft - 1) / numLeft
|
||||
truncated = append(truncated, tokens[llm.NumKeep+erasedBlocks*numLeft:]...)
|
||||
copy(llm.last, tokens[len(tokens)-llm.NumCtx:])
|
||||
truncated := cTokens[:llm.NumKeep]
|
||||
erasedBlocks := (len(cTokens) - llm.NumKeep - numLeft - 1) / numLeft
|
||||
truncated = append(truncated, cTokens[llm.NumKeep+erasedBlocks*numLeft:]...)
|
||||
copy(llm.last, cTokens[len(cTokens)-llm.NumCtx:])
|
||||
|
||||
tokens = truncated
|
||||
cTokens = truncated
|
||||
log.Printf("input truncated: num_ctx=%d num_keep=%d num_left=%d num_tokens=%d", llm.NumCtx, llm.NumKeep, numLeft, len(truncated))
|
||||
} else {
|
||||
llm.last = make([]C.llama_token, llm.NumCtx-len(tokens))
|
||||
llm.last = append(llm.last, tokens...)
|
||||
llm.last = make([]C.llama_token, llm.NumCtx-len(cTokens))
|
||||
llm.last = append(llm.last, cTokens...)
|
||||
}
|
||||
|
||||
var i int
|
||||
for i = 0; i < len(llm.embd) && i < len(tokens) && llm.embd[i] == tokens[i]; i++ {
|
||||
for i = 0; i < len(llm.embd) && i < len(cTokens) && llm.embd[i] == cTokens[i]; i++ {
|
||||
// noop
|
||||
}
|
||||
|
||||
llm.embd = tokens
|
||||
if i == len(tokens) {
|
||||
llm.embd = cTokens
|
||||
if i == len(cTokens) {
|
||||
// evaluate at least one token to generate logits
|
||||
i--
|
||||
}
|
||||
@@ -295,31 +413,36 @@ func (llm *LLM) marshalPrompt(ctx []C.llama_token, prompt string) []C.llama_toke
|
||||
llm.cursor = i
|
||||
|
||||
log.Printf("prompt: num_past=%d cached=%v eval=%v", i, len(llm.embd[:i]), len(llm.embd[i:]))
|
||||
return tokens
|
||||
return cTokens
|
||||
}
|
||||
|
||||
func (llm *LLM) Encode(prompt string) []C.llama_token {
|
||||
func (llm *llama) Encode(prompt string) []int {
|
||||
cPrompt := C.CString(prompt)
|
||||
defer C.free(unsafe.Pointer(cPrompt))
|
||||
|
||||
tokens := make([]C.llama_token, len(prompt)+1)
|
||||
if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(tokens), C.int(len(tokens)), true); n > 0 {
|
||||
return tokens[:n]
|
||||
cTokens := make([]C.llama_token, len(prompt)+1)
|
||||
if n := C.llama_tokenize(llm.ctx, cPrompt, unsafe.SliceData(cTokens), C.int(len(cTokens)), true); n > 0 {
|
||||
tokens := make([]int, n)
|
||||
for i := range cTokens[:n] {
|
||||
tokens[i] = int(cTokens[i])
|
||||
}
|
||||
|
||||
return tokens
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (llm *LLM) Decode(tokens ...C.llama_token) string {
|
||||
func (llm *llama) Decode(tokens ...int) string {
|
||||
var sb strings.Builder
|
||||
for _, token := range tokens {
|
||||
sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, token)))
|
||||
sb.WriteString(C.GoString(C.llama_token_to_str(llm.ctx, C.llama_token(token))))
|
||||
}
|
||||
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func (llm *LLM) next() (C.llama_token, error) {
|
||||
func (llm *llama) next() (C.llama_token, error) {
|
||||
llm.mu.Lock()
|
||||
defer llm.mu.Unlock()
|
||||
|
||||
@@ -410,7 +533,7 @@ func (llm *LLM) next() (C.llama_token, error) {
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func (llm *LLM) Embedding(input string) ([]float64, error) {
|
||||
func (llm *llama) Embedding(input string) ([]float64, error) {
|
||||
if !llm.EmbeddingOnly {
|
||||
return nil, errors.New("llama: embedding not enabled")
|
||||
}
|
||||
@@ -420,11 +543,18 @@ func (llm *LLM) Embedding(input string) ([]float64, error) {
|
||||
return nil, errors.New("llama: tokenize embedding")
|
||||
}
|
||||
|
||||
retval := C.llama_eval(llm.ctx, unsafe.SliceData(tokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
|
||||
cTokens := make([]C.llama_token, len(tokens))
|
||||
for i := range tokens {
|
||||
cTokens[i] = C.llama_token(tokens[i])
|
||||
}
|
||||
|
||||
retval := C.llama_eval(llm.ctx, unsafe.SliceData(cTokens), C.int(len(tokens)), 0, C.int(llm.NumThread))
|
||||
if retval != 0 {
|
||||
return nil, errors.New("llama: eval")
|
||||
}
|
||||
|
||||
C.llama_print_timings(llm.ctx)
|
||||
|
||||
n := C.llama_n_embd(llm.ctx)
|
||||
if n <= 0 {
|
||||
return nil, errors.New("llama: no embeddings generated")
|
@@ -1,5 +1,5 @@
/**
 * llama.cpp - git 8183159cf3def112f6d1fe94815fce70e1bffa12
 * llama.cpp - git 3ebb00935f3f0522b75df49c2769ab1774b91380
 *
 * MIT License
 *
@@ -112,7 +112,20 @@ extern "C" {

    typedef void (*llama_progress_callback)(float progress, void *ctx);

    struct llama_context_params {
    enum llama_log_level {
        LLAMA_LOG_LEVEL_ERROR = 2,
        LLAMA_LOG_LEVEL_WARN  = 3,
        LLAMA_LOG_LEVEL_INFO  = 4
    };

    // Signature for logging events
    // Note that text includes the new line character at the end for most events.
    // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
    // if it exists.
    // It might not exist for progress report where '.' is output repeatedly.
    typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);

    struct llama_context_params {
        uint32_t seed;   // RNG seed, -1 for random
        int32_t n_ctx;   // text context
        int32_t n_batch; // prompt processing batch size
@@ -221,6 +234,10 @@ extern "C" {
        int32_t n_eval;
    };

    // Set callback for all future logging events.
    // If this is not called, or NULL is supplied, everything is output on stderr.
    LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);

    LLAMA_API int llama_max_devices();

    LLAMA_API struct llama_context_params llama_context_default_params();
@@ -1,4 +1,4 @@
package llama
package llm

import (
	"bytes"
@@ -39,6 +39,7 @@ func initBackend() error {
	if err != nil {
		return err
	}
	defer actual.Close()

	actualSum := sha256.New()
	if _, err := io.Copy(actualSum, actual); err != nil {
74
llm/llm.go
Normal file
@@ -0,0 +1,74 @@
package llm

import (
	"fmt"
	"log"
	"os"

	"github.com/pbnjay/memory"

	"github.com/jmorganca/ollama/api"
)

type LLM interface {
	Predict([]int, string, func(api.GenerateResponse)) error
	Embedding(string) ([]float64, error)
	Encode(string) []int
	Decode(...int) string
	SetOptions(api.Options)
	Close()
}

func New(model string, adapters []string, opts api.Options) (LLM, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, err := DecodeGGML(f, ModelFamilyLlama)
	if err != nil {
		return nil, err
	}

	switch ggml.FileType().String() {
	case "F32", "F16", "Q5_0", "Q5_1", "Q8_0":
		if opts.NumGPU != 0 {
			// F32, F16, Q5_0, Q5_1, and Q8_0 do not support Metal API and will
			// cause the runner to segmentation fault so disable GPU
			log.Printf("WARNING: GPU disabled for F32, F16, Q5_0, Q5_1, and Q8_0")
			opts.NumGPU = 0
		}
	}

	totalResidentMemory := memory.TotalMemory() // reported in bytes
	switch ggml.ModelType() {
	case ModelType3B, ModelType7B:
		if totalResidentMemory < 8*1024*1024*1024 {
			return nil, fmt.Errorf("model requires at least 8GB of memory")
		}
	case ModelType13B:
		if totalResidentMemory < 16*1024*1024*1024 {
			return nil, fmt.Errorf("model requires at least 16GB of memory")
		}
	case ModelType30B:
		if totalResidentMemory < 32*1024*1024*1024 {
			return nil, fmt.Errorf("model requires at least 32GB of memory")
		}
	case ModelType65B:
		if totalResidentMemory < 64*1024*1024*1024 {
			return nil, fmt.Errorf("model requires at least 64GB of memory")
		}
	}

	switch ggml.ModelFamily() {
	case ModelFamilyLlama:
		return newLlama(model, adapters, opts)
	default:
		return nil, fmt.Errorf("unknown ggml type: %s", ggml.ModelFamily())
	}
}
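The new `llm` package above hides model loading behind the `LLM` interface. As a rough usage sketch, not part of this changeset: a caller builds options, loads a GGML model file, and streams tokens through the `Predict` callback. `api.DefaultOptions` and the `Response` field on `api.GenerateResponse` are assumptions drawn from surrounding code in this diff, not guaranteed by the hunk itself.

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llm"
)

func main() {
	opts := api.DefaultOptions() // assumed default-options constructor, referenced elsewhere in this diff

	// pass nil for the new adapters argument when no LoRA adapters are used
	model, err := llm.New("/path/to/model.bin", nil, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer model.Close()

	// Predict takes the prior context tokens, the prompt, and a streaming callback
	err = model.Predict(nil, "Why is the sky blue?", func(r api.GenerateResponse) {
		fmt.Print(r.Response) // field name is an assumption
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Supplying adapter paths in the second argument would be forwarded to `newLlama`, which applies them via `llama_model_apply_lora_from_file` as shown earlier in this diff.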
@@ -1,4 +1,4 @@
|
||||
package llama
|
||||
package llm
|
||||
|
||||
import (
|
||||
"fmt"
|
3
main.go
@@ -4,8 +4,9 @@ import (
	"context"

	"github.com/jmorganca/ollama/cmd"
	"github.com/spf13/cobra"
)

func main() {
	cmd.NewCLI().ExecuteContext(context.Background())
	cobra.CheckErr(cmd.NewCLI().ExecuteContext(context.Background()))
}
@@ -40,16 +40,22 @@ func Parse(reader io.Reader) ([]Command, error) {
		command.Args = string(fields[1])
		// copy command for validation
		modelCommand = command
	case "LICENSE", "TEMPLATE", "SYSTEM", "PROMPT", "EMBED":
	case "LICENSE", "TEMPLATE", "SYSTEM", "PROMPT", "EMBED", "ADAPTER":
		command.Name = string(bytes.ToLower(fields[0]))
		command.Args = string(fields[1])
	case "PARAMETER":
		fields = bytes.SplitN(fields[1], []byte(" "), 2)
		if len(fields) < 2 {
			return nil, fmt.Errorf("missing value for %s", fields)
		}

		command.Name = string(fields[0])
		command.Args = string(fields[1])
	default:
		// log a warning for unknown commands
		log.Printf("WARNING: Unknown command: %s", fields[0])
		if !bytes.HasPrefix(fields[0], []byte("#")) {
			// log a warning for unknown commands
			log.Printf("WARNING: Unknown command: %s", fields[0])
		}
		continue
	}

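The parser change above adds `ADAPTER` to the set of recognized Modelfile commands, matching the new `adapter` case in `server/images.go` later in this diff. A minimal, hypothetical driver for the parser follows; the Modelfile text is illustrative, and the `parser` import path and `Command.Name`/`Command.Args` fields are taken from elsewhere in this changeset.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/jmorganca/ollama/parser"
)

func main() {
	// illustrative Modelfile: FROM plus the newly recognized ADAPTER command
	modelfile := "FROM ./llama2-7b-q4_0.bin\nADAPTER ./my-lora-adapter.bin\n"

	commands, err := parser.Parse(strings.NewReader(modelfile))
	if err != nil {
		log.Fatal(err)
	}

	for _, c := range commands {
		// command names are lowercased by Parse, e.g. "adapter => ./my-lora-adapter.bin"
		fmt.Printf("%s => %s\n", c.Name, c.Args)
	}
}
```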
@@ -2,6 +2,7 @@ package server
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
@@ -9,7 +10,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
@@ -48,10 +48,15 @@ func (r AuthRedirect) URL() (string, error) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%s?service=%s&scope=%s&ts=%d&nonce=%s", r.Realm, r.Service, r.Scope, time.Now().Unix(), nonce), nil
|
||||
scopes := []string{}
|
||||
for _, s := range strings.Split(r.Scope, " ") {
|
||||
scopes = append(scopes, fmt.Sprintf("scope=%s", s))
|
||||
}
|
||||
scopeStr := strings.Join(scopes, "&")
|
||||
return fmt.Sprintf("%s?service=%s&%s&ts=%d&nonce=%s", r.Realm, r.Service, scopeStr, time.Now().Unix(), nonce), nil
|
||||
}
|
||||
|
||||
func getAuthToken(redirData AuthRedirect, regOpts *RegistryOptions) (string, error) {
|
||||
func getAuthToken(ctx context.Context, redirData AuthRedirect, regOpts *RegistryOptions) (string, error) {
|
||||
url, err := redirData.URL()
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -62,9 +67,9 @@ func getAuthToken(redirData AuthRedirect, regOpts *RegistryOptions) (string, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
keyPath := path.Join(home, ".ollama/id_ed25519")
|
||||
keyPath := path.Join(home, ".ollama", "id_ed25519")
|
||||
|
||||
rawKey, err := ioutil.ReadFile(keyPath)
|
||||
rawKey, err := os.ReadFile(keyPath)
|
||||
if err != nil {
|
||||
log.Printf("Failed to load private key: %v", err)
|
||||
return "", err
|
||||
@@ -93,7 +98,7 @@ func getAuthToken(redirData AuthRedirect, regOpts *RegistryOptions) (string, err
|
||||
"Authorization": sig,
|
||||
}
|
||||
|
||||
resp, err := makeRequest("GET", url, headers, nil, regOpts)
|
||||
resp, err := makeRequest(ctx, "GET", url, headers, nil, regOpts)
|
||||
if err != nil {
|
||||
log.Printf("couldn't get token: %q", err)
|
||||
}
|
||||
|
@@ -25,17 +25,27 @@ type FileDownload struct {
|
||||
|
||||
var inProgress sync.Map // map of digests currently being downloaded to their current download progress
|
||||
|
||||
type downloadOpts struct {
|
||||
mp ModelPath
|
||||
digest string
|
||||
regOpts *RegistryOptions
|
||||
fn func(api.ProgressResponse)
|
||||
retry int // track the number of retries on this download
|
||||
}
|
||||
|
||||
const maxRetry = 3
|
||||
|
||||
// downloadBlob downloads a blob from the registry and stores it in the blobs directory
|
||||
func downloadBlob(ctx context.Context, mp ModelPath, digest string, regOpts *RegistryOptions, fn func(api.ProgressResponse)) error {
|
||||
fp, err := GetBlobsPath(digest)
|
||||
func downloadBlob(ctx context.Context, opts downloadOpts) error {
|
||||
fp, err := GetBlobsPath(opts.digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if fi, _ := os.Stat(fp); fi != nil {
|
||||
// we already have the file, so return
|
||||
fn(api.ProgressResponse{
|
||||
Digest: digest,
|
||||
opts.fn(api.ProgressResponse{
|
||||
Digest: opts.digest,
|
||||
Total: int(fi.Size()),
|
||||
Completed: int(fi.Size()),
|
||||
})
|
||||
@@ -44,24 +54,33 @@ func downloadBlob(ctx context.Context, mp ModelPath, digest string, regOpts *Reg
|
||||
}
|
||||
|
||||
fileDownload := &FileDownload{
|
||||
Digest: digest,
|
||||
Digest: opts.digest,
|
||||
FilePath: fp,
|
||||
Total: 1, // dummy value to indicate that we don't know the total size yet
|
||||
Completed: 0,
|
||||
}
|
||||
|
||||
_, downloading := inProgress.LoadOrStore(digest, fileDownload)
|
||||
_, downloading := inProgress.LoadOrStore(opts.digest, fileDownload)
|
||||
if downloading {
|
||||
// this is another client requesting the server to download the same blob concurrently
|
||||
return monitorDownload(ctx, mp, regOpts, fileDownload, fn)
|
||||
return monitorDownload(ctx, opts, fileDownload)
|
||||
}
|
||||
return doDownload(ctx, mp, regOpts, fileDownload, fn)
|
||||
if err := doDownload(ctx, opts, fileDownload); err != nil {
|
||||
if errors.Is(err, errDownload) && opts.retry < maxRetry {
|
||||
opts.retry++
|
||||
log.Print(err)
|
||||
log.Printf("retrying download of %s", opts.digest)
|
||||
return downloadBlob(ctx, opts)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var downloadMu sync.Mutex // mutex to check to resume a download while monitoring
|
||||
|
||||
// monitorDownload monitors the download progress of a blob and resumes it if it is interrupted
|
||||
func monitorDownload(ctx context.Context, mp ModelPath, regOpts *RegistryOptions, f *FileDownload, fn func(api.ProgressResponse)) error {
|
||||
func monitorDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
|
||||
tick := time.NewTicker(time.Second)
|
||||
for range tick.C {
|
||||
done, resume, err := func() (bool, bool, error) {
|
||||
@@ -72,7 +91,7 @@ func monitorDownload(ctx context.Context, mp ModelPath, regOpts *RegistryOptions
|
||||
// check once again if the download is complete
|
||||
if fi, _ := os.Stat(f.FilePath); fi != nil {
|
||||
// successful download while monitoring
|
||||
fn(api.ProgressResponse{
|
||||
opts.fn(api.ProgressResponse{
|
||||
Digest: f.Digest,
|
||||
Total: int(fi.Size()),
|
||||
Completed: int(fi.Size()),
|
||||
@@ -87,7 +106,7 @@ func monitorDownload(ctx context.Context, mp ModelPath, regOpts *RegistryOptions
|
||||
if !ok {
|
||||
return false, false, fmt.Errorf("invalid type for in progress download: %T", val)
|
||||
}
|
||||
fn(api.ProgressResponse{
|
||||
opts.fn(api.ProgressResponse{
|
||||
Status: fmt.Sprintf("downloading %s", f.Digest),
|
||||
Digest: f.Digest,
|
||||
Total: int(f.Total),
|
||||
@@ -103,16 +122,20 @@ func monitorDownload(ctx context.Context, mp ModelPath, regOpts *RegistryOptions
|
||||
return nil
|
||||
}
|
||||
if resume {
|
||||
return doDownload(ctx, mp, regOpts, f, fn)
|
||||
return doDownload(ctx, opts, f)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var chunkSize = 1024 * 1024 // 1 MiB in bytes
|
||||
var (
|
||||
chunkSize = 1024 * 1024 // 1 MiB in bytes
|
||||
errDownload = fmt.Errorf("download failed")
|
||||
)
|
||||
|
||||
// doDownload downloads a blob from the registry and stores it in the blobs directory
|
||||
func doDownload(ctx context.Context, mp ModelPath, regOpts *RegistryOptions, f *FileDownload, fn func(api.ProgressResponse)) error {
|
||||
func doDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
|
||||
defer inProgress.Delete(f.Digest)
|
||||
var size int64
|
||||
|
||||
fi, err := os.Stat(f.FilePath + "-partial")
|
||||
@@ -132,21 +155,21 @@ func doDownload(ctx context.Context, mp ModelPath, regOpts *RegistryOptions, f *
|
||||
}
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/v2/%s/blobs/%s", mp.Registry, mp.GetNamespaceRepository(), f.Digest)
|
||||
url := fmt.Sprintf("%s/v2/%s/blobs/%s", opts.mp.Registry, opts.mp.GetNamespaceRepository(), f.Digest)
|
||||
headers := map[string]string{
|
||||
"Range": fmt.Sprintf("bytes=%d-", size),
|
||||
}
|
||||
|
||||
resp, err := makeRequest("GET", url, headers, nil, regOpts)
|
||||
resp, err := makeRequest(ctx, "GET", url, headers, nil, opts.regOpts)
|
||||
if err != nil {
|
||||
log.Printf("couldn't download blob: %v", err)
|
||||
return err
|
||||
return fmt.Errorf("%w: %w", errDownload, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
return fmt.Errorf("on download registry responded with code %d: %v", resp.StatusCode, string(body))
|
||||
return fmt.Errorf("%w: on download registry responded with code %d: %v", errDownload, resp.StatusCode, string(body))
|
||||
}
|
||||
|
||||
err = os.MkdirAll(path.Dir(f.FilePath), 0o700)
|
||||
@@ -173,7 +196,7 @@ outerLoop:
|
||||
inProgress.Delete(f.Digest)
|
||||
return nil
|
||||
default:
|
||||
fn(api.ProgressResponse{
|
||||
opts.fn(api.ProgressResponse{
|
||||
Status: fmt.Sprintf("downloading %s", f.Digest),
|
||||
Digest: f.Digest,
|
||||
Total: int(f.Total),
|
||||
@@ -186,7 +209,7 @@ outerLoop:
|
||||
}
|
||||
|
||||
if err := os.Rename(f.FilePath+"-partial", f.FilePath); err != nil {
|
||||
fn(api.ProgressResponse{
|
||||
opts.fn(api.ProgressResponse{
|
||||
Status: fmt.Sprintf("error renaming file: %v", err),
|
||||
Digest: f.Digest,
|
||||
Total: int(f.Total),
|
||||
@@ -201,15 +224,13 @@ outerLoop:
|
||||
|
||||
n, err := io.CopyN(out, resp.Body, int64(chunkSize))
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return err
|
||||
return fmt.Errorf("%w: %w", errDownload, err)
|
||||
}
|
||||
f.Completed += n
|
||||
|
||||
inProgress.Store(f.Digest, f)
|
||||
}
|
||||
|
||||
inProgress.Delete(f.Digest)
|
||||
|
||||
log.Printf("success getting %s\n", f.Digest)
|
||||
return nil
|
||||
}
|
||||
|
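The download changes above wrap transport and HTTP failures in the sentinel `errDownload` so that `downloadBlob` can retry up to `maxRetry` times before giving up. A self-contained sketch of that retry gate, using only the standard library; the helper name and status code here are illustrative, not from the diff.

```go
package main

import (
	"errors"
	"fmt"
)

var errDownload = fmt.Errorf("download failed")

// fetch stands in for doDownload: it wraps low-level failures in errDownload.
func fetch(attempt int) error {
	if attempt < 2 {
		return fmt.Errorf("%w: registry responded with code %d", errDownload, 503)
	}
	return nil
}

func main() {
	const maxRetry = 3
	for attempt := 0; ; attempt++ {
		err := fetch(attempt)
		if err == nil {
			fmt.Println("download complete")
			return
		}
		// errors.Is matches the wrapped sentinel, mirroring the retry gate in downloadBlob
		if errors.Is(err, errDownload) && attempt < maxRetry {
			fmt.Println("retrying:", err)
			continue
		}
		fmt.Println("giving up:", err)
		return
	}
}
```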
421
server/images.go
@@ -13,17 +13,20 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/jmorganca/ollama/api"
|
||||
"github.com/jmorganca/ollama/llama"
|
||||
"github.com/jmorganca/ollama/llm"
|
||||
"github.com/jmorganca/ollama/parser"
|
||||
"github.com/jmorganca/ollama/vector"
|
||||
)
|
||||
|
||||
const MaxRetries = 3
|
||||
|
||||
type RegistryOptions struct {
|
||||
Insecure bool
|
||||
Username string
|
||||
@@ -32,13 +35,14 @@ type RegistryOptions struct {
|
||||
}
|
||||
|
||||
type Model struct {
|
||||
Name string `json:"name"`
|
||||
ModelPath string
|
||||
Template string
|
||||
System string
|
||||
Digest string
|
||||
Options map[string]interface{}
|
||||
Embeddings []vector.Embedding
|
||||
Name string `json:"name"`
|
||||
ModelPath string
|
||||
AdapterPaths []string
|
||||
Template string
|
||||
System string
|
||||
Digest string
|
||||
Options map[string]interface{}
|
||||
Embeddings []vector.Embedding
|
||||
}
|
||||
|
||||
func (m *Model) Prompt(request api.GenerateRequest, embedding string) (string, error) {
|
||||
@@ -91,6 +95,7 @@ type Layer struct {
|
||||
MediaType string `json:"mediaType"`
|
||||
Digest string `json:"digest"`
|
||||
Size int `json:"size"`
|
||||
From string `json:"from,omitempty"`
|
||||
}
|
||||
|
||||
type LayerReader struct {
|
||||
@@ -99,9 +104,14 @@ type LayerReader struct {
|
||||
}
|
||||
|
||||
type ConfigV2 struct {
|
||||
ModelFamily llm.ModelFamily `json:"model_family"`
|
||||
ModelType string `json:"model_type"`
|
||||
FileType string `json:"file_type"`
|
||||
RootFS RootFS `json:"rootfs"`
|
||||
|
||||
// required by spec
|
||||
Architecture string `json:"architecture"`
|
||||
OS string `json:"os"`
|
||||
RootFS RootFS `json:"rootfs"`
|
||||
}
|
||||
|
||||
type RootFS struct {
|
||||
@@ -174,6 +184,8 @@ func GetModel(name string) (*Model, error) {
|
||||
if err = json.NewDecoder(file).Decode(&model.Embeddings); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "application/vnd.ollama.image.adapter":
|
||||
model.AdapterPaths = append(model.AdapterPaths, filename)
|
||||
case "application/vnd.ollama.image.template":
|
||||
bts, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
@@ -246,16 +258,22 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
|
||||
return err
|
||||
}
|
||||
|
||||
config := ConfigV2{
|
||||
Architecture: "amd64",
|
||||
OS: "linux",
|
||||
}
|
||||
|
||||
var layers []*LayerReader
|
||||
params := make(map[string][]string)
|
||||
embed := EmbeddingParams{fn: fn, opts: api.DefaultOptions()}
|
||||
embed := EmbeddingParams{fn: fn}
|
||||
for _, c := range commands {
|
||||
log.Printf("[%s] - %s\n", c.Name, c.Args)
|
||||
switch c.Name {
|
||||
case "model":
|
||||
fn(api.ProgressResponse{Status: "looking for model"})
|
||||
embed.model = c.Args
|
||||
mf, err := GetManifest(ParseModelPath(c.Args))
|
||||
mp := ParseModelPath(c.Args)
|
||||
mf, err := GetManifest(mp)
|
||||
if err != nil {
|
||||
modelFile, err := filenameWithPath(path, c.Args)
|
||||
if err != nil {
|
||||
@@ -276,6 +294,7 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
embed.model = modelFile
|
||||
// create a model from this specified file
|
||||
fn(api.ProgressResponse{Status: "creating model layer"})
|
||||
file, err := os.Open(modelFile)
|
||||
@@ -284,6 +303,18 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
ggml, err := llm.DecodeGGML(file, llm.ModelFamilyLlama)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config.ModelFamily = ggml.ModelFamily()
|
||||
config.ModelType = ggml.ModelType().String()
|
||||
config.FileType = ggml.FileType().String()
|
||||
|
||||
// reset the file
|
||||
file.Seek(0, io.SeekStart)
|
||||
|
||||
l, err := CreateLayer(file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create layer: %v", err)
|
||||
@@ -292,6 +323,7 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
|
||||
layers = append(layers, l)
|
||||
}
|
||||
}
|
||||
|
||||
if mf != nil {
|
||||
log.Printf("manifest = %#v", mf)
|
||||
for _, l := range mf.Layers {
|
||||
@@ -299,6 +331,7 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
if err != nil {
return err
}
newLayer.From = mp.GetNamespaceRepository()
layers = append(layers, newLayer)
}
}
@@ -308,6 +341,40 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
return err
}
embed.files = append(embed.files, embedFilePath)
case "adapter":
fn(api.ProgressResponse{Status: fmt.Sprintf("creating model %s layer", c.Name)})

fp := c.Args
if strings.HasPrefix(fp, "~/") {
parts := strings.Split(fp, "/")
home, err := os.UserHomeDir()
if err != nil {
return fmt.Errorf("failed to open file: %v", err)
}

fp = filepath.Join(home, filepath.Join(parts[1:]...))
}

// If filePath is not an absolute path, make it relative to the modelfile path
if !filepath.IsAbs(fp) {
fp = filepath.Join(filepath.Dir(path), fp)
}

// create a model from this specified file
fn(api.ProgressResponse{Status: "creating model layer"})

file, err := os.Open(fp)
if err != nil {
return fmt.Errorf("failed to open file: %v", err)
}
defer file.Close()

l, err := CreateLayer(file)
if err != nil {
return fmt.Errorf("failed to create layer: %v", err)
}
l.MediaType = "application/vnd.ollama.image.adapter"
layers = append(layers, l)
case "license":
fn(api.ProgressResponse{Status: fmt.Sprintf("creating model %s layer", c.Name)})
mediaType := fmt.Sprintf("application/vnd.ollama.image.%s", c.Name)
@@ -321,7 +388,7 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
layers = append(layers, layer)
case "template", "system", "prompt":
fn(api.ProgressResponse{Status: fmt.Sprintf("creating model %s layer", c.Name)})
// remove the prompt layer if one exists
// remove the layer if one exists
mediaType := fmt.Sprintf("application/vnd.ollama.image.%s", c.Name)
layers = removeLayerFromLayers(layers, mediaType)

@@ -360,8 +427,7 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
layers = append(layers, l)

// apply these parameters to the embedding options, in case embeddings need to be generated using this model
embed.opts = api.DefaultOptions()
embed.opts.FromMap(formattedParams)
embed.opts = formattedParams
}

// generate the embedding layers
@@ -383,14 +449,13 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api

// Create a layer for the config object
fn(api.ProgressResponse{Status: "creating config layer"})
cfg, err := createConfigLayer(digests)
cfg, err := createConfigLayer(config, digests)
if err != nil {
return err
}
layers = append(layers, cfg)

err = SaveLayers(layers, fn, false)
if err != nil {
if err := SaveLayers(layers, fn, false); err != nil {
return err
}

@@ -407,7 +472,7 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api

type EmbeddingParams struct {
model string
opts api.Options
opts map[string]interface{}
files []string // paths to files to embed
fn func(resp api.ProgressResponse)
}
@@ -416,29 +481,25 @@ type EmbeddingParams struct {
func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
layers := []*LayerReader{}
if len(e.files) > 0 {
if _, err := os.Stat(e.model); err != nil {
if os.IsNotExist(err) {
// this is a model name rather than the file
model, err := GetModel(e.model)
if err != nil {
return nil, fmt.Errorf("failed to get model to generate embeddings: %v", err)
}
e.model = model.ModelPath
} else {
return nil, fmt.Errorf("failed to get model file to generate embeddings: %v", err)
// check if the model is a file path or a model name
model, err := GetModel(e.model)
if err != nil {
if !strings.Contains(err.Error(), "couldn't open file") {
return nil, fmt.Errorf("unexpected error opening model to generate embeddings: %v", err)
}
// the model may be a file path, create a model from this file
model = &Model{ModelPath: e.model}
}

e.opts.EmbeddingOnly = true
llm, err := llama.New(e.model, e.opts)
if err != nil {
if err := load(model, e.opts, defaultSessionDuration); err != nil {
return nil, fmt.Errorf("load model to generate embeddings: %v", err)
}
defer func() {
if llm != nil {
llm.Close()
}
}()

// this will be used to check if we already have embeddings for a file
modelInfo, err := os.Stat(model.ModelPath)
if err != nil {
return nil, fmt.Errorf("failed to get model file info: %v", err)
}

addedFiles := make(map[string]bool) // keep track of files that have already been added
for _, filePattern := range e.files {
@@ -452,6 +513,14 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
continue
}
addedFiles[filePath] = true
// check if we already have embeddings for this file path
layerIdentifier := fmt.Sprintf("%s:%s:%s:%d", filePath, e.model, modelInfo.ModTime().Format("2006-01-02 15:04:05"), modelInfo.Size())
digest, _ := GetSHA256Digest(strings.NewReader(layerIdentifier))
existing, err := existingFileEmbeddings(digest)
if err != nil {
return nil, fmt.Errorf("failed to check existing embeddings for file %s: %v", filePath, err)
}

// TODO: check file type
f, err := os.Open(filePath)
if err != nil {
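The hunk above keys cached embeddings on the file path plus the identity of the model file (path, modification time, size), so any change to the model forces the embeddings to be regenerated. A minimal, self-contained sketch of that invalidation idea; the helper name and sample paths are illustrative, and `sha256.Sum256` stands in for the repository's `GetSHA256Digest`:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
)

// cacheKey mirrors the layerIdentifier idea from the diff: the digest changes
// whenever the embedded file path, the model path, or the model file's
// modification time or size changes, so stale embeddings are never reused.
func cacheKey(filePath, modelPath string) (string, error) {
	info, err := os.Stat(modelPath)
	if err != nil {
		return "", err
	}
	id := fmt.Sprintf("%s:%s:%s:%d", filePath, modelPath,
		info.ModTime().Format("2006-01-02 15:04:05"), info.Size())
	return fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id))), nil
}

func main() {
	key, err := cacheKey("docs/notes.txt", "/tmp/model.bin") // hypothetical paths
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(key)
}
```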
@@ -480,7 +549,12 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
Total: len(data) - 1,
Completed: i,
})
embed, err := llm.Embedding(d)
if len(existing[d]) > 0 {
// already have an embedding for this line
embeddings = append(embeddings, vector.Embedding{Data: d, Vector: existing[d]})
continue
}
embed, err := loaded.llm.Embedding(d)
if err != nil {
log.Printf("failed to generate embedding for '%s' line %d: %v", filePath, i+1, err)
continue
@@ -494,17 +568,11 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
}
r := bytes.NewReader(b)

digest, size := GetSHA256Digest(r)
// Reset the position of the reader after calculating the digest
if _, err := r.Seek(0, io.SeekStart); err != nil {
return nil, fmt.Errorf("could not reset embed reader: %w", err)
}

layer := &LayerReader{
Layer: Layer{
MediaType: "application/vnd.ollama.image.embed",
Digest: digest,
Size: size,
Size: r.Len(),
},
Reader: r,
}
@@ -516,6 +584,32 @@ func embeddingLayers(e EmbeddingParams) ([]*LayerReader, error) {
return layers, nil
}

// existingFileEmbeddings checks if we already have embeddings for a file and loads them into a look-up map
func existingFileEmbeddings(digest string) (map[string][]float64, error) {
path, err := GetBlobsPath(digest)
if err != nil {
return nil, fmt.Errorf("embeddings blobs path: %w", err)
}
existingFileEmbeddings := make(map[string][]float64)
if _, err := os.Stat(path); err == nil {
// already have some embeddings for this file, load embeddings previously generated
file, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("failed to open existing embedding file: %s", err)
}
defer file.Close()

existing := []vector.Embedding{}
if err = json.NewDecoder(file).Decode(&existing); err != nil {
return nil, err
}
for _, e := range existing {
existingFileEmbeddings[e.Data] = e.Vector
}
}
return existingFileEmbeddings, nil
}

func removeLayerFromLayers(layers []*LayerReader, mediaType string) []*LayerReader {
j := 0
for _, l := range layers {
@@ -536,7 +630,8 @@ func SaveLayers(layers []*LayerReader, fn func(resp api.ProgressResponse), force
}

_, err = os.Stat(fp)
if os.IsNotExist(err) || force {
// note: embed layers are always written since their digest doesnt indicate anything about the contents
if os.IsNotExist(err) || force || layer.MediaType == "application/vnd.ollama.image.embed" {
fn(api.ProgressResponse{Status: fmt.Sprintf("writing layer %s", layer.Digest)})

out, err := os.Create(fp)
@@ -676,7 +771,7 @@ func getLayerDigests(layers []*LayerReader) ([]string, error) {
// CreateLayer creates a Layer object from a given file
func CreateLayer(f io.ReadSeeker) (*LayerReader, error) {
digest, size := GetSHA256Digest(f)
f.Seek(0, 0)
f.Seek(0, io.SeekStart)

layer := &LayerReader{
Layer: Layer{
@@ -768,10 +863,6 @@ func DeleteModel(name string) error {
return err
}

if err != nil {
return err
}

// only delete the files which are still in the deleteMap
for k, v := range deleteMap {
if v {
@@ -800,7 +891,7 @@ func DeleteModel(name string) error {
return nil
}

func PushModel(name string, regOpts *RegistryOptions, fn func(api.ProgressResponse)) error {
func PushModel(ctx context.Context, name string, regOpts *RegistryOptions, fn func(api.ProgressResponse)) error {
mp := ParseModelPath(name)

fn(api.ProgressResponse{Status: "retrieving manifest"})
@@ -816,7 +907,7 @@ func PushModel(name string, regOpts *RegistryOptions, fn func(api.ProgressRespon
layers = append(layers, &manifest.Config)

for _, layer := range layers {
exists, err := checkBlobExistence(mp, layer.Digest, regOpts)
exists, err := checkBlobExistence(ctx, mp, layer.Digest, regOpts)
if err != nil {
return err
}
@@ -838,14 +929,24 @@ func PushModel(name string, regOpts *RegistryOptions, fn func(api.ProgressRespon
Total: layer.Size,
})

location, err := startUpload(mp, regOpts)
location, err := startUpload(ctx, mp, layer, regOpts)
if err != nil {
log.Printf("couldn't start upload: %v", err)
return err
}

err = uploadBlobChunked(mp, location, layer, regOpts, fn)
if err != nil {
if strings.HasPrefix(path.Base(location), "sha256:") {
layer.Digest = path.Base(location)
fn(api.ProgressResponse{
Status: "using existing layer",
Digest: layer.Digest,
Total: layer.Size,
Completed: layer.Size,
})
continue
}

if err := uploadBlobChunked(ctx, mp, location, layer, regOpts, fn); err != nil {
log.Printf("error uploading blob: %v", err)
return err
}
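The new push loop treats the Location returned by startUpload as a signal: if its final path element already names a sha256 digest, the registry satisfied a cross-repo mount and no bytes need to be uploaded for that layer. A minimal sketch of that check; the helper name and sample URLs are illustrative, not part of the codebase:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// mountedDigest reports whether an upload Location returned by the registry
// already names a blob digest, which is how the push loop above decides the
// layer was cross-repo mounted and the chunked upload can be skipped.
func mountedDigest(location string) (string, bool) {
	base := path.Base(location)
	if strings.HasPrefix(base, "sha256:") {
		return base, true
	}
	return "", false
}

func main() {
	// hypothetical Location headers
	fmt.Println(mountedDigest("/v2/library/llama2/blobs/sha256:abc123"))
	fmt.Println(mountedDigest("/v2/library/llama2/blobs/uploads/0a1b2c3d"))
}
```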
@@ -862,18 +963,12 @@ func PushModel(name string, regOpts *RegistryOptions, fn func(api.ProgressRespon
return err
}

resp, err := makeRequest("PUT", url, headers, bytes.NewReader(manifestJSON), regOpts)
resp, err := makeRequestWithRetry(ctx, "PUT", url, headers, bytes.NewReader(manifestJSON), regOpts)
if err != nil {
return err
}
defer resp.Body.Close()

// Check for success: For a successful upload, the Docker registry will respond with a 201 Created
if resp.StatusCode != http.StatusCreated {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("on push registry responded with code %d: %v", resp.StatusCode, string(body))
}

fn(api.ProgressResponse{Status: "success"})

return nil
@@ -884,7 +979,7 @@ func PullModel(ctx context.Context, name string, regOpts *RegistryOptions, fn fu

fn(api.ProgressResponse{Status: "pulling manifest"})

manifest, err := pullModelManifest(mp, regOpts)
manifest, err := pullModelManifest(ctx, mp, regOpts)
if err != nil {
return fmt.Errorf("pull model manifest: %s", err)
}
@@ -894,7 +989,14 @@ func PullModel(ctx context.Context, name string, regOpts *RegistryOptions, fn fu
layers = append(layers, &manifest.Config)

for _, layer := range layers {
if err := downloadBlob(ctx, mp, layer.Digest, regOpts, fn); err != nil {
if err := downloadBlob(
ctx,
downloadOpts{
mp: mp,
digest: layer.Digest,
regOpts: regOpts,
fn: fn,
}); err != nil {
return err
}
}
@@ -940,13 +1042,13 @@ func PullModel(ctx context.Context, name string, regOpts *RegistryOptions, fn fu
return nil
}

func pullModelManifest(mp ModelPath, regOpts *RegistryOptions) (*ManifestV2, error) {
func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *RegistryOptions) (*ManifestV2, error) {
url := fmt.Sprintf("%s/v2/%s/manifests/%s", mp.Registry, mp.GetNamespaceRepository(), mp.Tag)
headers := map[string]string{
"Accept": "application/vnd.docker.distribution.manifest.v2+json",
}

resp, err := makeRequest("GET", url, headers, nil, regOpts)
resp, err := makeRequest(ctx, "GET", url, headers, nil, regOpts)
if err != nil {
log.Printf("couldn't get manifest: %v", err)
return nil, err
@@ -970,15 +1072,10 @@ func pullModelManifest(mp ModelPath, regOpts *RegistryOptions) (*ManifestV2, err
return m, err
}

func createConfigLayer(layers []string) (*LayerReader, error) {
// TODO change architecture and OS
config := ConfigV2{
Architecture: "arm64",
OS: "linux",
RootFS: RootFS{
Type: "layers",
DiffIDs: layers,
},
func createConfigLayer(config ConfigV2, layers []string) (*LayerReader, error) {
config.RootFS = RootFS{
Type: "layers",
DiffIDs: layers,
}

configJSON, err := json.Marshal(config)
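With this change, createConfigLayer no longer hard-codes `arm64`/`linux`; the caller supplies a ConfigV2 and the function only attaches the RootFS built from the layer digests. A rough, self-contained sketch of that flow under local assumptions; the type definitions, JSON tags, and digest values here are illustrative, not the repository's exact ones:

```go
package main

import (
	"encoding/json"
	"fmt"
	"runtime"
)

// Local stand-ins for the ConfigV2/RootFS types referenced in the diff.
type RootFS struct {
	Type    string   `json:"type"`
	DiffIDs []string `json:"diff_ids"`
}

type ConfigV2 struct {
	Architecture string `json:"architecture"`
	OS           string `json:"os"`
	RootFS       RootFS `json:"rootfs"`
}

func main() {
	// The caller now owns the platform fields (previously fixed to arm64/linux)
	// and hands the config over; only RootFS is filled in from the digests.
	config := ConfigV2{
		Architecture: runtime.GOARCH,
		OS:           runtime.GOOS,
	}
	digests := []string{"sha256:aaa", "sha256:bbb"} // hypothetical layer digests
	config.RootFS = RootFS{Type: "layers", DiffIDs: digests}

	b, _ := json.Marshal(config)
	fmt.Println(string(b))
}
```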
@@ -1010,22 +1107,21 @@ func GetSHA256Digest(r io.Reader) (string, int) {
return fmt.Sprintf("sha256:%x", h.Sum(nil)), int(n)
}

func startUpload(mp ModelPath, regOpts *RegistryOptions) (string, error) {
url := fmt.Sprintf("%s/v2/%s/blobs/uploads/", mp.Registry, mp.GetNamespaceRepository())
type requestContextKey string

resp, err := makeRequest("POST", url, nil, nil, regOpts)
func startUpload(ctx context.Context, mp ModelPath, layer *Layer, regOpts *RegistryOptions) (string, error) {
url := fmt.Sprintf("%s/v2/%s/blobs/uploads/", mp.Registry, mp.GetNamespaceRepository())
if layer.From != "" {
url = fmt.Sprintf("%s/v2/%s/blobs/uploads/?mount=%s&from=%s", mp.Registry, mp.GetNamespaceRepository(), layer.Digest, layer.From)
}

resp, err := makeRequestWithRetry(ctx, "POST", url, nil, nil, regOpts)
if err != nil {
log.Printf("couldn't start upload: %v", err)
return "", err
}
defer resp.Body.Close()

// Check for success
if resp.StatusCode != http.StatusAccepted {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("on upload registry responded with code %d: %s", resp.StatusCode, body)
}

// Extract UUID location from header
location := resp.Header.Get("Location")
if location == "" {
@@ -1036,10 +1132,10 @@ func startUpload(mp ModelPath, regOpts *RegistryOptions) (string, error) {
}

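startUpload now asks the registry to mount the blob from `layer.From` when it is set, instead of always opening a fresh upload session. A small sketch of the URL construction; the registry, repository, and digest values are hypothetical, and the note on status codes reflects the Docker registry v2 convention (201 Created for a successful mount, 202 Accepted for a normal upload session) rather than anything enforced in this diff:

```go
package main

import (
	"fmt"
	"net/url"
)

// uploadURL builds the blob-upload endpoint used by startUpload above. When a
// source repository is known, it asks the registry to cross-repo mount the
// blob instead of accepting a fresh upload.
func uploadURL(registry, repo, digest, from string) string {
	u := fmt.Sprintf("%s/v2/%s/blobs/uploads/", registry, repo)
	if from == "" {
		return u
	}
	q := url.Values{}
	q.Set("mount", digest)
	q.Set("from", from)
	return u + "?" + q.Encode()
}

func main() {
	fmt.Println(uploadURL("https://registry.ollama.ai", "library/llama2",
		"sha256:abc123", "library/llama2-base"))
}
```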
// Function to check if a blob already exists in the Docker registry
func checkBlobExistence(mp ModelPath, digest string, regOpts *RegistryOptions) (bool, error) {
func checkBlobExistence(ctx context.Context, mp ModelPath, digest string, regOpts *RegistryOptions) (bool, error) {
url := fmt.Sprintf("%s/v2/%s/blobs/%s", mp.Registry, mp.GetNamespaceRepository(), digest)

resp, err := makeRequest("HEAD", url, nil, nil, regOpts)
resp, err := makeRequest(ctx, "HEAD", url, nil, nil, regOpts)
if err != nil {
log.Printf("couldn't check for blob: %v", err)
return false, err
@@ -1050,10 +1146,9 @@ func checkBlobExistence(mp ModelPath, digest string, regOpts *RegistryOptions) (
return resp.StatusCode == http.StatusOK, nil
}

func uploadBlobChunked(mp ModelPath, url string, layer *Layer, regOpts *RegistryOptions, fn func(api.ProgressResponse)) error {
func uploadBlobChunked(ctx context.Context, mp ModelPath, url string, layer *Layer, regOpts *RegistryOptions, fn func(api.ProgressResponse)) error {
// TODO allow resumability
// TODO allow canceling uploads via DELETE
// TODO allow cross repo blob mount

fp, err := GetBlobsPath(layer.Digest)
if err != nil {
@@ -1064,50 +1159,80 @@ func uploadBlobChunked(mp ModelPath, url string, layer *Layer, regOpts *Registry
if err != nil {
return err
}
defer f.Close()

totalUploaded := 0
completed := 0
chunkSize := 10 * 1024 * 1024

r, w := io.Pipe()
defer r.Close()
for {
r, w := io.Pipe()
defer r.Close()

limit := completed + chunkSize
if chunkSize >= layer.Size-completed {
limit = layer.Size
chunkSize = layer.Size - completed
}

go func() {
defer w.Close()
for {
n, err := io.CopyN(w, f, 1024*1024)
if err != nil && !errors.Is(err, io.EOF) {
fn(api.ProgressResponse{
Status: fmt.Sprintf("error copying pipe: %v", err),
Digest: layer.Digest,
Total: layer.Size,
Completed: completed,
})
return
}

completed += int(n)

go func() {
defer w.Close()
for {
n, err := io.CopyN(w, f, 1024*1024)
if err != nil && !errors.Is(err, io.EOF) {
fn(api.ProgressResponse{
Status: fmt.Sprintf("error copying pipe: %v", err),
Status: fmt.Sprintf("uploading %s", layer.Digest),
Digest: layer.Digest,
Total: layer.Size,
Completed: totalUploaded,
Completed: completed,
})
return

if completed >= limit {
return
}
}
}()

totalUploaded += int(n)
headers := make(map[string]string)
headers["Content-Type"] = "application/octet-stream"
headers["Content-Length"] = strconv.Itoa(chunkSize)
headers["Content-Range"] = fmt.Sprintf("%d-%d", completed, limit-1)

fn(api.ProgressResponse{
Status: fmt.Sprintf("uploading %s", layer.Digest),
Digest: layer.Digest,
Total: layer.Size,
Completed: totalUploaded,
})

if totalUploaded >= layer.Size {
return
}
resp, err := makeRequest(ctx, "PATCH", url, headers, r, regOpts)
if err != nil {
return err
}
}()
defer resp.Body.Close()

if resp.StatusCode != http.StatusAccepted {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("on finish upload registry responded with code %d: %v", resp.StatusCode, string(body))
}

url = resp.Header.Get("Location")
if completed >= layer.Size {
break
}
}

url = fmt.Sprintf("%s&digest=%s", url, layer.Digest)

headers := make(map[string]string)
headers["Content-Type"] = "application/octet-stream"
headers["Content-Range"] = fmt.Sprintf("0-%d", layer.Size-1)
headers["Content-Length"] = strconv.Itoa(int(layer.Size))
headers["Content-Length"] = "0"

// finish the upload
resp, err := makeRequest("PUT", url, headers, r, regOpts)
resp, err := makeRequest(ctx, "PUT", url, headers, nil, regOpts)
if err != nil {
log.Printf("couldn't finish upload: %v", err)
return err
@@ -1121,7 +1246,46 @@ func uploadBlobChunked(mp ModelPath, url string, layer *Layer, regOpts *Registry
return nil
}

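The rewritten loop above uploads the blob in 10 MB chunks: each PATCH carries Content-Range and Content-Length for its slice, the registry's Location header points at the next upload URL, and a final zero-length PUT with `&digest=` seals the blob. A stripped-down, self-contained sketch of that protocol using only net/http; the URLs, digest, and status handling are illustrative, and the real code goes through makeRequest and reports progress via the callback:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

// pushBlobChunked sends a local blob to a registry upload session in fixed-size
// chunks, following the Location header between requests, then completes the
// upload with a zero-length PUT that appends the blob digest to the URL.
func pushBlobChunked(uploadURL, digest, blobPath string, chunkSize int64) error {
	f, err := os.Open(blobPath)
	if err != nil {
		return err
	}
	defer f.Close()

	info, err := f.Stat()
	if err != nil {
		return err
	}
	size := info.Size()

	for offset := int64(0); offset < size; {
		n := chunkSize
		if size-offset < n {
			n = size - offset
		}

		// PATCH one chunk; Content-Range describes the byte span being sent.
		req, err := http.NewRequest(http.MethodPatch, uploadURL, io.LimitReader(f, n))
		if err != nil {
			return err
		}
		req.ContentLength = n
		req.Header.Set("Content-Type", "application/octet-stream")
		req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+n-1))

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusAccepted {
			return fmt.Errorf("chunk upload failed: %s", resp.Status)
		}

		uploadURL = resp.Header.Get("Location") // registry hands back the next upload URL
		offset += n
	}

	// finish: zero-length PUT with the digest appended to the upload URL
	req, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s&digest=%s", uploadURL, digest), nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("finish upload failed: %s", resp.Status)
	}
	return nil
}

func main() {
	// hypothetical values; a real call would come from the push flow above
	if err := pushBlobChunked("https://registry.example.com/v2/library/llama2/blobs/uploads/uuid?state=1",
		"sha256:abc123", "/tmp/layer.bin", 10*1024*1024); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```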
func makeRequest(method, url string, headers map[string]string, body io.Reader, regOpts *RegistryOptions) (*http.Response, error) {
func makeRequestWithRetry(ctx context.Context, method, url string, headers map[string]string, body io.ReadSeeker, regOpts *RegistryOptions) (*http.Response, error) {
var status string
for try := 0; try < MaxRetries; try++ {
resp, err := makeRequest(ctx, method, url, headers, body, regOpts)
if err != nil {
log.Printf("couldn't start upload: %v", err)
return nil, err
}

status = resp.Status

switch resp.StatusCode {
case http.StatusAccepted, http.StatusCreated:
return resp, nil
case http.StatusUnauthorized:
auth := resp.Header.Get("www-authenticate")
authRedir := ParseAuthRedirectString(auth)
token, err := getAuthToken(ctx, authRedir, regOpts)
if err != nil {
return nil, err
}

regOpts.Token = token
if body != nil {
if _, err := body.Seek(0, io.SeekStart); err != nil {
return nil, err
}
}

continue
default:
body, _ := io.ReadAll(resp.Body)
return nil, fmt.Errorf("on upload registry responded with code %d: %s", resp.StatusCode, body)
}
}

return nil, fmt.Errorf("max retry exceeded: %v", status)
}

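The retry wrapper above takes an io.ReadSeeker because a request body consumed by a failed attempt must be rewound before the request can be replayed with a fresh token. A generic sketch of that pattern under local assumptions; `getToken` and the sample URL stand in for the registry token exchange and are not part of the codebase:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// doWithRetry replays a request after a 401, refreshing credentials and
// rewinding the seekable body so the retry sends the same bytes again.
func doWithRetry(method, url string, body io.ReadSeeker, getToken func() (string, error), maxRetries int) (*http.Response, error) {
	token := ""
	for try := 0; try < maxRetries; try++ {
		req, err := http.NewRequest(method, url, body)
		if err != nil {
			return nil, err
		}
		if token != "" {
			req.Header.Set("Authorization", "Bearer "+token)
		}

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusUnauthorized {
			return resp, nil
		}
		resp.Body.Close()

		// refresh the token, rewind the body, and try again
		if token, err = getToken(); err != nil {
			return nil, err
		}
		if body != nil {
			if _, err := body.Seek(0, io.SeekStart); err != nil {
				return nil, err
			}
		}
	}
	return nil, fmt.Errorf("max retries exceeded")
}

func main() {
	payload := bytes.NewReader([]byte(`{"hello":"world"}`)) // seekable body
	_, _ = doWithRetry(http.MethodPut, "https://registry.example.com/v2/test", payload,
		func() (string, error) { return "hypothetical-token", nil }, 3)
}
```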
func makeRequest(ctx context.Context, method, url string, headers map[string]string, body io.Reader, regOpts *RegistryOptions) (*http.Response, error) {
if !strings.HasPrefix(url, "http") {
if regOpts.Insecure {
url = "http://" + url
@@ -1130,18 +1294,7 @@ func makeRequest(method, url string, headers map[string]string, body io.Reader,
}
}

// make a copy of the body in case we need to try the call to makeRequest again
var buf bytes.Buffer
if body != nil {
_, err := io.Copy(&buf, body)
if err != nil {
return nil, err
}
}

bodyCopy := bytes.NewReader(buf.Bytes())

req, err := http.NewRequest(method, url, bodyCopy)
req, err := http.NewRequestWithContext(ctx, method, url, body)
if err != nil {
return nil, err
}
@@ -1165,24 +1318,12 @@ func makeRequest(method, url string, headers map[string]string, body io.Reader,
return nil
},
}

resp, err := client.Do(req)
if err != nil {
return nil, err
}

// if the request is unauthenticated, try to authenticate and make the request again
if resp.StatusCode == http.StatusUnauthorized {
auth := resp.Header.Get("Www-Authenticate")
authRedir := ParseAuthRedirectString(string(auth))
token, err := getAuthToken(authRedir, regOpts)
if err != nil {
return nil, err
}
regOpts.Token = token
bodyCopy = bytes.NewReader(buf.Bytes())
return makeRequest(method, url, headers, bodyCopy, regOpts)
}

return resp, nil
}

@@ -21,14 +21,14 @@ import (
"gonum.org/v1/gonum/mat"

"github.com/jmorganca/ollama/api"
"github.com/jmorganca/ollama/llama"
"github.com/jmorganca/ollama/llm"
"github.com/jmorganca/ollama/vector"
)

var loaded struct {
mu sync.Mutex

llm *llama.LLM
llm llm.LLM
Embeddings []vector.Embedding

expireAt time.Time
@@ -38,6 +38,8 @@ var loaded struct {
options api.Options
}

var defaultSessionDuration = 5 * time.Minute

// load a model into memory if it is not already loaded, it is up to the caller to lock loaded.mu before calling this function
func load(model *Model, reqOpts map[string]interface{}, sessionDuration time.Duration) error {
opts := api.DefaultOptions()
@@ -63,11 +65,16 @@ func load(model *Model, reqOpts map[string]interface{}, sessionDuration time.Dur
loaded.Embeddings = model.Embeddings
}

llm, err := llama.New(model.ModelPath, opts)
llmModel, err := llm.New(model.ModelPath, model.AdapterPaths, opts)
if err != nil {
return err
}

// set cache values before modifying opts
loaded.llm = llmModel
loaded.digest = model.Digest
loaded.options = opts

if opts.NumKeep < 0 {
promptWithSystem, err := model.Prompt(api.GenerateRequest{}, "")
if err != nil {
@@ -79,15 +86,13 @@ func load(model *Model, reqOpts map[string]interface{}, sessionDuration time.Dur
return err
}

tokensWithSystem := llm.Encode(promptWithSystem)
tokensNoSystem := llm.Encode(promptNoSystem)
tokensWithSystem := llmModel.Encode(promptWithSystem)
tokensNoSystem := llmModel.Encode(promptNoSystem)

llm.NumKeep = len(tokensWithSystem) - len(tokensNoSystem) + 1
opts.NumKeep = len(tokensWithSystem) - len(tokensNoSystem) + 1

llmModel.SetOptions(opts)
}

loaded.llm = llm
loaded.digest = model.Digest
loaded.options = opts
}
loaded.expireAt = time.Now().Add(sessionDuration)

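The load() changes above maintain a single resident model: it is swapped out only when the requested model differs, every call pushes the expiry forward by the session duration, and the caller holds loaded.mu. A simplified sketch of that session-cache pattern; the types and names here are illustrative, not the server's actual ones:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// session keeps one model resident and tracks when it should expire.
type session struct {
	mu       sync.Mutex
	digest   string
	expireAt time.Time
}

func (s *session) use(digest string, ttl time.Duration, loadFn func(string) error) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.digest != digest {
		// a different model was requested: load it and remember which one is resident
		if err := loadFn(digest); err != nil {
			return err
		}
		s.digest = digest
	}
	// every request keeps the session alive a bit longer
	s.expireAt = time.Now().Add(ttl)
	return nil
}

func main() {
	var s session
	load := func(d string) error { fmt.Println("loading", d); return nil }

	_ = s.use("sha256:aaa", 5*time.Minute, load) // loads
	_ = s.use("sha256:aaa", 5*time.Minute, load) // reuses, extends expiry
	_ = s.use("sha256:bbb", 5*time.Minute, load) // swaps models
}
```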
@@ -131,7 +136,7 @@ func GenerateHandler(c *gin.Context) {
return
}

sessionDuration := 5 * time.Minute
sessionDuration := defaultSessionDuration // TODO: set this duration from the request if specified
if err := load(model, req.Options, sessionDuration); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
@@ -274,7 +279,8 @@ func PushModelHandler(c *gin.Context) {
Password: req.Password,
}

if err := PushModel(req.Name, regOpts, fn); err != nil {
ctx := context.Background()
if err := PushModel(ctx, req.Name, regOpts, fn); err != nil {
ch <- gin.H{"error": err.Error()}
}
}()