Major API enhancements (#44)

add/first-example v1.1.0
Ettore Di Giacinto 2 years ago committed by GitHub
parent c905512bb0
commit d517a54e28
Changed files:
  1. .env (1 line changed)
  2. README.md (2 lines changed)
  3. api/api.go (105 lines changed)
  4. docker-compose.yaml (1 line changed)
  5. go.mod (3 lines changed)
  6. go.sum (10 lines changed)
  7. main.go (17 lines changed)
  8. pkg/model/loader.go (94 lines changed)

--- a/.env
+++ b/.env
@@ -1,3 +1,4 @@
 THREADS=14
 CONTEXT_SIZE=512
 MODELS_PATH=/models
+# DEBUG=true

--- a/README.md
+++ b/README.md
@@ -82,7 +82,7 @@ See the [prompt-templates](https://github.com/go-skynet/LocalAI/tree/master/prom
 Example of starting the API with `docker`:
 ```bash
-docker run -p 8080:8080 -ti --rm quay.io/go-skynet/local-api:latest --models-path /path/to/models --context-size 700 --threads 4
+docker run -p 8080:8080 -ti --rm quay.io/go-skynet/local-ai:latest --models-path /path/to/models --context-size 700 --threads 4
 ```
 And you'll see:

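For context on the corrected image name above, here is a minimal request against the running container. The endpoint path and JSON fields come from the api/api.go changes below; the model file name is only a placeholder and must match a file placed under the mounted models path.

```bash
# Placeholder model name: substitute any .bin file present in /path/to/models.
curl http://localhost:8080/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "ggml-model.bin", "prompt": "A long time ago"}'
```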
--- a/api/api.go
+++ b/api/api.go
@@ -1,6 +1,8 @@
 package api
 import (
+    "encoding/json"
+    "errors"
     "fmt"
     "strings"
     "sync"
@@ -11,6 +13,7 @@ import (
     "github.com/gofiber/fiber/v2"
     "github.com/gofiber/fiber/v2/middleware/cors"
     "github.com/gofiber/fiber/v2/middleware/recover"
+    "github.com/rs/zerolog/log"
 )
 type OpenAIResponse struct {
@@ -65,7 +68,7 @@ type OpenAIRequest struct {
 }
 // https://platform.openai.com/docs/api-reference/completions
-func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16 bool, defaultMutex *sync.Mutex, mutexMap *sync.Mutex, mutexes map[string]*sync.Mutex) func(c *fiber.Ctx) error {
+func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16 bool, mutexMap *sync.Mutex, mutexes map[string]*sync.Mutex) func(c *fiber.Ctx) error {
     return func(c *fiber.Ctx) error {
         var err error
         var model *llama.LLama
@@ -76,10 +79,23 @@ func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16
         if err := c.BodyParser(input); err != nil {
             return err
         }
+        modelFile := input.Model
+        received, _ := json.Marshal(input)
+        log.Debug().Msgf("Request received: %s", string(received))
+        // Set model from bearer token, if available
+        bearer := strings.TrimLeft(c.Get("authorization"), "Bearer ")
+        bearerExists := bearer != "" && loader.ExistsInModelPath(bearer)
-        if input.Model == "" {
+        if modelFile == "" && !bearerExists {
             return fmt.Errorf("no model specified")
-        } else {
+        }
+        if bearerExists { // model specified in bearer token takes precedence
+            log.Debug().Msgf("Using model from bearer token: %s", bearer)
+            modelFile = bearer
+        }
         // Try to load the model with both
         var llamaerr error
         llamaOpts := []llama.ModelOption{}
@@ -90,31 +106,25 @@ func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16
             llamaOpts = append(llamaOpts, llama.EnableF16Memory)
         }
-        model, llamaerr = loader.LoadLLaMAModel(input.Model, llamaOpts...)
+        model, llamaerr = loader.LoadLLaMAModel(modelFile, llamaOpts...)
         if llamaerr != nil {
-            gptModel, err = loader.LoadGPTJModel(input.Model)
+            gptModel, err = loader.LoadGPTJModel(modelFile)
             if err != nil {
                 return fmt.Errorf("llama: %s gpt: %s", llamaerr.Error(), err.Error()) // llama failed first, so we want to catch both errors
             }
         }
-        }
         // This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
-        if input.Model != "" {
         mutexMap.Lock()
-        l, ok := mutexes[input.Model]
+        l, ok := mutexes[modelFile]
         if !ok {
             m := &sync.Mutex{}
-            mutexes[input.Model] = m
+            mutexes[modelFile] = m
             l = m
         }
         mutexMap.Unlock()
         l.Lock()
         defer l.Unlock()
-        } else {
-            defaultMutex.Lock()
-            defer defaultMutex.Unlock()
-        }
         // Set the parameters for the language model prediction
         topP := input.TopP
@@ -139,6 +149,7 @@ func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16
         predInput := input.Prompt
         if chat {
             mess := []string{}
+            // TODO: encode roles
             for _, i := range input.Messages {
                 mess = append(mess, i.Content)
             }
@@ -147,11 +158,12 @@ func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16
         }
         // A model can have a "file.bin.tmpl" file associated with a prompt template prefix
-        templatedInput, err := loader.TemplatePrefix(input.Model, struct {
+        templatedInput, err := loader.TemplatePrefix(modelFile, struct {
             Input string
         }{Input: predInput})
         if err == nil {
             predInput = templatedInput
+            log.Debug().Msgf("Template found, input modified to: %s", predInput)
         }
         result := []Choice{}
@@ -223,8 +235,6 @@ func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16
         }
         for i := 0; i < n; i++ {
-            var prediction string
             prediction, err := predFunc()
             if err != nil {
                 return err
@@ -241,30 +251,19 @@ func openAIEndpoint(chat bool, loader *model.ModelLoader, threads, ctx int, f16
             }
         }
+        jsonResult, _ := json.Marshal(result)
+        log.Debug().Msgf("Response: %s", jsonResult)
         // Return the prediction in the response body
         return c.JSON(OpenAIResponse{
-            Model:   input.Model,
+            Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
             Choices: result,
         })
     }
 }
-func Start(loader *model.ModelLoader, listenAddr string, threads, ctxSize int, f16 bool) error {
-    app := fiber.New()
-    // Default middleware config
-    app.Use(recover.New())
-    app.Use(cors.New())
-    // This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
-    var mutex = &sync.Mutex{}
-    mu := map[string]*sync.Mutex{}
-    var mumutex = &sync.Mutex{}
-    // openAI compatible API endpoint
-    app.Post("/v1/chat/completions", openAIEndpoint(true, loader, threads, ctxSize, f16, mutex, mumutex, mu))
-    app.Post("/v1/completions", openAIEndpoint(false, loader, threads, ctxSize, f16, mutex, mumutex, mu))
-    app.Get("/v1/models", func(c *fiber.Ctx) error {
+func listModels(loader *model.ModelLoader) func(ctx *fiber.Ctx) error {
+    return func(c *fiber.Ctx) error {
         models, err := loader.ListModels()
         if err != nil {
             return err
@@ -281,8 +280,48 @@ func Start(loader *model.ModelLoader, listenAddr string, threads, ctxSize int, f
             Object: "list",
             Data:   dataModels,
         })
+    }
+}
+func Start(loader *model.ModelLoader, listenAddr string, threads, ctxSize int, f16 bool) error {
+    // Return errors as JSON responses
+    app := fiber.New(fiber.Config{
+        // Override default error handler
+        ErrorHandler: func(ctx *fiber.Ctx, err error) error {
+            // Status code defaults to 500
+            code := fiber.StatusInternalServerError
+            // Retrieve the custom status code if it's a *fiber.Error
+            var e *fiber.Error
+            if errors.As(err, &e) {
+                code = e.Code
+            }
+            // Send custom error page
+            return ctx.Status(code).JSON(struct {
+                Error string `json:"error"`
+            }{Error: err.Error()})
+        },
     })
+    // Default middleware config
+    app.Use(recover.New())
+    app.Use(cors.New())
+    // This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
+    mu := map[string]*sync.Mutex{}
+    var mumutex = &sync.Mutex{}
+    // openAI compatible API endpoint
+    app.Post("/v1/chat/completions", openAIEndpoint(true, loader, threads, ctxSize, f16, mumutex, mu))
+    app.Post("/chat/completions", openAIEndpoint(true, loader, threads, ctxSize, f16, mumutex, mu))
+    app.Post("/v1/completions", openAIEndpoint(false, loader, threads, ctxSize, f16, mumutex, mu))
+    app.Post("/completions", openAIEndpoint(false, loader, threads, ctxSize, f16, mumutex, mu))
+    app.Get("/v1/models", listModels(loader))
+    app.Get("/models", listModels(loader))
     // Start the server
     app.Listen(listenAddr)
     return nil

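The api/api.go changes above add bearer-token model selection, request/response debug logging, JSON error responses, and unversioned aliases for every endpoint. A sketch of how these could be exercised with curl follows; the model file name is hypothetical and must exist in the models path.

```bash
# Model taken from the bearer token instead of the request body
# (ggml-gpt4all-j.bin is a hypothetical file in the models path):
curl http://localhost:8080/chat/completions \
  -H "Authorization: Bearer ggml-gpt4all-j.bin" \
  -H "Content-Type: application/json" \
  -d '{"messages": [{"role": "user", "content": "How are you?"}]}'

# With no "model" field and no bearer token, the new Fiber error handler
# reports the failure as JSON, e.g. {"error":"no model specified"}:
curl http://localhost:8080/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Hello"}'
```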
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -14,5 +14,6 @@ services:
       - MODELS_PATH=$MODELS_PATH
       - CONTEXT_SIZE=$CONTEXT_SIZE
       - THREADS=$THREADS
+      - DEBUG=$DEBUG
     volumes:
       - ./models:/models:cached

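Together with the commented `# DEBUG=true` line added to `.env`, the `DEBUG=$DEBUG` passthrough above lets debug logging be toggled without editing the compose file. A minimal sketch, assuming compose is run from the directory containing that `.env` file:

```bash
# Either uncomment DEBUG=true in .env, or set it for a single run:
DEBUG=true docker compose up -d
```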
--- a/go.mod
+++ b/go.mod
@@ -3,15 +3,16 @@ module github.com/go-skynet/LocalAI
 go 1.19
 
 require (
+    github.com/go-skynet/go-gpt4all-j.cpp v0.0.0-20230419091210-303cf2a59a94
     github.com/go-skynet/go-llama.cpp v0.0.0-20230415213228-bac222030640
     github.com/gofiber/fiber/v2 v2.42.0
+    github.com/rs/zerolog v1.29.1
     github.com/urfave/cli/v2 v2.25.0
 )
 
 require (
     github.com/andybalholm/brotli v1.0.4 // indirect
     github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
-    github.com/go-skynet/go-gpt4all-j.cpp v0.0.0-20230419091210-303cf2a59a94 // indirect
     github.com/google/uuid v1.3.0 // indirect
     github.com/klauspost/compress v1.15.9 // indirect
     github.com/mattn/go-colorable v0.1.13 // indirect

--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,6 @@
 github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
 github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
@@ -8,6 +9,7 @@ github.com/go-skynet/go-gpt4all-j.cpp v0.0.0-20230419091210-303cf2a59a94/go.mod
 github.com/go-skynet/go-llama.cpp v0.0.0-20230415213228-bac222030640 h1:8SSVbQ3yvq7JnfLCLF4USV0PkQnnduUkaNCv/hHDa3E=
 github.com/go-skynet/go-llama.cpp v0.0.0-20230415213228-bac222030640/go.mod h1:35AKIEMY+YTKCBJIa/8GZcNGJ2J+nQk1hQiWo/OnEWw=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofiber/fiber/v2 v2.42.0 h1:Fnp7ybWvS+sjNQsFvkhf4G8OhXswvB6Vee8hM/LyS+8=
 github.com/gofiber/fiber/v2 v2.42.0/go.mod h1:3+SGNjqMh5VQH5Vz2Wdi43zTIV16ktlFd3x3R6O1Zlc=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
@@ -16,8 +18,10 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
 github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
@@ -27,8 +31,12 @@ github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
 github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
 github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
 github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
+github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc=
+github.com/rs/zerolog v1.29.1/go.mod h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
@@ -67,6 +75,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=

--- a/main.go
+++ b/main.go
@@ -1,20 +1,23 @@
 package main
 import (
-    "fmt"
     "os"
     "runtime"
     api "github.com/go-skynet/LocalAI/api"
     model "github.com/go-skynet/LocalAI/pkg/model"
+    "github.com/rs/zerolog"
+    "github.com/rs/zerolog/log"
     "github.com/urfave/cli/v2"
 )
 func main() {
+    log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
     path, err := os.Getwd()
     if err != nil {
-        fmt.Println(err)
+        log.Error().Msgf("error: %s", err.Error())
         os.Exit(1)
     }
@@ -26,6 +29,10 @@ func main() {
             Name: "f16",
             EnvVars: []string{"F16"},
         },
+        &cli.BoolFlag{
+            Name: "debug",
+            EnvVars: []string{"DEBUG"},
+        },
         &cli.IntFlag{
             Name: "threads",
             DefaultText: "Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested.",
@@ -66,13 +73,17 @@ It uses llama.cpp and gpt4all as backend, supporting all the models supported by
         UsageText: `local-ai [options]`,
         Copyright: "go-skynet authors",
         Action: func(ctx *cli.Context) error {
+            zerolog.SetGlobalLevel(zerolog.InfoLevel)
+            if ctx.Bool("debug") {
+                zerolog.SetGlobalLevel(zerolog.DebugLevel)
+            }
             return api.Start(model.NewModelLoader(ctx.String("models-path")), ctx.String("address"), ctx.Int("threads"), ctx.Int("context-size"), ctx.Bool("f16"))
         },
     }
     err = app.Run(os.Args)
     if err != nil {
-        fmt.Println(err)
+        log.Error().Msgf("error: %s", err.Error())
         os.Exit(1)
     }
 }

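main.go now initializes zerolog with a console writer on stderr and adds a `--debug` flag (also readable from the `DEBUG` environment variable) that raises the global log level from Info to Debug. A sketch of running a locally built binary with the new flag, with the binary name assumed from the usage text:

```bash
# Assumes a binary built from this tree and named local-ai:
./local-ai --models-path ./models --debug
# equivalent, using the environment variable wired to the same flag:
DEBUG=true ./local-ai --models-path ./models
```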
--- a/pkg/model/loader.go
+++ b/pkg/model/loader.go
@@ -10,6 +10,8 @@ import (
     "sync"
     "text/template"
+    "github.com/rs/zerolog/log"
     gptj "github.com/go-skynet/go-gpt4all-j.cpp"
     llama "github.com/go-skynet/go-llama.cpp"
 )
@@ -26,6 +28,11 @@ func NewModelLoader(modelPath string) *ModelLoader {
     return &ModelLoader{modelPath: modelPath, gptmodels: make(map[string]*gptj.GPTJ), models: make(map[string]*llama.LLama), promptsTemplates: make(map[string]*template.Template)}
 }
+func (ml *ModelLoader) ExistsInModelPath(s string) bool {
+    _, err := os.Stat(filepath.Join(ml.modelPath, s))
+    return err == nil
+}
 func (ml *ModelLoader) ListModels() ([]string, error) {
     files, err := ioutil.ReadDir(ml.modelPath)
     if err != nil {
@@ -34,9 +41,12 @@ func (ml *ModelLoader) ListModels() ([]string, error) {
     models := []string{}
     for _, file := range files {
-        if strings.HasSuffix(file.Name(), ".bin") {
-            models = append(models, strings.TrimRight(file.Name(), ".bin"))
+        // Skip templates, YAML and .keep files
+        if strings.HasSuffix(file.Name(), ".tmpl") || strings.HasSuffix(file.Name(), ".keep") || strings.HasSuffix(file.Name(), ".yaml") || strings.HasSuffix(file.Name(), ".yml") {
+            continue
         }
+        models = append(models, file.Name())
     }
     return models, nil
@@ -47,14 +57,9 @@ func (ml *ModelLoader) TemplatePrefix(modelName string, in interface{}) (string,
     defer ml.mu.Unlock()
     m, ok := ml.promptsTemplates[modelName]
-    if !ok {
-        // try to find a s.bin
-        modelBin := fmt.Sprintf("%s.bin", modelName)
-        m, ok = ml.promptsTemplates[modelBin]
     if !ok {
         return "", fmt.Errorf("no prompt template available")
     }
-    }
     var buf bytes.Buffer
@@ -64,15 +69,21 @@ func (ml *ModelLoader) TemplatePrefix(modelName string, in interface{}) (string,
     return buf.String(), nil
 }
-func (ml *ModelLoader) loadTemplate(modelName, modelFile string) error {
-    modelTemplateFile := fmt.Sprintf("%s.tmpl", modelFile)
+func (ml *ModelLoader) loadTemplateIfExists(modelName, modelFile string) error {
+    // Check if the template was already loaded
+    if _, ok := ml.promptsTemplates[modelName]; ok {
+        return nil
+    }
     // Check if the model path exists
-    if _, err := os.Stat(modelTemplateFile); err != nil {
+    // skip any error here - we run anyway if a template is not exist
+    modelTemplateFile := fmt.Sprintf("%s.tmpl", modelName)
+    if !ml.ExistsInModelPath(modelTemplateFile) {
         return nil
     }
-    dat, err := os.ReadFile(modelTemplateFile)
+    dat, err := os.ReadFile(filepath.Join(ml.modelPath, modelTemplateFile))
     if err != nil {
         return err
     }
@@ -92,36 +103,30 @@ func (ml *ModelLoader) LoadGPTJModel(modelName string) (*gptj.GPTJ, error) {
     defer ml.mu.Unlock()
     // Check if we already have a loaded model
-    modelFile := filepath.Join(ml.modelPath, modelName)
-    if m, ok := ml.gptmodels[modelFile]; ok {
-        return m, nil
-    }
-    // Check if the model path exists
-    if _, err := os.Stat(modelFile); os.IsNotExist(err) {
-        // try to find a s.bin
-        modelBin := fmt.Sprintf("%s.bin", modelFile)
-        if _, err := os.Stat(modelBin); os.IsNotExist(err) {
-            return nil, err
-        } else {
-            modelName = fmt.Sprintf("%s.bin", modelName)
-            modelFile = modelBin
-        }
+    if !ml.ExistsInModelPath(modelName) {
+        return nil, fmt.Errorf("model does not exist")
+    }
+    if m, ok := ml.gptmodels[modelName]; ok {
+        log.Debug().Msgf("Model already loaded in memory: %s", modelName)
+        return m, nil
     }
     // Load the model and keep it in memory for later use
+    modelFile := filepath.Join(ml.modelPath, modelName)
+    log.Debug().Msgf("Loading model in memory from file: %s", modelFile)
     model, err := gptj.New(modelFile)
     if err != nil {
         return nil, err
     }
     // If there is a prompt template, load it
-    if err := ml.loadTemplate(modelName, modelFile); err != nil {
+    if err := ml.loadTemplateIfExists(modelName, modelFile); err != nil {
         return nil, err
     }
-    ml.gptmodels[modelFile] = model
+    ml.gptmodels[modelName] = model
     return model, err
 }
@@ -129,40 +134,39 @@ func (ml *ModelLoader) LoadLLaMAModel(modelName string, opts ...llama.ModelOptio
     ml.mu.Lock()
     defer ml.mu.Unlock()
+    log.Debug().Msgf("Loading model name: %s", modelName)
     // Check if we already have a loaded model
-    modelFile := filepath.Join(ml.modelPath, modelName)
-    if m, ok := ml.models[modelFile]; ok {
+    if !ml.ExistsInModelPath(modelName) {
+        return nil, fmt.Errorf("model does not exist")
+    }
+    if m, ok := ml.models[modelName]; ok {
+        log.Debug().Msgf("Model already loaded in memory: %s", modelName)
         return m, nil
     }
     // TODO: This needs refactoring, it's really bad to have it in here
-    // Check if we have a GPTJ model loaded instead
-    if _, ok := ml.gptmodels[modelFile]; ok {
+    // Check if we have a GPTJ model loaded instead - if we do we return an error so the API tries with GPTJ
+    if _, ok := ml.gptmodels[modelName]; ok {
+        log.Debug().Msgf("Model is GPTJ: %s", modelName)
         return nil, fmt.Errorf("this model is a GPTJ one")
     }
-    // Check if the model path exists
-    if _, err := os.Stat(modelFile); os.IsNotExist(err) {
-        // try to find a s.bin
-        modelBin := fmt.Sprintf("%s.bin", modelFile)
-        if _, err := os.Stat(modelBin); os.IsNotExist(err) {
-            return nil, err
-        } else {
-            modelName = fmt.Sprintf("%s.bin", modelName)
-            modelFile = modelBin
-        }
-    }
     // Load the model and keep it in memory for later use
+    modelFile := filepath.Join(ml.modelPath, modelName)
+    log.Debug().Msgf("Loading model in memory from file: %s", modelFile)
     model, err := llama.New(modelFile, opts...)
     if err != nil {
         return nil, err
     }
     // If there is a prompt template, load it
-    if err := ml.loadTemplate(modelName, modelFile); err != nil {
+    if err := ml.loadTemplateIfExists(modelName, modelFile); err != nil {
        return nil, err
     }
-    ml.models[modelFile] = model
+    ml.models[modelName] = model
    return model, err
 }

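After the loader changes above, models are addressed by their exact file name: ListModels returns names as stored on disk (skipping .tmpl, .keep, .yaml and .yml files), loading fails early if the file is not present in the models path, and an optional prompt template is looked up as `<model file>.tmpl` next to the model. An illustrative layout and listing, with hypothetical file names and an abbreviated response:

```bash
ls models/
# ggml-gpt4all-j.bin    ggml-gpt4all-j.bin.tmpl    ggml-koala-7b.bin

curl http://localhost:8080/v1/models
# returns a "list" object whose data entries carry the two .bin file names above
```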