@@ -1,7 +1,11 @@
 package main
 
 import (
+	"fmt"
 	"os"
+	"path/filepath"
 
 	api "github.com/go-skynet/LocalAI/api"
 	model "github.com/go-skynet/LocalAI/pkg/model"
@@ -48,7 +52,7 @@ func main() {
 				Name:        "models-path",
 				DefaultText: "Path containing models used for inferencing",
 				EnvVars:     []string{"MODELS_PATH"},
-				Value:       path,
+				Value:       filepath.Join(path, "models"),
 			},
 			&cli.StringFlag{
 				Name: "config-file",
@@ -85,6 +89,7 @@ It uses llama.cpp, ggml and gpt4all as backend with golang c bindings.
 		UsageText: `local-ai [options]`,
 		Copyright: "go-skynet authors",
 		Action: func(ctx *cli.Context) error {
+			fmt.Printf("Starting LocalAI using %d threads, with models path: %s\n", ctx.Int("threads"), ctx.String("models-path"))
 			return api.App(ctx.String("config-file"), model.NewModelLoader(ctx.String("models-path")), ctx.Int("threads"), ctx.Int("context-size"), ctx.Bool("f16"), ctx.Bool("debug"), false).Listen(ctx.String("address"))
 		},
 	}
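
Net effect of the change: the `models-path` flag now defaults to a `models` subdirectory of `path` rather than `path` itself, and startup logs the thread count and resolved models path. Below is a minimal, self-contained sketch of the same flag wiring; it assumes `path` comes from `os.Getwd()` (its origin is not shown in these hunks), and the `Action` body is a hypothetical stand-in for the real `api.App(...)` call.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/urfave/cli/v2"
)

func main() {
	// Assumption: `path` in the diff is the current working directory.
	path, err := os.Getwd()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	app := &cli.App{
		Name: "local-ai",
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name:        "models-path",
				DefaultText: "Path containing models used for inferencing",
				EnvVars:     []string{"MODELS_PATH"},
				// After this change the default is <cwd>/models
				// instead of the working directory itself.
				Value: filepath.Join(path, "models"),
			},
		},
		Action: func(ctx *cli.Context) error {
			// Stand-in for the real api.App(...).Listen(...) call.
			fmt.Println("models path:", ctx.String("models-path"))
			return nil
		},
	}

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

With urfave/cli v2, an explicit `--models-path` argument takes precedence, then the `MODELS_PATH` environment variable, and only then the `filepath.Join(path, "models")` default, so existing deployments that set either override are unaffected by the new default.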