@@ -10,22 +10,86 @@ import (
 	gpt2 "github.com/go-skynet/go-gpt2.cpp"
 	gptj "github.com/go-skynet/go-gpt4all-j.cpp"
 	llama "github.com/go-skynet/go-llama.cpp"
+	"github.com/hashicorp/go-multierror"
 )
 
 // mutex still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
 var mutexMap sync.Mutex
 var mutexes map[string]*sync.Mutex = make(map[string]*sync.Mutex)
+var loadedModels map[string]interface{} = map[string]interface{}{}
+var muModels sync.Mutex
+
+func backendLoader(backendString string, loader *model.ModelLoader, modelFile string, llamaOpts []llama.ModelOption) (model interface{}, err error) {
+	switch strings.ToLower(backendString) {
+	case "llama":
+		return loader.LoadLLaMAModel(modelFile, llamaOpts...)
+	case "stablelm":
+		return loader.LoadStableLMModel(modelFile)
+	case "gpt2":
+		return loader.LoadGPT2Model(modelFile)
+	case "gptj":
+		return loader.LoadGPTJModel(modelFile)
+	default:
+		return nil, fmt.Errorf("backend unsupported: %s", backendString)
+	}
+}
+
+func greedyLoader(loader *model.ModelLoader, modelFile string, llamaOpts []llama.ModelOption) (model interface{}, err error) {
+	updateModels := func(model interface{}) {
+		muModels.Lock()
+		defer muModels.Unlock()
+		loadedModels[modelFile] = model
+	}
+
+	muModels.Lock()
+	m, exists := loadedModels[modelFile]
+	if exists {
+		muModels.Unlock()
+		return m, nil
+	}
+	muModels.Unlock()
+
+	model, modelerr := loader.LoadLLaMAModel(modelFile, llamaOpts...)
+	if modelerr == nil {
+		updateModels(model)
+		return model, nil
+	} else {
+		err = multierror.Append(err, modelerr)
+	}
+
+	model, modelerr = loader.LoadGPTJModel(modelFile)
+	if modelerr == nil {
+		updateModels(model)
+		return model, nil
+	} else {
+		err = multierror.Append(err, modelerr)
+	}
+
+	model, modelerr = loader.LoadGPT2Model(modelFile)
+	if modelerr == nil {
+		updateModels(model)
+		return model, nil
+	} else {
+		err = multierror.Append(err, modelerr)
+	}
+
+	model, modelerr = loader.LoadStableLMModel(modelFile)
+	if modelerr == nil {
+		updateModels(model)
+		return model, nil
+	} else {
+		err = multierror.Append(err, modelerr)
+	}
+
+	return nil, fmt.Errorf("could not load model - all backends returned error: %s", err.Error())
+}
+
 func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback func(string) bool) (func() (string, error), error) {
-	var model *llama.LLama
-	var gptModel *gptj.GPTJ
-	var gpt2Model *gpt2.GPT2
-	var stableLMModel *gpt2.StableLM
 	supportStreams := false
 	modelFile := c.Model
 
 	// Try to load the model
-	var llamaerr, gpt2err, gptjerr, stableerr error
 	llamaOpts := []llama.ModelOption{}
 	if c.ContextSize != 0 {
 		llamaOpts = append(llamaOpts, llama.SetContext(c.ContextSize))
@@ -34,25 +98,21 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
 		llamaOpts = append(llamaOpts, llama.EnableF16Memory)
 	}
 
-	// TODO: this is ugly, better identifying the model somehow! however, it is a good stab for a first implementation..
-	model, llamaerr = loader.LoadLLaMAModel(modelFile, llamaOpts...)
-	if llamaerr != nil {
-		gptModel, gptjerr = loader.LoadGPTJModel(modelFile)
-		if gptjerr != nil {
-			gpt2Model, gpt2err = loader.LoadGPT2Model(modelFile)
-			if gpt2err != nil {
-				stableLMModel, stableerr = loader.LoadStableLMModel(modelFile)
-				if stableerr != nil {
-					return nil, fmt.Errorf("llama: %s gpt: %s gpt2: %s stableLM: %s", llamaerr.Error(), gptjerr.Error(), gpt2err.Error(), stableerr.Error()) // llama failed first, so we want to catch both errors
-				}
-			}
-		}
-	}
+	var inferenceModel interface{}
+	var err error
+	if c.Backend == "" {
+		inferenceModel, err = greedyLoader(loader, modelFile, llamaOpts)
+	} else {
+		inferenceModel, err = backendLoader(c.Backend, loader, modelFile, llamaOpts)
+	}
+	if err != nil {
+		return nil, err
+	}
 
 	var fn func() (string, error)
-	switch {
-	case stableLMModel != nil:
+	switch model := inferenceModel.(type) {
+	case *gpt2.StableLM:
 		fn = func() (string, error) {
 			// Generate the prediction using the language model
 			predictOptions := []gpt2.PredictOption{
@@ -71,12 +131,12 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
 				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
 			}
 
-			return stableLMModel.Predict(
+			return model.Predict(
 				s,
 				predictOptions...,
 			)
 		}
-	case gpt2Model != nil:
+	case *gpt2.GPT2:
 		fn = func() (string, error) {
 			// Generate the prediction using the language model
 			predictOptions := []gpt2.PredictOption{
@@ -95,12 +155,12 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
 				predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
 			}
 
-			return gpt2Model.Predict(
+			return model.Predict(
 				s,
 				predictOptions...,
 			)
 		}
-	case gptModel != nil:
+	case *gptj.GPTJ:
 		fn = func() (string, error) {
 			// Generate the prediction using the language model
 			predictOptions := []gptj.PredictOption{
@@ -119,12 +179,12 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
 				predictOptions = append(predictOptions, gptj.SetSeed(c.Seed))
 			}
 
-			return gptModel.Predict(
+			return model.Predict(
 				s,
 				predictOptions...,
 			)
 		}
-	case model != nil:
+	case *llama.LLama:
 		supportStreams = true
 		fn = func() (string, error) {