feature: makefile & updates (#23)

Co-authored-by: mudler <mudler@c3os.io>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>

Branch: add/first-example
Parent: e8eab66c30
Commit: c37175271f
@@ -1 +1 @@
-models/*.bin
+models

@@ -1,2 +1,9 @@
+# go-llama build artifacts
+go-llama
+
+# llama-cli build binary
 llama-cli
+
+# Ignore models
 models/*.bin
+models/ggml-*
@@ -0,0 +1,16 @@
+{
+    "version": "0.2.0",
+    "configurations": [
+
+        {
+            "name": "Launch Go",
+            "type": "go",
+            "request": "launch",
+            "mode": "debug",
+            "program": "${workspaceFolder}/main.go",
+            "args": [
+                "api"
+            ]
+        }
+    ]
+}
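Note: this debug configuration simply runs main.go with the single argument "api", i.e. it starts the HTTP API under the Go debugger. Outside of VS Code the equivalent would be roughly the following (a sketch; it assumes the go-llama bindings have already been built, which is what the Makefile's prepare target below takes care of, and that C_INCLUDE_PATH/LIBRARY_PATH point at that checkout as in the run target):

    go run ./ api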
@@ -0,0 +1,52 @@
+GOCMD=go
+GOTEST=$(GOCMD) test
+GOVET=$(GOCMD) vet
+BINARY_NAME=llama-cli
+GOLLAMA_VERSION?=llama.cpp-8b67998
+
+GREEN := $(shell tput -Txterm setaf 2)
+YELLOW := $(shell tput -Txterm setaf 3)
+WHITE := $(shell tput -Txterm setaf 7)
+CYAN := $(shell tput -Txterm setaf 6)
+RESET := $(shell tput -Txterm sgr0)
+
+.PHONY: all test build vendor
+
+all: help
+
+## Build:
+
+build: prepare ## Build the project
+	$(GOCMD) build -o $(BINARY_NAME) ./
+
+go-llama:
+	git clone -b $(GOLLAMA_VERSION) --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
+
+prepare: go-llama
+	$(MAKE) -C go-llama libbinding.a
+	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama
+
+clean: ## Remove build related file
+	$(MAKE) -C go-llama clean
+	rm -fr ./go-llama
+	rm -f $(BINARY_NAME)
+
+## Run:
+run: prepare
+	C_INCLUDE_PATH=$(shell pwd)/go-llama.cpp LIBRARY_PATH=$(shell pwd)/go-llama.cpp $(GOCMD) run ./ api
+
+## Test:
+test: ## Run the tests of the project
+	$(GOTEST) -v -race ./... $(OUTPUT_OPTIONS)
+
+## Help:
+help: ## Show this help.
+	@echo ''
+	@echo 'Usage:'
+	@echo '  ${YELLOW}make${RESET} ${GREEN}<target>${RESET}'
+	@echo ''
+	@echo 'Targets:'
+	@awk 'BEGIN {FS = ":.*?## "} { \
+		if (/^[a-zA-Z_-]+:.*?##.*$$/) {printf "  ${YELLOW}%-20s${GREEN}%s${RESET}\n", $$1, $$2} \
+		else if (/^## .*$$/) {printf " ${CYAN}%s${RESET}\n", substr($$1,4)} \
+		}' $(MAKEFILE_LIST)
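Taken together, these targets clone go-llama.cpp at the pinned revision, build its native libbinding.a, and point go.mod at the local checkout before compiling llama-cli. A typical workflow with this Makefile would look roughly like this (a sketch; the GOLLAMA_VERSION value shown just repeats the default above):

    make build    # clone go-llama, build libbinding.a, then build the llama-cli binary
    make run      # prepare everything and start the API via go run ./ api
    make test     # go test -v -race ./...
    make clean    # remove ./go-llama and the llama-cli binary
    make build GOLLAMA_VERSION=llama.cpp-8b67998   # pin a different go-llama.cpp tag if needed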
@@ -1,15 +1,28 @@
 version: '3.6'
 
 services:
+
+  # chatgpt:
+  #   image: ghcr.io/mckaywrigley/chatbot-ui:main
+  #  # platform: linux/amd64
+  #   ports:
+  #     - 3000:3000
+  #   environment:
+  #     - 'OPENAI_API_KEY=sk-000000000000000'
+  #     - 'OPENAI_API_HOST=http://api:8080'
+
   api:
     image: quay.io/go-skynet/llama-cli:latest
-    build: .
-    volumes:
-      - ./models:/models
+    build:
+      context: .
+      dockerfile: Dockerfile
     ports:
       - 8080:8080
     environment:
-      - MODELS_PATH=/models
-      - CONTEXT_SIZE=700
+      - MODELS_PATH=$MODELS_PATH
+      - CONTEXT_SIZE=$CONTEXT_SIZE
       - THREADS=$THREADS
+    volumes:
+      - ./models:/models:cached
     command: api
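With this change the compose file no longer hard-codes MODELS_PATH and CONTEXT_SIZE; all three settings are taken from the host environment through compose variable substitution. One way to supply them is an .env file next to docker-compose.yaml (a sketch; MODELS_PATH and CONTEXT_SIZE below reuse the previously hard-coded values, while THREADS=4 is only an illustrative choice):

    # .env (example values)
    MODELS_PATH=/models
    CONTEXT_SIZE=700
    THREADS=4

    # build the image from the local Dockerfile and start the API
    docker-compose up --build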