Compare commits

3 Commits

master...examples_u

Author | SHA1 | Date
---|---|---
mudler | 4c3c6fcaf7 | 1 year ago
mudler | 6a13cf957c | 1 year ago
mudler | 3e0b75b5e2 | 1 year ago
@@ -1 +0,0 @@
{{.Input}}
@@ -1,16 +0,0 @@
name: gpt-3.5-turbo
parameters:
  model: ggml-gpt4all-j
  top_k: 80
  temperature: 0.2
  top_p: 0.7
context_size: 1024
stopwords:
- "HUMAN:"
- "GPT:"
roles:
  user: " "
  system: " "
template:
  completion: completion
  chat: gpt4all
@@ -1,4 +0,0 @@
The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
### Prompt:
{{.Input}}
### Response:
@@ -0,0 +1,26 @@
# flowise

Example of integration with [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise).

![Screenshot from 2023-05-30 18-01-03](https://github.com/go-skynet/LocalAI/assets/2420543/02458782-0549-4131-971c-95ee56ec1af8)

You can check a demo video in the Flowise PR: https://github.com/FlowiseAI/Flowise/pull/123

## Run

In this example, LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo"; see `docker-compose.yaml` for details.

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/flowise

# start with docker-compose
docker-compose up --pull always
```

## Accessing flowise

Open http://localhost:3000.
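Once the `api` container is healthy, it can help to sanity-check LocalAI's OpenAI-compatible API before building a flow. A minimal sketch, assuming the default `8080` port mapping from `docker-compose.yaml` and that the preload registered the model under the name `gpt-3.5-turbo`:

```bash
# List the models LocalAI has registered; gpt-3.5-turbo should appear
curl http://localhost:8080/v1/models

# Send a test request through the OpenAI-compatible chat endpoint
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Say hello"}], "temperature": 0.2}'
```

Inside the compose network, Flowise can reach the same API at `http://api:8080/v1` (the service name resolves via Docker DNS); since this example setup configures no authentication, a placeholder API key should do.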
@@ -0,0 +1,37 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    # As initially LocalAI will download the models defined in PRELOAD_MODELS,
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
      # You can preload different models here as well.
      # See: https://github.com/go-skynet/model-gallery
      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]

  flowise:
    depends_on:
      api:
        condition: service_healthy
    image: flowiseai/flowise
    ports:
      - 3000:3000
    volumes:
      - ~/.flowise:/root/.flowise
    command: /bin/sh -c "sleep 3; flowise start"
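Because `flowise` is gated on `condition: service_healthy`, the first start can sit idle for a while as models download. A small sketch for watching readiness from the host, using the same `/readyz` route the healthcheck above polls:

```bash
# Wait until LocalAI has finished downloading and loading the preloaded
# models; /readyz is the endpoint the compose healthcheck curls.
until curl -sf http://localhost:8080/readyz > /dev/null; do
  echo "LocalAI not ready yet, retrying in 10s..."
  sleep 10
done
echo "LocalAI is ready; Flowise will start shortly."
```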
@@ -0,0 +1,28 @@
## Telegram bot

This example uses [chatgpt-telegram-bot](https://github.com/karfly/chatgpt_telegram_bot) to deploy a Telegram bot backed by LocalAI instead of OpenAI.

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/telegram-bot

git clone https://github.com/karfly/chatgpt_telegram_bot

cp -rf docker-compose.yml chatgpt_telegram_bot

cd chatgpt_telegram_bot

mv config/config.example.yml config/config.yml
mv config/config.example.env config/config.env

# Edit config/config.yml to set the Telegram bot token
vim config/config.yml

# run the bot
docker-compose --env-file config/config.env up --build
```

Note: on first start, LocalAI is configured to download `gpt4all-j` (served in place of `gpt-3.5-turbo`) and `stablediffusion` for image generation. The download size is over 6 GB; if your network connection is slow, adapt the healthcheck section of the `docker-compose.yml` file accordingly (for instance, replace `20m` with `1h`).

To configure models manually, comment out the `PRELOAD_MODELS` environment variable in the `docker-compose.yml` file and see, for instance, the `model` directory of the [chatbot-ui-manual example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual).
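Since the compose file below preloads `stablediffusion` (and `whisper-1`) alongside the chat model, you can exercise image generation directly against LocalAI before going through the bot. A hedged sketch, assuming LocalAI's OpenAI-compatible images endpoint and the default `8080` port mapping:

```bash
# Generate a test image with the preloaded stablediffusion model; the JSON
# response points at the generated file (IMAGE_PATH=/tmp inside the container).
curl http://localhost:8080/v1/images/generations \
  -H "Content-Type: application/json" \
  -d '{"prompt": "a cat playing chess", "size": "256x256"}'
```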
@@ -0,0 +1,66 @@
version: "3"

services:
  api:
    image: quay.io/go-skynet/local-ai:v1.18.0-ffmpeg
    # As initially LocalAI will download the models defined in PRELOAD_MODELS,
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
      - IMAGE_PATH=/tmp
      # You can preload different models here as well.
      # See: https://github.com/go-skynet/model-gallery
      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, {"url": "github:go-skynet/model-gallery/stablediffusion.yaml"}, {"url": "github:go-skynet/model-gallery/whisper-base.yaml", "name": "whisper-1"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]

  mongo:
    container_name: mongo
    image: mongo:latest
    restart: always
    ports:
      - 127.0.0.1:${MONGODB_PORT:-27017}:${MONGODB_PORT:-27017}
    volumes:
      - ${MONGODB_PATH:-./mongodb}:/data/db
    # TODO: add auth

  chatgpt_telegram_bot:
    container_name: chatgpt_telegram_bot
    command: python3 bot/bot.py
    restart: always
    environment:
      - OPENAI_API_KEY=sk---anystringhere
      - OPENAI_API_BASE=http://api:8080/v1
    build:
      context: "."
      dockerfile: Dockerfile
    depends_on:
      api:
        condition: service_healthy
      mongo:
        condition: service_started

  mongo_express:
    container_name: mongo-express
    image: mongo-express:latest
    restart: always
    ports:
      - 127.0.0.1:${MONGO_EXPRESS_PORT:-8081}:${MONGO_EXPRESS_PORT:-8081}
    environment:
      - ME_CONFIG_MONGODB_SERVER=mongo
      - ME_CONFIG_MONGODB_PORT=${MONGODB_PORT:-27017}
      - ME_CONFIG_MONGODB_ENABLE_ADMIN=false
      - ME_CONFIG_MONGODB_AUTH_DATABASE=chatgpt_telegram_bot
      - ME_CONFIG_BASICAUTH_USERNAME=${MONGO_EXPRESS_USERNAME:-username}
      - ME_CONFIG_BASICAUTH_PASSWORD=${MONGO_EXPRESS_PASSWORD:-password}
    depends_on:
      - mongo
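Note that this compose file reads several settings through `${VAR:-default}` fallbacks, so the Mongo port and the mongo-express credentials can be overridden from the environment without editing the YAML; for example:

```bash
# Compose substitutes these from the shell environment, falling back to the
# defaults in docker-compose.yml when they are unset.
MONGODB_PORT=27018 \
MONGO_EXPRESS_USERNAME=admin \
MONGO_EXPRESS_PASSWORD='change-me' \
docker-compose --env-file config/config.env up --build
```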