examples(telegram): add (#547)
parent 6f0bdbd01c
commit 3b3164b039
@@ -0,0 +1,30 @@
## Telegram bot

![Screenshot from 2023-06-09 00-36-26](https://github.com/go-skynet/LocalAI/assets/2420543/e98b4305-fa2d-41cf-9d2f-1bb2d75ca902)

This example uses a fork of [chatgpt-telegram-bot](https://github.com/karfly/chatgpt_telegram_bot) to deploy a Telegram bot backed by LocalAI instead of OpenAI.

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/telegram-bot

# Clone the bot fork next to the example's docker-compose.yml
git clone https://github.com/mudler/chatgpt_telegram_bot

cp -rf docker-compose.yml chatgpt_telegram_bot

cd chatgpt_telegram_bot

mv config/config.example.yml config/config.yml
mv config/config.example.env config/config.env

# Edit config/config.yml to set the Telegram bot token
vim config/config.yml

# Run the bot
docker-compose --env-file config/config.env up --build
```
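The two renamed files configure the bot and the compose stack. The authoritative keys live in the fork's `config.example.yml`, so the snippet below is only a hedged sketch (key names such as `telegram_token` follow the upstream chatgpt_telegram_bot project and may differ in the fork); the point is that the bot token comes from BotFather, while the OpenAI key can be any placeholder because requests are served by LocalAI:

```yaml
# config/config.yml — sketch only; check config.example.yml for the real key names
telegram_token: "123456789:ABC-your-bot-token-from-BotFather"
openai_api_key: "sk---anystringhere"   # any placeholder works; the compose file points the bot at LocalAI
```

`config/config.env` provides the variables that the bundled `docker-compose.yml` interpolates (`MONGODB_PORT`, `MONGODB_PATH`, `MONGO_EXPRESS_PORT`, `MONGO_EXPRESS_USERNAME`, `MONGO_EXPRESS_PASSWORD`); each has a default in the compose file, so you only need to change them if the defaults clash with your setup.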
Note: on first start, LocalAI is configured to download `gpt4all-j` (exposed as `gpt-3.5-turbo`) and `stablediffusion` for image generation. The download size is over 6GB; if your network connection is slow, adapt the healthcheck section of the `docker-compose.yml` file accordingly (for instance, replace `20m` with `1h`).

To configure models manually instead, comment out the `PRELOAD_MODELS` environment variable in the `docker-compose.yml` file and see, for instance, the `model` directory of the [chatbot-ui-manual example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual).
@@ -0,0 +1,66 @@
version: "3"

services:
  api:
    image: quay.io/go-skynet/local-ai:v1.18.0-ffmpeg
    # As initially LocalAI will download the models defined in PRELOAD_MODELS
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
      - IMAGE_PATH=/tmp
      # You can preload different models here as well.
      # See: https://github.com/go-skynet/model-gallery
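      # The optional "name" entries register a gallery model under an alias:
      # here gpt4all-j is served as gpt-3.5-turbo and whisper-base as whisper-1.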
      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, {"url": "github:go-skynet/model-gallery/stablediffusion.yaml"}, {"url": "github:go-skynet/model-gallery/whisper-base.yaml", "name": "whisper-1"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]

  mongo:
    container_name: mongo
    image: mongo:latest
    restart: always
    ports:
      - 127.0.0.1:${MONGODB_PORT:-27017}:${MONGODB_PORT:-27017}
    volumes:
      - ${MONGODB_PATH:-./mongodb}:/data/db
    # TODO: add auth

  chatgpt_telegram_bot:
    container_name: chatgpt_telegram_bot
    command: python3 bot/bot.py
    restart: always
    environment:
      - OPENAI_API_KEY=sk---anystringhere
      - OPENAI_API_BASE=http://api:8080/v1
    build:
      context: "."
      dockerfile: Dockerfile
    depends_on:
      api:
        condition: service_healthy
      mongo:
        condition: service_started

  mongo_express:
    container_name: mongo-express
    image: mongo-express:latest
    restart: always
    ports:
      - 127.0.0.1:${MONGO_EXPRESS_PORT:-8081}:${MONGO_EXPRESS_PORT:-8081}
    environment:
      - ME_CONFIG_MONGODB_SERVER=mongo
      - ME_CONFIG_MONGODB_PORT=${MONGODB_PORT:-27017}
      - ME_CONFIG_MONGODB_ENABLE_ADMIN=false
      - ME_CONFIG_MONGODB_AUTH_DATABASE=chatgpt_telegram_bot
      - ME_CONFIG_BASICAUTH_USERNAME=${MONGO_EXPRESS_USERNAME:-username}
      - ME_CONFIG_BASICAUTH_PASSWORD=${MONGO_EXPRESS_PASSWORD:-password}
    depends_on:
      - mongo
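Once the stack is up, the bot reaches LocalAI through `OPENAI_API_BASE=http://api:8080/v1` on the compose network, and the healthcheck above keeps the bot container from starting until `/readyz` succeeds, i.e. until the preloaded models are in place. A quick way to check the same thing from the host, as a sketch assuming the default `8080:8080` port mapping from this file:

```bash
# Same endpoint the compose healthcheck polls; exits non-zero until LocalAI is ready
curl -f http://localhost:8080/readyz

# List the models LocalAI exposes; gpt-3.5-turbo and whisper-1 should appear once preloading is done
curl http://localhost:8080/v1/models
```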