example(slack-qa-bot): Add slack QA bot example (#654)

Signed-off-by: mudler <mudler@localai.io>
Ettore Di Giacinto 1 year ago committed by GitHub
parent 91a67d5ee0
commit 2a45a99737
  1. examples/README.md (8)
  2. examples/slack-qa-bot/.env.example (48)
  3. examples/slack-qa-bot/README.md (23)
  4. examples/slack-qa-bot/deployment.yaml (97)
  5. examples/slack-qa-bot/docker-compose.yml (30)

@@ -98,6 +98,14 @@ Run a slack bot which lets you talk directly with a model
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-bot/)
### Slack bot (Question answering)
_by [@mudler](https://github.com/mudler)_
Run a Slack bot, ideal for teams, that lets you ask questions about a documentation website or a GitHub repository.
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-qa-bot/)
### Question answering on documents with llama-index
_by [@mudler](https://github.com/mudler)_

@@ -0,0 +1,48 @@
# Create an app-level token with connections:write scope
SLACK_APP_TOKEN=xapp-1-...
# Install the app into your workspace to grab this token
SLACK_BOT_TOKEN=xoxb-...
# Set this to any random string; the value doesn't matter here, but the Python OpenAI library complains if it is not set
OPENAI_API_KEY=sk-foo-bar-baz
# Optional: gpt-3.5-turbo and gpt-4 are currently supported (default: gpt-3.5-turbo)
OPENAI_MODEL=gpt-3.5-turbo
# Optional: Adjust the timeout (in seconds) for OpenAI calls (default: 30)
OPENAI_TIMEOUT_SECONDS=560
MEMORY_DIR=/tmp/memory_dir
OPENAI_API_BASE=http://api:8080/v1
EMBEDDINGS_MODEL_NAME=all-MiniLM-L6-v2
## Repositories and sitemap to index in the vector database on startup
SITEMAP="https://kairos.io/sitemap.xml"
# Optional repository names.
# REPOSITORIES="foo,bar"
# # Define clone URL for "foo"
# foo_CLONE_URL="http://github.com.."
# bar_CLONE_URL="..."
# # Define branch for foo
# foo_BRANCH="master"
# Optional: GitHub token used when scraping issues
# GITHUB_PERSONAL_ACCESS_TOKEN=""
# ISSUE_REPOSITORIES="go-skynet/LocalAI,foo/bar,..."
# Optional: When the string is "true", this app translates ChatGPT prompts into a user's preferred language (default: true)
USE_SLACK_LANGUAGE=true
# Optional: Adjust the app's logging level (default: DEBUG)
SLACK_APP_LOG_LEVEL=INFO
# Optional: When the string is "true", translate between OpenAI markdown and Slack mrkdwn format (default: false)
TRANSLATE_MARKDOWN=true
### LocalAI
DEBUG=true
MODELS_PATH=/models
IMAGE_PATH=/tmp
# See: https://github.com/go-skynet/model-gallery
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]
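With this configuration LocalAI preloads the `gpt4all-j` model from the model gallery under the alias `gpt-3.5-turbo` and serves it on the OpenAI-compatible endpoint set in `OPENAI_API_BASE`. A minimal sanity-check sketch, assuming port 8080 is published on the host as in the compose file below:

```bash
# List the models LocalAI exposes on its OpenAI-compatible API
# (assumes port 8080 is published on the host, as in docker-compose.yml)
curl http://localhost:8080/v1/models

# Ask the preloaded "gpt-3.5-turbo" alias a test question
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Hello"}]}'
```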

@@ -0,0 +1,23 @@
## Slack QA Bot
This example uses https://github.com/spectrocloud-labs/Slack-QA-bot to deploy a Slack bot that can answer questions about your documentation!
- Create a new Slack app using the manifest-dev.yml file
- Install the app into your Slack workspace
- Retrieve your Slack tokens and add them to `.env`
- Start the app
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/slack-qa-bot
cp -rfv .env.example .env
# Edit .env and add slackbot api keys, or repository settings to scan
vim .env
# run the bot
docker-compose up
```
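Once `docker-compose up` reports the `api` service healthy, tailing the bot's logs is an easy way to confirm it connected to Slack and finished indexing the configured repositories and sitemap; a minimal sketch, using the `slackbot` service name from the compose file:

```bash
# Follow the Slack bot logs (service name as defined in docker-compose.yml)
docker-compose logs -f slackbot

# After editing .env, recreate only the bot container to pick up the changes
docker-compose up -d --force-recreate slackbot
```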

@@ -0,0 +1,97 @@
apiVersion: v1
kind: Namespace
metadata:
  name: slack-bot
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: knowledgebase
  namespace: slack-bot
  labels:
    app: localai-qabot
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: localai-qabot
  namespace: slack-bot
  labels:
    app: localai-qabot
spec:
  selector:
    matchLabels:
      app: localai-qabot
  replicas: 1
  template:
    metadata:
      labels:
        app: localai-qabot
      name: localai-qabot
    spec:
      containers:
        - name: localai-qabot-slack
          env:
            - name: OPENAI_API_KEY
              value: "x"
            - name: SLACK_APP_TOKEN
              value: "xapp-1-"
            - name: SLACK_BOT_TOKEN
              value: "xoxb-"
            - name: OPENAI_MODEL
              value: "gpt-3.5-turbo"
            - name: OPENAI_TIMEOUT_SECONDS
              value: "400"
            - name: OPENAI_SYSTEM_TEXT
              value: ""
            - name: MEMORY_DIR
              value: "/memory"
            - name: TRANSLATE_MARKDOWN
              value: "true"
            - name: OPENAI_API_BASE
              value: "http://local-ai.default.svc.cluster.local:8080"
            - name: REPOSITORIES
              value: "KAIROS,AGENT,SDK,OSBUILDER,PACKAGES,IMMUCORE"
            - name: KAIROS_CLONE_URL
              value: "https://github.com/kairos-io/kairos"
            - name: KAIROS_BRANCH
              value: "master"
            - name: AGENT_CLONE_URL
              value: "https://github.com/kairos-io/kairos-agent"
            - name: AGENT_BRANCH
              value: "main"
            - name: SDK_CLONE_URL
              value: "https://github.com/kairos-io/kairos-sdk"
            - name: SDK_BRANCH
              value: "main"
            - name: OSBUILDER_CLONE_URL
              value: "https://github.com/kairos-io/osbuilder"
            - name: OSBUILDER_BRANCH
              value: "master"
            - name: PACKAGES_CLONE_URL
              value: "https://github.com/kairos-io/packages"
            - name: PACKAGES_BRANCH
              value: "main"
            - name: IMMUCORE_CLONE_URL
              value: "https://github.com/kairos-io/immucore"
            - name: IMMUCORE_BRANCH
              value: "master"
            - name: GITHUB_PERSONAL_ACCESS_TOKEN
              value: ""
            - name: ISSUE_REPOSITORIES
              value: "kairos-io/kairos"
          image: quay.io/spectrocloud-labs/slack-qa-local-bot:qa
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: "/memory"
              name: knowledgebase
      volumes:
        - name: knowledgebase
          persistentVolumeClaim:
            claimName: knowledgebase
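The manifest above creates the `slack-bot` namespace, a 5Gi PVC for the bot's memory, and the Deployment itself; it assumes a LocalAI service is already reachable at the address given in `OPENAI_API_BASE`. A minimal sketch of applying and checking it, assuming `kubectl` points at the target cluster:

```bash
# Apply the Namespace, PersistentVolumeClaim and Deployment defined above
kubectl apply -f deployment.yaml

# Wait for the bot pod and follow its logs
kubectl -n slack-bot get pods
kubectl -n slack-bot logs -f -l app=localai-qabot
```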

@@ -0,0 +1,30 @@
version: "3"
services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    # Since LocalAI initially downloads the models defined in PRELOAD_MODELS,
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    ports:
      - 8080:8080
    env_file:
      - .env
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
  slackbot:
    image: quay.io/spectrocloud-labs/slack-qa-local-bot:qa
    container_name: slackbot
    restart: always
    env_file:
      - .env
    depends_on:
      api:
        condition: service_healthy
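The healthcheck polls LocalAI's `/readyz` endpoint, and the `service_healthy` condition keeps the bot from starting until the preloaded models have finished downloading. The same check can be run by hand while waiting; a minimal sketch:

```bash
# Poll the readiness endpoint used by the compose healthcheck
curl -f http://localhost:8080/readyz && echo "LocalAI is ready"

# Watch the model download progress in the api service
docker-compose logs -f api
```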