diff --git a/.vscode/launch.json b/.vscode/launch.json
index e8d9482..b45837f 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -1,6 +1,19 @@
 {
     "version": "0.2.0",
     "configurations": [
+        {
+            "name": "Python: Current File",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal",
+            "justMyCode": false,
+            "cwd": "${workspaceFolder}/examples/langchain-chroma",
+            "env": {
+                "OPENAI_API_BASE": "http://localhost:8080/v1",
+                "OPENAI_API_KEY": "abc"
+            }
+        },
         {
             "name": "Launch Go",
             "type": "go",
diff --git a/examples/langchain-chroma/.env.example b/examples/langchain-chroma/.env.example
new file mode 100644
index 0000000..37cda59
--- /dev/null
+++ b/examples/langchain-chroma/.env.example
@@ -0,0 +1,5 @@
+THREADS=4
+CONTEXT_SIZE=512
+MODELS_PATH=/models
+DEBUG=true
+# BUILD_TYPE=generic
\ No newline at end of file
diff --git a/examples/langchain-chroma/.gitignore b/examples/langchain-chroma/.gitignore
new file mode 100644
index 0000000..3dc1901
--- /dev/null
+++ b/examples/langchain-chroma/.gitignore
@@ -0,0 +1,4 @@
+db/
+state_of_the_union.txt
+models/bert
+models/ggml-gpt4all-j
\ No newline at end of file
diff --git a/examples/langchain-chroma/README.md b/examples/langchain-chroma/README.md
index 70e3f42..17207a0 100644
--- a/examples/langchain-chroma/README.md
+++ b/examples/langchain-chroma/README.md
@@ -10,13 +10,20 @@ Download the models and start the API:
 
 # Clone LocalAI
 git clone https://github.com/go-skynet/LocalAI
-cd LocalAI/examples/query_data
+cd LocalAI/examples/langchain-chroma
 
 wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O models/bert
 wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
 
+# configure your .env
+# NOTE: ensure that THREADS does not exceed your machine's CPU cores
+mv .env.example .env
+
 # start with docker-compose
 docker-compose up -d --build
+
+# tail the logs & wait until the build completes
+docker logs -f langchain-chroma-api-1
 ```
 
 ### Python requirements
@@ -37,7 +44,7 @@ wget https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_
 python store.py
 ```
 
-After it finishes, a directory "storage" will be created with the vector index database.
+After it finishes, a directory "db" will be created with the vector index database.
 
 ## Query
 
diff --git a/examples/langchain-chroma/docker-compose.yml b/examples/langchain-chroma/docker-compose.yml
new file mode 100644
index 0000000..96ef540
--- /dev/null
+++ b/examples/langchain-chroma/docker-compose.yml
@@ -0,0 +1,15 @@
+version: '3.6'
+
+services:
+  api:
+    image: quay.io/go-skynet/local-ai:latest
+    build:
+      context: ../../
+      dockerfile: Dockerfile
+    ports:
+      - 8080:8080
+    env_file:
+      - ../../.env
+    volumes:
+      - ./models:/models:cached
+    command: ["/usr/bin/local-ai"]
diff --git a/examples/langchain-chroma/models/embeddings.yaml b/examples/langchain-chroma/models/embeddings.yaml
index 46a0850..536c8de 100644
--- a/examples/langchain-chroma/models/embeddings.yaml
+++ b/examples/langchain-chroma/models/embeddings.yaml
@@ -1,5 +1,6 @@
 name: text-embedding-ada-002
 parameters:
   model: bert
+threads: 4
 backend: bert-embeddings
 embeddings: true
diff --git a/examples/langchain-chroma/query.py b/examples/langchain-chroma/query.py
index 2f7df50..3384881 100644
--- a/examples/langchain-chroma/query.py
+++ b/examples/langchain-chroma/query.py
@@ -2,8 +2,9 @@ import os
 
 from langchain.vectorstores import Chroma
 from langchain.embeddings import OpenAIEmbeddings
-from langchain.llms import OpenAI
-from langchain.chains import VectorDBQA
+from langchain.chat_models import ChatOpenAI
+from langchain.chains import RetrievalQA
+from langchain.vectorstores.base import VectorStoreRetriever
 
 base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
 
@@ -12,8 +13,10 @@ embedding = OpenAIEmbeddings()
 persist_directory = 'db'
 
 # Now we can load the persisted database from disk, and use it as normal. 
+llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)
 vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
-qa = VectorDBQA.from_chain_type(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path), chain_type="stuff", vectorstore=vectordb)
+retriever = VectorStoreRetriever(vectorstore=vectordb)
+qa = RetrievalQA.from_llm(llm=llm, retriever=retriever)
 
 query = "What the president said about taxes ?"
 print(qa.run(query))
diff --git a/examples/langchain-chroma/store.py b/examples/langchain-chroma/store.py
index 127bb24..b9cbad0 100755
--- a/examples/langchain-chroma/store.py
+++ b/examples/langchain-chroma/store.py
@@ -2,9 +2,7 @@ import os
 
 from langchain.vectorstores import Chroma
 from langchain.embeddings import OpenAIEmbeddings
-from langchain.text_splitter import RecursiveCharacterTextSplitter,TokenTextSplitter,CharacterTextSplitter
-from langchain.llms import OpenAI
-from langchain.chains import VectorDBQA
+from langchain.text_splitter import CharacterTextSplitter
 from langchain.document_loaders import TextLoader
 
 base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
@@ -14,7 +12,6 @@ loader = TextLoader('state_of_the_union.txt')
 documents = loader.load()
 
 text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=70)
-#text_splitter = TokenTextSplitter()
 texts = text_splitter.split_documents(documents)
 
 # Embed and store the texts
diff --git a/examples/query_data/docker-compose.yml b/examples/query_data/docker-compose.yml
index a59edfc..cf76eb7 100644
--- a/examples/query_data/docker-compose.yml
+++ b/examples/query_data/docker-compose.yml
@@ -4,7 +4,7 @@ services:
   api:
     image: quay.io/go-skynet/local-ai:latest
     build:
-      context: .
+      context: ../../
       dockerfile: Dockerfile
     ports:
       - 8080:8080
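
The launch.json config and the README both point the client at `http://localhost:8080/v1`. Once the container from `docker-compose up` is ready, it is worth confirming that the bert embeddings backend (configured in models/embeddings.yaml) answers before running store.py. A minimal sanity check, assuming the openai 0.x Python client that these examples target; the model name must match the `name:` field in embeddings.yaml, and the key is a placeholder since LocalAI does not validate it:

```python
import openai

# Route the client to LocalAI instead of api.openai.com.
openai.api_base = "http://localhost:8080/v1"
openai.api_key = "abc"  # LocalAI ignores the key, but the client requires one

# "text-embedding-ada-002" is the alias defined in models/embeddings.yaml,
# served by the bert-embeddings backend.
resp = openai.Embedding.create(
    input=["What did the president say about taxes?"],
    model="text-embedding-ada-002",
)
print(len(resp["data"][0]["embedding"]))  # dimensionality of the MiniLM vectors
```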
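On the query.py changes: the PR swaps the deprecated `VectorDBQA` chain for `RetrievalQA` plus an explicitly constructed `VectorStoreRetriever`. For reference, the same flow can be written with `vectordb.as_retriever()`, which builds that retriever for you. This is a sketch under the same LangChain 0.0.x API the example uses, not a drop-in from this PR:

```python
import os

from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA

# Point the OpenAI-compatible client at the LocalAI endpoint.
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

# Reopen the Chroma index persisted by store.py under ./db.
embedding = OpenAIEmbeddings()
vectordb = Chroma(persist_directory='db', embedding_function=embedding)

# as_retriever() wraps the store in a VectorStoreRetriever for us.
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)
qa = RetrievalQA.from_llm(llm=llm, retriever=vectordb.as_retriever())

print(qa.run("What did the president say about taxes?"))
```

Behavior should be identical to the explicit construction in the PR, since `as_retriever()` simply returns a `VectorStoreRetriever` over the same store.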