From 07c3aa18691a7ea071065276197afc1d8c196de9 Mon Sep 17 00:00:00 2001
From: Dave
Date: Thu, 4 May 2023 02:41:13 -0400
Subject: [PATCH] Dockerized Langchain / PY example (#175)

---
 examples/langchain/PY.Dockerfile              |  5 +++
 examples/langchain/README.md                  | 13 +++----
 examples/langchain/docker-compose.yaml        | 24 ++++++++++++-
 .../langchainjs-localai-example/src/index.mts |  2 +-
 .../.vscode/launch.json                       | 24 ++++++++++++
 .../.vscode/settings.json                     |  3 ++
 .../langchainpy-localai-example/full_demo.py  | 42 +++++++++++++++++++
 .../requirements.txt                          | 32 +++++++++++++++
 .../simple_demo.py                            |  6 +++
 examples/langchain/models/gpt-3.5-turbo.yaml  |  1 +
 10 files changed, 142 insertions(+), 10 deletions(-)
 create mode 100644 examples/langchain/PY.Dockerfile
 create mode 100644 examples/langchain/langchainpy-localai-example/.vscode/launch.json
 create mode 100644 examples/langchain/langchainpy-localai-example/.vscode/settings.json
 create mode 100644 examples/langchain/langchainpy-localai-example/full_demo.py
 create mode 100644 examples/langchain/langchainpy-localai-example/requirements.txt
 create mode 100644 examples/langchain/langchainpy-localai-example/simple_demo.py

diff --git a/examples/langchain/PY.Dockerfile b/examples/langchain/PY.Dockerfile
new file mode 100644
index 0000000..41d614d
--- /dev/null
+++ b/examples/langchain/PY.Dockerfile
@@ -0,0 +1,5 @@
+FROM python:3.10-bullseye
+COPY ./langchainpy-localai-example /app
+WORKDIR /app
+RUN pip install --no-cache-dir -r requirements.txt
+ENTRYPOINT [ "python", "./simple_demo.py" ]
\ No newline at end of file
diff --git a/examples/langchain/README.md b/examples/langchain/README.md
index 8aebab0..82f4b4f 100644
--- a/examples/langchain/README.md
+++ b/examples/langchain/README.md
@@ -1,10 +1,6 @@
 # langchain
 
-Example of using langchain in TypeScript, with the standard OpenAI llm module, and LocalAI.
-
-Example for python langchain to follow at a later date
-
-Set up to make it easy to modify the `index.mts` file to look like any langchain example file.
+Example of using LangChain with the standard OpenAI LLM module and LocalAI. Docker Compose profiles are provided for both the TypeScript and Python versions.
 
 **Please Note** - This is a tech demo example at this time. ggml-gpt4all-j has pretty terrible results for most langchain applications with the settings used in this example.
 
@@ -22,8 +18,11 @@ cd LocalAI/examples/langchain
 # Download gpt4all-j to models/
 wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
 
-# start with docker-compose
-docker-compose up --build
+# start the TypeScript version with docker-compose
+docker-compose --profile ts up --build
+
+# or start the Python version instead
+docker-compose --profile py up --build
 ```
 
 ## Copyright
diff --git a/examples/langchain/docker-compose.yaml b/examples/langchain/docker-compose.yaml
index 7bd77cd..cafcb49 100644
--- a/examples/langchain/docker-compose.yaml
+++ b/examples/langchain/docker-compose.yaml
@@ -15,11 +15,31 @@ services:
       - ./models:/models:cached
     command: ["/usr/bin/local-ai" ]
 
-  langchainjs:
+  js:
     build:
       context: .
       dockerfile: JS.Dockerfile
+    profiles:
+      - js
+      - ts
+    depends_on:
+      - "api"
     environment:
       - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
-      - 'OPENAI_API_HOST=http://api:8080/v1'
+      - 'OPENAI_API_BASE=http://api:8080/v1'
+      - 'MODEL_NAME=gpt-3.5-turbo' # other options: ggml-gpt4all-j, ggml-koala-13B-4bit-128g
+
+  py:
+    build:
+      context: .
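+      # the build context is examples/langchain, so PY.Dockerfile can
+      # COPY ./langchainpy-localai-example into the image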
+      dockerfile: PY.Dockerfile
+    profiles:
+      - py
+    depends_on:
+      - "api"
+    environment:
+      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
+      - 'OPENAI_API_BASE=http://api:8080/v1'
+      - 'MODEL_NAME=gpt-3.5-turbo' # other options: ggml-gpt4all-j, ggml-koala-13B-4bit-128g
\ No newline at end of file
diff --git a/examples/langchain/langchainjs-localai-example/src/index.mts b/examples/langchain/langchainjs-localai-example/src/index.mts
index ec01a5b..e6dcfb8 100644
--- a/examples/langchain/langchainjs-localai-example/src/index.mts
+++ b/examples/langchain/langchainjs-localai-example/src/index.mts
@@ -4,7 +4,7 @@ import { Document } from "langchain/document";
 import { initializeAgentExecutorWithOptions } from "langchain/agents";
 import {Calculator} from "langchain/tools/calculator";
 
-const pathToLocalAi = process.env['OPENAI_API_HOST'] || 'http://api:8080/v1';
+const pathToLocalAi = process.env['OPENAI_API_BASE'] || 'http://api:8080/v1';
 const fakeApiKey = process.env['OPENAI_API_KEY'] || '-';
 const modelName = process.env['MODEL_NAME'] || 'gpt-3.5-turbo';
 
diff --git a/examples/langchain/langchainpy-localai-example/.vscode/launch.json b/examples/langchain/langchainpy-localai-example/.vscode/launch.json
new file mode 100644
index 0000000..e72fa79
--- /dev/null
+++ b/examples/langchain/langchainpy-localai-example/.vscode/launch.json
@@ -0,0 +1,24 @@
+{
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python: Current File",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            "console": "integratedTerminal",
+            "redirectOutput": true,
+            "justMyCode": false
+        },
+        {
+            "name": "Python: Attach to Port 5678",
+            "type": "python",
+            "request": "attach",
+            "connect": {
+                "host": "localhost",
+                "port": 5678
+            },
+            "justMyCode": false
+        }
+    ]
+}
\ No newline at end of file
diff --git a/examples/langchain/langchainpy-localai-example/.vscode/settings.json b/examples/langchain/langchainpy-localai-example/.vscode/settings.json
new file mode 100644
index 0000000..146756d
--- /dev/null
+++ b/examples/langchain/langchainpy-localai-example/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+    "python.defaultInterpreterPath": "${workspaceFolder}/.venv/Scripts/python"
+}
\ No newline at end of file
diff --git a/examples/langchain/langchainpy-localai-example/full_demo.py b/examples/langchain/langchainpy-localai-example/full_demo.py
new file mode 100644
index 0000000..014e84a
--- /dev/null
+++ b/examples/langchain/langchainpy-localai-example/full_demo.py
@@ -0,0 +1,42 @@
+import os
+from langchain.chat_models import ChatOpenAI
+from langchain import PromptTemplate, LLMChain
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    SystemMessagePromptTemplate,
+    AIMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
+from langchain.schema import (
+    AIMessage,
+    HumanMessage,
+    SystemMessage
+)
+
+print('Langchain + LocalAI PYTHON Tests')
+
+base_path = os.environ.get('OPENAI_API_BASE', 'http://api:8080/v1')
+key = os.environ.get('OPENAI_API_KEY', '-')
+model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo')
+
+
+chat = ChatOpenAI(temperature=0, openai_api_base=base_path, openai_api_key=key, model_name=model_name, max_tokens=100)
+
+print("Created ChatOpenAI for ", chat.model_name)
+
+template = "You are a helpful assistant that translates {input_language} to {output_language}."
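+# The system template above sets the translation task; the human template
+# below carries the text to translate. format_prompt() fills both, and
+# to_messages() renders them as a [SystemMessage, HumanMessage] pair.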
+system_message_prompt = SystemMessagePromptTemplate.from_template(template)
+human_template = "{text}"
+human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
+
+chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
+
+print("ABOUT to execute")
+
+# get a chat completion from the formatted messages and print the AIMessage result
+print(chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages()))
+
+print(".")
\ No newline at end of file
diff --git a/examples/langchain/langchainpy-localai-example/requirements.txt b/examples/langchain/langchainpy-localai-example/requirements.txt
new file mode 100644
index 0000000..c0c5943
--- /dev/null
+++ b/examples/langchain/langchainpy-localai-example/requirements.txt
@@ -0,0 +1,32 @@
+aiohttp==3.8.4
+aiosignal==1.3.1
+async-timeout==4.0.2
+attrs==23.1.0
+certifi==2022.12.7
+charset-normalizer==3.1.0
+colorama==0.4.6
+dataclasses-json==0.5.7
+debugpy==1.6.7
+frozenlist==1.3.3
+greenlet==2.0.2
+idna==3.4
+langchain==0.0.157
+marshmallow==3.19.0
+marshmallow-enum==1.5.1
+multidict==6.0.4
+mypy-extensions==1.0.0
+numexpr==2.8.4
+numpy==1.24.3
+openai==0.27.6
+openapi-schema-pydantic==1.2.4
+packaging==23.1
+pydantic==1.10.7
+PyYAML==6.0
+requests==2.29.0
+SQLAlchemy==2.0.12
+tenacity==8.2.2
+tqdm==4.65.0
+typing-inspect==0.8.0
+typing_extensions==4.5.0
+urllib3==1.26.15
+yarl==1.9.2
diff --git a/examples/langchain/langchainpy-localai-example/simple_demo.py b/examples/langchain/langchainpy-localai-example/simple_demo.py
new file mode 100644
index 0000000..a9fac35
--- /dev/null
+++ b/examples/langchain/langchainpy-localai-example/simple_demo.py
@@ -0,0 +1,6 @@
+
+from langchain.llms import OpenAI
+
+llm = OpenAI(temperature=0.9, model_name="gpt-3.5-turbo")
+text = "What would be a good company name for a company that makes colorful socks?"
+print(llm(text))
diff --git a/examples/langchain/models/gpt-3.5-turbo.yaml b/examples/langchain/models/gpt-3.5-turbo.yaml
index 156ec42..4ee02fa 100644
--- a/examples/langchain/models/gpt-3.5-turbo.yaml
+++ b/examples/langchain/models/gpt-3.5-turbo.yaml
@@ -12,6 +12,7 @@ stopwords:
 roles:
   user: " "
   system: " "
+backend: "gptj"
 template:
   completion: completion
   chat: completion # gpt4all
\ No newline at end of file