diff --git a/examples/langchain-python/README.md b/examples/langchain-python/README.md
index 2472aab..aeff6c4 100644
--- a/examples/langchain-python/README.md
+++ b/examples/langchain-python/README.md
@@ -12,15 +12,8 @@
 git clone https://github.com/go-skynet/LocalAI
 cd LocalAI/examples/langchain-python
 
-# (optional) Checkout a specific LocalAI tag
-# git checkout -b build
-
-# Download gpt4all-j to models/
-wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
-
 # start with docker-compose
-docker-compose up -d --build
-
+docker-compose up --pull always
 
 pip install langchain
 pip install openai
diff --git a/examples/langchain-python/docker-compose.yaml b/examples/langchain-python/docker-compose.yaml
index 8bd61b5..0a023c0 100644
--- a/examples/langchain-python/docker-compose.yaml
+++ b/examples/langchain-python/docker-compose.yaml
@@ -3,6 +3,14 @@ version: '3.6'
 services:
   api:
     image: quay.io/go-skynet/local-ai:latest
+    # As initially LocalAI will download the models defined in PRELOAD_MODELS
+    # you might need to tweak the healthcheck values here according to your network connection.
+    # Here we give a timespan of 20m to download all the required files.
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
+      interval: 1m
+      timeout: 20m
+      retries: 20
     build:
       context: ../../
       dockerfile: Dockerfile
@@ -11,6 +19,9 @@ services:
     environment:
       - DEBUG=true
       - MODELS_PATH=/models
+      # You can preload different models here as well.
+      # See: https://github.com/go-skynet/model-gallery
+      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
     volumes:
       - ./models:/models:cached
-    command: ["/usr/bin/local-ai" ]
+    command: ["/usr/bin/local-ai" ]
\ No newline at end of file
diff --git a/examples/langchain-python/models b/examples/langchain-python/models
deleted file mode 120000
index 9c5c2aa..0000000
--- a/examples/langchain-python/models
+++ /dev/null
@@ -1 +0,0 @@
-../chatbot-ui/models
\ No newline at end of file