Compare commits
1 Commit
Author | SHA1 | Date |
---|---|---|
Mauro Morales | eb137c8a84 | 2 years ago |
@@ -0,0 +1,3 @@
ARG GO_VERSION=1.20
FROM mcr.microsoft.com/devcontainers/go:0-$GO_VERSION-bullseye
RUN apt-get update && apt-get install -y cmake
@@ -0,0 +1,46 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
{
  "name": "Existing Docker Compose (Extend)",

  // Update the 'dockerComposeFile' list if you have more compose files or use different names.
  // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
  "dockerComposeFile": [
    "../docker-compose.yaml",
    "docker-compose.yml"
  ],

  // The 'service' property is the name of the service for the container that VS Code should
  // use. Update this value and .devcontainer/docker-compose.yml to the real service name.
  "service": "api",

  // The optional 'workspaceFolder' property is the path VS Code should open by default when
  // connected. This is typically a file mount in .devcontainer/docker-compose.yml
  "workspaceFolder": "/workspace",

  "features": {
    "ghcr.io/devcontainers/features/go:1": {},
    "ghcr.io/azutake/devcontainer-features/go-packages-install:0": {}
  },

  // Features to add to the dev container. More info: https://containers.dev/features.
  // "features": {},

  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  // "forwardPorts": [],

  // Uncomment the next line if you want to start specific services in your Docker Compose config.
  // "runServices": [],

  // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
  // "shutdownAction": "none",

  // Uncomment the next line to run commands after the container is created.
  "postCreateCommand": "make prepare"

  // Configure tool-specific properties.
  // "customizations": {},

  // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
  // "remoteUser": "devcontainer"
}
@@ -0,0 +1,26 @@
version: '3.6'
services:
  # Update this to the name of the service you want to work with in your docker-compose.yml file
  api:
    # Uncomment if you want to override the service's Dockerfile to one in the .devcontainer
    # folder. Note that the path of the Dockerfile and context is relative to the *primary*
    # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile"
    # array). The sample below assumes your primary file is in the root of your project.
    #
    build:
      context: .
      dockerfile: .devcontainer/Dockerfile

    volumes:
      # Update this to wherever you want VS Code to mount the folder of your project
      - .:/workspace:cached

      # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust.
      # cap_add:
      #   - SYS_PTRACE
      # security_opt:
      #   - seccomp:unconfined

    # Overrides default command so things don't shut down after the process ends.
    command: /bin/sh -c "while sleep 1000; do :; done"
@@ -1,43 +1,5 @@
## Set number of threads.
## Note: prefer the number of physical cores. Overbooking the CPU degrades performance notably.
# THREADS=14

## Specify a different bind address (defaults to ":8080")
# ADDRESS=127.0.0.1:8080

## Default models context size
# CONTEXT_SIZE=512
#
## Define galleries.
## Models to install will be visible in `/models/available`
# GALLERIES=[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}]

## CORS settings
# CORS=true
# CORS_ALLOW_ORIGINS=*

## Default path for models
#
MODELS_PATH=/models

## Enable debug mode
# DEBUG=true
# BUILD_TYPE=generic
## Specify a build type. Available: cublas, openblas, clblas.
# BUILD_TYPE=openblas

## Uncomment and set to true to enable rebuilding from source
# REBUILD=true

## Enable go tags, available: stablediffusion, tts
## stablediffusion: image generation with stablediffusion
## tts: enables text-to-speech with go-piper
## (requires REBUILD=true)
#
# GO_TAGS=stablediffusion

## Path where to store generated images
# IMAGE_PATH=/tmp

## Specify a default upload limit in MB (whisper)
# UPLOAD_LIMIT
@@ -1,5 +0,0 @@
# These are supported funding model platforms

github: [mudler]
custom:
- https://www.buymeacoffee.com/mudler
@@ -1,31 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: mudler

---

<!-- Thanks for helping us to improve LocalAI! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**LocalAI version:**
<!-- Container Image or LocalAI tag/commit -->

**Environment, CPU architecture, OS, and Version:**
<!-- Provide the output from "uname -a", HW specs, if it's a VM -->

**Describe the bug**
<!-- A clear and concise description of what the bug is. -->

**To Reproduce**
<!-- Steps to reproduce the behavior, including the LocalAI command used, if any -->

**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->

**Logs**
<!-- If applicable, add logs while running LocalAI in debug mode (`--debug` or `DEBUG=true`) to help explain your problem. -->

**Additional context**
<!-- Add any other context about the problem here. -->
@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: Community Support
    url: https://github.com/go-skynet/LocalAI/discussions
    about: Please ask and answer questions here.
  - name: Discord
    url: https://discord.gg/uJAeKSAGDy
    about: Join our community on Discord!
@@ -1,22 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: mudler

---

<!-- Thanks for helping us to improve LocalAI! We welcome all feature requests. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->

**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->

**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->

**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
@@ -1,23 +0,0 @@
**Description**

This PR fixes #

**Notes for Reviewers**


**[Signed commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [ ] Yes, I signed my commits.


<!--
Thank you for contributing to LocalAI!

Contributing Conventions:

1. Include descriptive PR titles with [<component-name>] prepended.
2. Build and test your changes before submitting a PR.
3. Sign your commits

By following the community's contribution conventions upfront, the review process will
be accelerated and your PR merged more quickly.
-->
@@ -1,9 +0,0 @@
#!/bin/bash
set -xe
REPO=$1
BRANCH=$2
VAR=$3

LAST_COMMIT=$(curl -s -H "Accept: application/vnd.github.VERSION.sha" "https://api.github.com/repos/$REPO/commits/$BRANCH")

sed -i Makefile -e "s/$VAR?=.*/$VAR?=$LAST_COMMIT/"
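The "Bump dependencies" workflow further down in this compare invokes the script above with a repository, a branch, and the Makefile variable to rewrite. A minimal manual invocation would look like the following sketch, using one of the repository/variable pairs from that workflow:

```bash
# Pin GOLLAMA_VERSION in the Makefile to the latest commit on go-skynet/go-llama.cpp master
bash .github/bump_deps.sh go-skynet/go-llama.cpp master GOLLAMA_VERSION
```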
@@ -1,24 +0,0 @@
# .github/release.yml

changelog:
  exclude:
    labels:
      - ignore-for-release
  categories:
    - title: Breaking Changes 🛠
      labels:
        - Semver-Major
        - breaking-change
    - title: "Bug fixes :bug:"
      labels:
        - bug
    - title: Exciting New Features 🎉
      labels:
        - Semver-Minor
        - enhancement
    - title: 👒 Dependencies
      labels:
        - dependencies
    - title: Other Changes
      labels:
        - "*"
@@ -1,18 +0,0 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 45
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 10
# Issues with these labels will never be considered stale
exemptLabels:
  - issue/willfix
# Label to use when marking an issue as stale
staleLabel: issue/stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
  This issue is being automatically closed due to inactivity.
  However, you may choose to reopen this issue.
@@ -1,63 +0,0 @@
name: Bump dependencies
on:
  schedule:
    - cron: 0 20 * * *
  workflow_dispatch:
jobs:
  bump:
    strategy:
      fail-fast: false
      matrix:
        include:
          - repository: "go-skynet/go-llama.cpp"
            variable: "GOLLAMA_VERSION"
            branch: "master"
          - repository: "go-skynet/go-llama.cpp"
            variable: "GOLLAMA_GRAMMAR_VERSION"
            branch: "master"
          - repository: "go-skynet/go-ggml-transformers.cpp"
            variable: "GOGGMLTRANSFORMERS_VERSION"
            branch: "master"
          - repository: "donomii/go-rwkv.cpp"
            variable: "RWKV_VERSION"
            branch: "main"
          - repository: "ggerganov/whisper.cpp"
            variable: "WHISPER_CPP_VERSION"
            branch: "master"
          - repository: "go-skynet/go-bert.cpp"
            variable: "BERT_VERSION"
            branch: "master"
          - repository: "go-skynet/bloomz.cpp"
            variable: "BLOOMZ_VERSION"
            branch: "main"
          - repository: "nomic-ai/gpt4all"
            variable: "GPT4ALL_VERSION"
            branch: "main"
          - repository: "mudler/go-ggllm.cpp"
            variable: "GOGGLLM_VERSION"
            branch: "master"
          - repository: "mudler/go-stable-diffusion"
            variable: "STABLEDIFFUSION_VERSION"
            branch: "master"
          - repository: "mudler/go-piper"
            variable: "PIPER_VERSION"
            branch: "master"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Bump dependencies 🔧
        run: |
          bash .github/bump_deps.sh ${{ matrix.repository }} ${{ matrix.branch }} ${{ matrix.variable }}
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ secrets.UPDATE_BOT_TOKEN }}
          push-to-fork: ci-forks/LocalAI
          commit-message: ':arrow_up: Update ${{ matrix.repository }}'
          title: ':arrow_up: Update ${{ matrix.repository }}'
          branch: "update/${{ matrix.variable }}"
          body: Bump of ${{ matrix.repository }} version
          signoff: true
@@ -1,79 +0,0 @@
name: Build and Release

on: push

permissions:
  contents: write

jobs:
  build-linux:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: ubuntu-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install build-essential ffmpeg
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.defines }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          STATIC=true make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*

  build-macOS:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: macOS-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.defines }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*
@@ -0,0 +1,26 @@
name: goreleaser

on:
  push:
    tags:
      - 'v*'

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v4
        with:
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -0,0 +1,15 @@
# Make sure to check the documentation at http://goreleaser.com
project_name: local-ai
builds:
  - ldflags:
      - -w -s
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
      - windows
    goarch:
      - amd64
      - arm64
    binary: '{{ .ProjectName }}'
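The workflow above runs `goreleaser release --clean` on tag pushes. For a local dry run of this configuration, something along these lines is commonly used (a sketch; it assumes a GoReleaser version recent enough to support the same `--clean` flag):

```bash
# Build snapshot artifacts for all configured platforms without tagging or publishing
goreleaser release --snapshot --clean
```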
@@ -1,125 +0,0 @@
ARG GO_VERSION=1.20-bullseye

FROM golang:$GO_VERSION as requirements

ARG BUILD_TYPE
ARG CUDA_MAJOR_VERSION=11
ARG CUDA_MINOR_VERSION=7
ARG SPDLOG_VERSION="1.11.0"
ARG PIPER_PHONEMIZE_VERSION='1.0.0'
ARG TARGETARCH
ARG TARGETVARIANT

ENV BUILD_TYPE=${BUILD_TYPE}
ENV EXTERNAL_GRPC_BACKENDS="huggingface-embeddings:/build/extra/grpc/huggingface/huggingface.py"
ARG GO_TAGS="stablediffusion tts"

RUN apt-get update && \
    apt-get install -y ca-certificates cmake curl patch pip

# Extras requirements
COPY extra/requirements.txt /build/extra/requirements.txt
RUN pip install -r /build/extra/requirements.txt && rm -rf /build/extra/requirements.txt

# CuBLAS requirements
RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
    apt-get install -y software-properties-common && \
    apt-add-repository contrib && \
    curl -O https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.0-1_all.deb && \
    dpkg -i cuda-keyring_1.0-1_all.deb && \
    rm -f cuda-keyring_1.0-1_all.deb && \
    apt-get update && \
    apt-get install -y cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
    ; fi
ENV PATH /usr/local/cuda/bin:${PATH}

WORKDIR /build

# OpenBLAS requirements
RUN apt-get install -y libopenblas-dev

# Stable Diffusion requirements
RUN apt-get install -y libopencv-dev && \
    ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2

# Use the variables in subsequent instructions
RUN echo "Target Architecture: $TARGETARCH"
RUN echo "Target Variant: $TARGETVARIANT"

# piper requirements
# Use pre-compiled Piper phonemization library (includes onnxruntime)
#RUN if echo "${GO_TAGS}" | grep -q "tts"; then \
RUN test -n "$TARGETARCH" \
    || (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')

RUN curl -L "https://github.com/gabime/spdlog/archive/refs/tags/v${SPDLOG_VERSION}.tar.gz" | \
    tar -xzvf - && \
    mkdir -p "spdlog-${SPDLOG_VERSION}/build" && \
    cd "spdlog-${SPDLOG_VERSION}/build" && \
    cmake .. && \
    make -j8 && \
    cmake --install . --prefix /usr && mkdir -p "lib/Linux-$(uname -m)" && \
    cd /build && \
    mkdir -p "lib/Linux-$(uname -m)/piper_phonemize" && \
    curl -L "https://github.com/rhasspy/piper-phonemize/releases/download/v${PIPER_PHONEMIZE_VERSION}/libpiper_phonemize-${TARGETARCH:-$(go env GOARCH)}${TARGETVARIANT}.tar.gz" | \
    tar -C "lib/Linux-$(uname -m)/piper_phonemize" -xzvf - && ls -liah /build/lib/Linux-$(uname -m)/piper_phonemize/ && \
    cp -rfv /build/lib/Linux-$(uname -m)/piper_phonemize/lib/. /lib64/ && \
    cp -rfv /build/lib/Linux-$(uname -m)/piper_phonemize/lib/. /usr/lib/ && \
    cp -rfv /build/lib/Linux-$(uname -m)/piper_phonemize/include/. /usr/include/
# \
# ; fi

###################################
###################################

FROM requirements as builder

ARG GO_TAGS="stablediffusion tts"

ENV GO_TAGS=${GO_TAGS}
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
ENV NVIDIA_VISIBLE_DEVICES=all

WORKDIR /build

COPY Makefile .
RUN make get-sources
COPY go.mod .
RUN make prepare
COPY . .
COPY .git .

RUN ESPEAK_DATA=/build/lib/Linux-$(uname -m)/piper_phonemize/lib/espeak-ng-data make build

###################################
###################################

FROM requirements

ARG FFMPEG

ENV REBUILD=false
ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz

# Add FFmpeg
RUN if [ "${FFMPEG}" = "true" ]; then \
    apt-get install -y ffmpeg \
    ; fi

WORKDIR /build

# we start fresh & re-copy all assets because `make build` does not clean up nicely after itself
# so when `entrypoint.sh` runs `make build` again (which it does by default), the build would fail
# see https://github.com/go-skynet/LocalAI/pull/658#discussion_r1241971626 and
# https://github.com/go-skynet/LocalAI/pull/434
COPY . .
RUN make prepare-sources
COPY --from=builder /build/local-ai ./

# Define the health check command
HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
  CMD curl -f $HEALTHCHECK_ENDPOINT || exit 1

EXPOSE 8080
ENTRYPOINT [ "/build/entrypoint.sh" ]
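This (now removed) image was driven by the build arguments declared at the top of the requirements stage (BUILD_TYPE, GO_TAGS, CUDA_*, FFMPEG). A typical invocation would have looked roughly like the following sketch; the image tag is illustrative:

```bash
# Enable the cuBLAS branch of the requirements stage and bundle ffmpeg in the final image
docker build --build-arg BUILD_TYPE=cublas --build-arg FFMPEG=true -t local-ai:cublas .
```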
@@ -1,5 +0,0 @@
VERSION 0.7

build:
    FROM DOCKERFILE -f Dockerfile .
    SAVE ARTIFACT /usr/bin/local-ai AS LOCAL local-ai
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2023 Ettore Di Giacinto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,445 +0,0 @@
GOCMD=go
GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet
BINARY_NAME=local-ai

# llama.cpp versions
# Temporarily pinned to https://github.com/go-skynet/go-llama.cpp/pull/124
GOLLAMA_VERSION?=f3a6ee0ef53d667f110d28fcf9b808bdca741c07

GOLLAMA_GRAMMAR_VERSION?=cb8d7cd4cb95725a04504a9e3a26dd72a12b69ac
# Temporarily set a specific version of llama.cpp
# containing: https://github.com/ggerganov/llama.cpp/pull/1773 and
# rebased on top of master.
# This pin can be dropped when the PR above is merged, and go-llama has merged changes as well
# Set empty to use the version pinned by go-llama
LLAMA_CPP_GRAMMAR_REPO?=https://github.com/mudler/llama.cpp
LLAMA_CPP_GRAMMAR_VERSION?=48ce8722a05a018681634af801fd0fd45b3a87cc

# gpt4all version
GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
GPT4ALL_VERSION?=5f0aaf8bdb166ea3b5bfd578c2b19f61b583e6a9

# go-ggml-transformers version
GOGGMLTRANSFORMERS_VERSION?=ffb09d7dd71e2cbc6c5d7d05357d230eea6f369a

# go-rwkv version
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
RWKV_VERSION?=c898cd0f62df8f2a7830e53d1d513bef4f6f792b

# whisper.cpp version
WHISPER_CPP_VERSION?=85ed71aaec8e0612a84c0b67804bde75aa75a273

# bert.cpp version
BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d

# go-piper version
PIPER_VERSION?=56b8a81b4760a6fbee1a82e62f007ae7e8f010a7

# go-bloomz version
BLOOMZ_VERSION?=1834e77b83faafe912ad4092ccf7f77937349e2f

# stablediffusion version
STABLEDIFFUSION_VERSION?=d89260f598afb809279bc72aa0107b4292587632

# Go-ggllm
GOGGLLM_VERSION?=862477d16eefb0805261c19c9b0d053e3b2b684b

export BUILD_TYPE?=
CGO_LDFLAGS?=
CUDA_LIBPATH?=/usr/local/cuda/lib64/
GO_TAGS?=
BUILD_ID?=git

VERSION?=$(shell git describe --always --tags || echo "dev" )
# go tool nm ./local-ai | grep Commit
LD_FLAGS?=
override LD_FLAGS += -X "github.com/go-skynet/LocalAI/internal.Version=$(VERSION)"
override LD_FLAGS += -X "github.com/go-skynet/LocalAI/internal.Commit=$(shell git rev-parse HEAD)"

OPTIONAL_TARGETS?=
ESPEAK_DATA?=

OS := $(shell uname -s)
ARCH := $(shell uname -m)
GREEN  := $(shell tput -Txterm setaf 2)
YELLOW := $(shell tput -Txterm setaf 3)
WHITE  := $(shell tput -Txterm setaf 7)
CYAN   := $(shell tput -Txterm setaf 6)
RESET  := $(shell tput -Txterm sgr0)

ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif

# workaround for rwkv.cpp
ifeq ($(UNAME_S),Darwin)
	CGO_LDFLAGS += -lcblas -framework Accelerate
endif

ifeq ($(BUILD_TYPE),openblas)
	CGO_LDFLAGS+=-lopenblas
endif

ifeq ($(BUILD_TYPE),cublas)
	CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
	export LLAMA_CUBLAS=1
endif

ifeq ($(BUILD_TYPE),metal)
	CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
	export LLAMA_METAL=1
endif

ifeq ($(BUILD_TYPE),clblas)
	CGO_LDFLAGS+=-lOpenCL -lclblast
endif

# glibc-static or glibc-devel-static required
ifeq ($(STATIC),true)
	LD_FLAGS=-linkmode external -extldflags -static
endif

ifeq ($(findstring stablediffusion,$(GO_TAGS)),stablediffusion)
#	OPTIONAL_TARGETS+=go-stable-diffusion/libstablediffusion.a
	OPTIONAL_GRPC+=backend-assets/grpc/stablediffusion
endif

ifeq ($(findstring tts,$(GO_TAGS)),tts)
#	OPTIONAL_TARGETS+=go-piper/libpiper_binding.a
#	OPTIONAL_TARGETS+=backend-assets/espeak-ng-data
	OPTIONAL_GRPC+=backend-assets/grpc/piper
endif

.PHONY: all test build vendor

all: help

## GPT4ALL
gpt4all:
	git clone --recurse-submodules $(GPT4ALL_REPO) gpt4all
	cd gpt4all && git checkout -b build $(GPT4ALL_VERSION) && git submodule update --init --recursive --depth 1

## go-ggllm
go-ggllm:
	git clone --recurse-submodules https://github.com/mudler/go-ggllm.cpp go-ggllm
	cd go-ggllm && git checkout -b build $(GOGGLLM_VERSION) && git submodule update --init --recursive --depth 1

go-ggllm/libggllm.a: go-ggllm
	$(MAKE) -C go-ggllm BUILD_TYPE=$(BUILD_TYPE) libggllm.a

## go-piper
go-piper:
	git clone --recurse-submodules https://github.com/mudler/go-piper go-piper
	cd go-piper && git checkout -b build $(PIPER_VERSION) && git submodule update --init --recursive --depth 1

## BERT embeddings
go-bert:
	git clone --recurse-submodules https://github.com/go-skynet/go-bert.cpp go-bert
	cd go-bert && git checkout -b build $(BERT_VERSION) && git submodule update --init --recursive --depth 1

## stable diffusion
go-stable-diffusion:
	git clone --recurse-submodules https://github.com/mudler/go-stable-diffusion go-stable-diffusion
	cd go-stable-diffusion && git checkout -b build $(STABLEDIFFUSION_VERSION) && git submodule update --init --recursive --depth 1

go-stable-diffusion/libstablediffusion.a:
	$(MAKE) -C go-stable-diffusion libstablediffusion.a

## RWKV
go-rwkv:
	git clone --recurse-submodules $(RWKV_REPO) go-rwkv
	cd go-rwkv && git checkout -b build $(RWKV_VERSION) && git submodule update --init --recursive --depth 1

go-rwkv/librwkv.a: go-rwkv
	cd go-rwkv && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a ..

## bloomz
bloomz:
	git clone --recurse-submodules https://github.com/go-skynet/bloomz.cpp bloomz
	cd bloomz && git checkout -b build $(BLOOMZ_VERSION) && git submodule update --init --recursive --depth 1

bloomz/libbloomz.a: bloomz
	cd bloomz && make libbloomz.a

go-bert/libgobert.a: go-bert
	$(MAKE) -C go-bert libgobert.a

backend-assets/gpt4all: gpt4all/gpt4all-bindings/golang/libgpt4all.a
	mkdir -p backend-assets/gpt4all
	@cp gpt4all/gpt4all-bindings/golang/buildllm/*.so backend-assets/gpt4all/ || true
	@cp gpt4all/gpt4all-bindings/golang/buildllm/*.dylib backend-assets/gpt4all/ || true
	@cp gpt4all/gpt4all-bindings/golang/buildllm/*.dll backend-assets/gpt4all/ || true

backend-assets/espeak-ng-data:
	mkdir -p backend-assets/espeak-ng-data
ifdef ESPEAK_DATA
	@cp -rf $(ESPEAK_DATA)/. backend-assets/espeak-ng-data
else
	@echo "ESPEAK_DATA not set, skipping tts. Note that this will break the tts functionality."
	@touch backend-assets/espeak-ng-data/keep
endif

gpt4all/gpt4all-bindings/golang/libgpt4all.a: gpt4all
	$(MAKE) -C gpt4all/gpt4all-bindings/golang/ libgpt4all.a

## CEREBRAS GPT
go-ggml-transformers:
	git clone --recurse-submodules https://github.com/go-skynet/go-ggml-transformers.cpp go-ggml-transformers
	cd go-ggml-transformers && git checkout -b build $(GOGPT2_VERSION) && git submodule update --init --recursive --depth 1

go-ggml-transformers/libtransformers.a: go-ggml-transformers
	$(MAKE) -C go-ggml-transformers BUILD_TYPE=$(BUILD_TYPE) libtransformers.a

whisper.cpp:
	git clone https://github.com/ggerganov/whisper.cpp.git
	cd whisper.cpp && git checkout -b build $(WHISPER_CPP_VERSION) && git submodule update --init --recursive --depth 1

whisper.cpp/libwhisper.a: whisper.cpp
	cd whisper.cpp && make libwhisper.a

go-llama:
	git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
	cd go-llama && git checkout -b build $(GOLLAMA_VERSION) && git submodule update --init --recursive --depth 1

go-llama-grammar:
	git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama-grammar
	cd go-llama-grammar && git checkout -b build $(GOLLAMA_GRAMMAR_VERSION) && git submodule update --init --recursive --depth 1
ifneq ($(LLAMA_CPP_GRAMMAR_REPO),)
	cd go-llama-grammar && rm -rf llama.cpp && git clone $(LLAMA_CPP_GRAMMAR_REPO) llama.cpp && cd llama.cpp && git checkout -b build $(LLAMA_CPP_GRAMMAR_VERSION) && git submodule update --init --recursive --depth 1
endif

go-llama/libbinding.a: go-llama
	$(MAKE) -C go-llama BUILD_TYPE=$(BUILD_TYPE) libbinding.a

go-llama-grammar/libbinding.a: go-llama-grammar
	$(MAKE) -C go-llama-grammar BUILD_TYPE=$(BUILD_TYPE) libbinding.a

go-piper/libpiper_binding.a:
	$(MAKE) -C go-piper libpiper_binding.a example/main

get-sources: go-llama go-ggllm go-llama-grammar go-ggml-transformers gpt4all go-piper go-rwkv whisper.cpp go-bert bloomz go-stable-diffusion
	touch $@

replace:
	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama
	$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp-grammar=$(shell pwd)/go-llama-grammar
	$(GOCMD) mod edit -replace github.com/nomic-ai/gpt4all/gpt4all-bindings/golang=$(shell pwd)/gpt4all/gpt4all-bindings/golang
	$(GOCMD) mod edit -replace github.com/go-skynet/go-ggml-transformers.cpp=$(shell pwd)/go-ggml-transformers
	$(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(shell pwd)/go-rwkv
	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(shell pwd)/whisper.cpp
	$(GOCMD) mod edit -replace github.com/go-skynet/go-bert.cpp=$(shell pwd)/go-bert
	$(GOCMD) mod edit -replace github.com/go-skynet/bloomz.cpp=$(shell pwd)/bloomz
	$(GOCMD) mod edit -replace github.com/mudler/go-stable-diffusion=$(shell pwd)/go-stable-diffusion
	$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(shell pwd)/go-piper
	$(GOCMD) mod edit -replace github.com/mudler/go-ggllm.cpp=$(shell pwd)/go-ggllm

prepare-sources: get-sources replace
	$(GOCMD) mod download

## GENERIC
rebuild: ## Rebuilds the project
	$(GOCMD) clean -cache
	$(MAKE) -C go-llama clean
	$(MAKE) -C go-llama-grammar clean
	$(MAKE) -C gpt4all/gpt4all-bindings/golang/ clean
	$(MAKE) -C go-ggml-transformers clean
	$(MAKE) -C go-rwkv clean
	$(MAKE) -C whisper.cpp clean
	$(MAKE) -C go-stable-diffusion clean
	$(MAKE) -C go-bert clean
	$(MAKE) -C bloomz clean
	$(MAKE) -C go-piper clean
	$(MAKE) -C go-ggllm clean
	$(MAKE) build

prepare: prepare-sources $(OPTIONAL_TARGETS)
	touch $@

clean: ## Remove build related files
	$(GOCMD) clean -cache
	rm -fr ./go-llama
	rm -rf ./gpt4all
	rm -rf ./go-gpt2
	rm -rf ./go-stable-diffusion
	rm -rf ./go-ggml-transformers
	rm -rf ./backend-assets
	rm -rf ./go-rwkv
	rm -rf ./go-bert
	rm -rf ./bloomz
	rm -rf ./whisper.cpp
	rm -rf ./go-piper
	rm -rf ./go-ggllm
	rm -rf $(BINARY_NAME)
	rm -rf release/

## Build:

build: grpcs prepare ## Build the project
	$(info ${GREEN}I local-ai build info:${RESET})
	$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
	$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
	$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./

dist: build
	mkdir -p release
	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)

## Run
run: prepare ## run local-ai
	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./

test-models/testmodel:
	mkdir test-models
	mkdir test-dir
	wget https://huggingface.co/nnakasato/ggml-model-test/resolve/main/ggml-model-q4.bin -O test-models/testmodel
	wget https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
	wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O test-models/bert
	wget https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
	wget https://huggingface.co/mudler/rwkv-4-raven-1.5B-ggml/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%2525-Other1%2525-20230425-ctx4096_Q4_0.bin -O test-models/rwkv
	wget https://raw.githubusercontent.com/saharNooby/rwkv.cpp/5eb8f09c146ea8124633ab041d9ea0b1f1db4459/rwkv/20B_tokenizer.json -O test-models/rwkv.tokenizer.json
	cp tests/models_fixtures/* test-models

prepare-test: grpcs
	cp -rf backend-assets api
	cp tests/models_fixtures/* test-models

test: prepare test-models/testmodel grpcs
	@echo 'Running tests'
	export GO_TAGS="tts stablediffusion"
	$(MAKE) prepare-test
	HUGGINGFACE_GRPC=$(abspath ./)/extra/grpc/huggingface/huggingface.py TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!gpt4all && !llama" --flake-attempts 5 -v -r ./api ./pkg
	$(MAKE) test-gpt4all
	$(MAKE) test-llama
	$(MAKE) test-tts
	$(MAKE) test-stablediffusion

test-gpt4all: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="gpt4all" --flake-attempts 5 -v -r ./api ./pkg

test-llama: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama" --flake-attempts 5 -v -r ./api ./pkg

test-tts: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="tts" --flake-attempts 1 -v -r ./api ./pkg

test-stablediffusion: prepare-test
	TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
	$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts 1 -v -r ./api ./pkg

test-container:
	docker build --target requirements -t local-ai-test-container .
	docker run -ti --rm --entrypoint /bin/bash -ti -v $(abspath ./):/build local-ai-test-container

## Help:
help: ## Show this help.
	@echo ''
	@echo 'Usage:'
	@echo '  ${YELLOW}make${RESET} ${GREEN}<target>${RESET}'
	@echo ''
	@echo 'Targets:'
	@awk 'BEGIN {FS = ":.*?## "} { \
		if (/^[a-zA-Z_-]+:.*?##.*$$/) {printf "    ${YELLOW}%-20s${GREEN}%s${RESET}\n", $$1, $$2} \
		else if (/^## .*$$/) {printf "  ${CYAN}%s${RESET}\n", substr($$1,4)} \
		}' $(MAKEFILE_LIST)

protogen: protogen-go protogen-python

protogen-go:
	protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative \
	    pkg/grpc/proto/backend.proto

protogen-python:
	python -m grpc_tools.protoc -Ipkg/grpc/proto/ --python_out=extra/grpc/huggingface/ --grpc_python_out=extra/grpc/huggingface/ pkg/grpc/proto/backend.proto

## GRPC

backend-assets/grpc:
	mkdir -p backend-assets/grpc

backend-assets/grpc/falcon: backend-assets/grpc go-ggllm/libggllm.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggllm LIBRARY_PATH=$(shell pwd)/go-ggllm \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/falcon ./cmd/grpc/falcon/

backend-assets/grpc/llama: backend-assets/grpc go-llama/libbinding.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-llama LIBRARY_PATH=$(shell pwd)/go-llama \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama ./cmd/grpc/llama/
# TODO: every binary should have its own folder instead, so can have different metal implementations
ifeq ($(BUILD_TYPE),metal)
	cp go-llama/build/bin/ggml-metal.metal backend-assets/grpc/
endif

backend-assets/grpc/llama-grammar: backend-assets/grpc go-llama-grammar/libbinding.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-llama-grammar LIBRARY_PATH=$(shell pwd)/go-llama-grammar \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama-grammar ./cmd/grpc/llama-grammar/

backend-assets/grpc/gpt4all: backend-assets/grpc backend-assets/gpt4all gpt4all/gpt4all-bindings/golang/libgpt4all.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/gpt4all/gpt4all-bindings/golang/ LIBRARY_PATH=$(shell pwd)/gpt4all/gpt4all-bindings/golang/ \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/gpt4all ./cmd/grpc/gpt4all/

backend-assets/grpc/dolly: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/dolly ./cmd/grpc/dolly/

backend-assets/grpc/gpt2: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/gpt2 ./cmd/grpc/gpt2/

backend-assets/grpc/gptj: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/gptj ./cmd/grpc/gptj/

backend-assets/grpc/gptneox: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/gptneox ./cmd/grpc/gptneox/

backend-assets/grpc/mpt: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/mpt ./cmd/grpc/mpt/

backend-assets/grpc/replit: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/replit ./cmd/grpc/replit/

backend-assets/grpc/falcon-ggml: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/falcon-ggml ./cmd/grpc/falcon-ggml/

backend-assets/grpc/starcoder: backend-assets/grpc go-ggml-transformers/libtransformers.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-ggml-transformers LIBRARY_PATH=$(shell pwd)/go-ggml-transformers \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/starcoder ./cmd/grpc/starcoder/

backend-assets/grpc/rwkv: backend-assets/grpc go-rwkv/librwkv.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-rwkv LIBRARY_PATH=$(shell pwd)/go-rwkv \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/rwkv ./cmd/grpc/rwkv/

backend-assets/grpc/bloomz: backend-assets/grpc bloomz/libbloomz.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/bloomz LIBRARY_PATH=$(shell pwd)/bloomz \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/bloomz ./cmd/grpc/bloomz/

backend-assets/grpc/bert-embeddings: backend-assets/grpc go-bert/libgobert.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-bert LIBRARY_PATH=$(shell pwd)/go-bert \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/bert-embeddings ./cmd/grpc/bert-embeddings/

backend-assets/grpc/langchain-huggingface: backend-assets/grpc
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/langchain-huggingface ./cmd/grpc/langchain-huggingface/

backend-assets/grpc/stablediffusion: backend-assets/grpc go-stable-diffusion/libstablediffusion.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/go-stable-diffusion/ LIBRARY_PATH=$(shell pwd)/go-stable-diffusion/ \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/stablediffusion ./cmd/grpc/stablediffusion/

backend-assets/grpc/piper: backend-assets/grpc backend-assets/espeak-ng-data go-piper/libpiper_binding.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" LIBRARY_PATH=$(shell pwd)/go-piper \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/piper ./cmd/grpc/piper/

backend-assets/grpc/whisper: backend-assets/grpc whisper.cpp/libwhisper.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(shell pwd)/whisper.cpp LIBRARY_PATH=$(shell pwd)/whisper.cpp \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/whisper ./cmd/grpc/whisper/

grpcs: prepare backend-assets/grpc/langchain-huggingface backend-assets/grpc/llama-grammar backend-assets/grpc/falcon-ggml backend-assets/grpc/bert-embeddings backend-assets/grpc/falcon backend-assets/grpc/bloomz backend-assets/grpc/llama backend-assets/grpc/gpt4all backend-assets/grpc/dolly backend-assets/grpc/gpt2 backend-assets/grpc/gptj backend-assets/grpc/gptneox backend-assets/grpc/mpt backend-assets/grpc/replit backend-assets/grpc/starcoder backend-assets/grpc/rwkv backend-assets/grpc/whisper $(OPTIONAL_GRPC)
@@ -1,60 +0,0 @@
# LOCAL AI

## USAGE

- Installation and startup:

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI

# (optional) Checkout a specific LocalAI tag
# git checkout -b build <TAG>

# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j

# Use a template from the examples
cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/

# (optional) Edit the .env file to set things like context size and threads
# vim .env

# start with docker-compose
# docker-compose up -d --pull always
# or you can build the images with:
docker-compose up -d --build
# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models
# {"object":"list","data":[{"id":"ggml-gpt4all-j","object":"model"}]}

curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "ggml-gpt4all-j",
  "messages": [{"role": "user", "content": "How are you?"}],
  "temperature": 0.9
}'

# {"model":"ggml-gpt4all-j","choices":[{"message":{"role":"assistant","content":"I'm doing well, thanks. How about you?"}}]}
```

- Python implementation:

```python
import openai

openai.api_base = "http://localhost:8080/v1"

# create a chat completion
chat_completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

# print the completion
print(chat_completion.choices[0].message.content)
```

## TO DO

- [ ] Flask app frontend
- [ ] Keycloak auth
- [ ] Speech to text with OpenVINO
@ -1,184 +0,0 @@ |
|||||||
package api |
|
||||||
|
|
||||||
import ( |
|
||||||
"errors" |
|
||||||
|
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/localai" |
|
||||||
"github.com/go-skynet/LocalAI/api/openai" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
"github.com/go-skynet/LocalAI/internal" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/assets" |
|
||||||
|
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/gofiber/fiber/v2/middleware/cors" |
|
||||||
"github.com/gofiber/fiber/v2/middleware/logger" |
|
||||||
"github.com/gofiber/fiber/v2/middleware/recover" |
|
||||||
"github.com/rs/zerolog" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
func App(opts ...options.AppOption) (*fiber.App, error) { |
|
||||||
options := options.NewOptions(opts...) |
|
||||||
|
|
||||||
zerolog.SetGlobalLevel(zerolog.InfoLevel) |
|
||||||
if options.Debug { |
|
||||||
zerolog.SetGlobalLevel(zerolog.DebugLevel) |
|
||||||
} |
|
||||||
|
|
||||||
// Return errors as JSON responses
|
|
||||||
app := fiber.New(fiber.Config{ |
|
||||||
BodyLimit: options.UploadLimitMB * 1024 * 1024, // this is the default limit of 4MB
|
|
||||||
DisableStartupMessage: options.DisableMessage, |
|
||||||
// Override default error handler
|
|
||||||
ErrorHandler: func(ctx *fiber.Ctx, err error) error { |
|
||||||
// Status code defaults to 500
|
|
||||||
code := fiber.StatusInternalServerError |
|
||||||
|
|
||||||
// Retrieve the custom status code if it's a *fiber.Error
|
|
||||||
var e *fiber.Error |
|
||||||
if errors.As(err, &e) { |
|
||||||
code = e.Code |
|
||||||
} |
|
||||||
|
|
||||||
// Send custom error page
|
|
||||||
return ctx.Status(code).JSON( |
|
||||||
openai.ErrorResponse{ |
|
||||||
Error: &openai.APIError{Message: err.Error(), Code: code}, |
|
||||||
}, |
|
||||||
) |
|
||||||
}, |
|
||||||
}) |
|
||||||
|
|
||||||
if options.Debug { |
|
||||||
app.Use(logger.New(logger.Config{ |
|
||||||
Format: "[${ip}]:${port} ${status} - ${method} ${path}\n", |
|
||||||
})) |
|
||||||
} |
|
||||||
|
|
||||||
log.Info().Msgf("Starting LocalAI using %d threads, with models path: %s", options.Threads, options.Loader.ModelPath) |
|
||||||
log.Info().Msgf("LocalAI version: %s", internal.PrintableVersion()) |
|
||||||
|
|
||||||
cm := config.NewConfigLoader() |
|
||||||
if err := cm.LoadConfigs(options.Loader.ModelPath); err != nil { |
|
||||||
log.Error().Msgf("error loading config files: %s", err.Error()) |
|
||||||
} |
|
||||||
|
|
||||||
if options.ConfigFile != "" { |
|
||||||
if err := cm.LoadConfigFile(options.ConfigFile); err != nil { |
|
||||||
log.Error().Msgf("error loading config file: %s", err.Error()) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
if options.Debug { |
|
||||||
for _, v := range cm.ListConfigs() { |
|
||||||
cfg, _ := cm.GetConfig(v) |
|
||||||
log.Debug().Msgf("Model: %s (config: %+v)", v, cfg) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
if options.AssetsDestination != "" { |
|
||||||
// Extract files from the embedded FS
|
|
||||||
err := assets.ExtractFiles(options.BackendAssets, options.AssetsDestination) |
|
||||||
log.Debug().Msgf("Extracting backend assets files to %s", options.AssetsDestination) |
|
||||||
if err != nil { |
|
||||||
log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Default middleware config
|
|
||||||
app.Use(recover.New()) |
|
||||||
|
|
||||||
if options.PreloadJSONModels != "" { |
|
||||||
if err := localai.ApplyGalleryFromString(options.Loader.ModelPath, options.PreloadJSONModels, cm, options.Galleries); err != nil { |
|
||||||
return nil, err |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
if options.PreloadModelsFromPath != "" { |
|
||||||
if err := localai.ApplyGalleryFromFile(options.Loader.ModelPath, options.PreloadModelsFromPath, cm, options.Galleries); err != nil { |
|
||||||
return nil, err |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
if options.CORS { |
|
||||||
var c func(ctx *fiber.Ctx) error |
|
||||||
if options.CORSAllowOrigins == "" { |
|
||||||
c = cors.New() |
|
||||||
} else { |
|
||||||
c = cors.New(cors.Config{AllowOrigins: options.CORSAllowOrigins}) |
|
||||||
} |
|
||||||
|
|
||||||
app.Use(c) |
|
||||||
} |
|
||||||
|
|
||||||
// LocalAI API endpoints
|
|
||||||
galleryService := localai.NewGalleryService(options.Loader.ModelPath) |
|
||||||
galleryService.Start(options.Context, cm) |
|
||||||
|
|
||||||
app.Get("/version", func(c *fiber.Ctx) error { |
|
||||||
return c.JSON(struct { |
|
||||||
Version string `json:"version"` |
|
||||||
}{Version: internal.PrintableVersion()}) |
|
||||||
}) |
|
||||||
|
|
||||||
app.Post("/models/apply", localai.ApplyModelGalleryEndpoint(options.Loader.ModelPath, cm, galleryService.C, options.Galleries)) |
|
||||||
app.Get("/models/available", localai.ListModelFromGalleryEndpoint(options.Galleries, options.Loader.ModelPath)) |
|
||||||
app.Get("/models/jobs/:uuid", localai.GetOpStatusEndpoint(galleryService)) |
|
||||||
|
|
||||||
// openAI compatible API endpoint
|
|
||||||
|
|
||||||
// chat
|
|
||||||
app.Post("/v1/chat/completions", openai.ChatEndpoint(cm, options)) |
|
||||||
app.Post("/chat/completions", openai.ChatEndpoint(cm, options)) |
|
||||||
|
|
||||||
// edit
|
|
||||||
app.Post("/v1/edits", openai.EditEndpoint(cm, options)) |
|
||||||
app.Post("/edits", openai.EditEndpoint(cm, options)) |
|
||||||
|
|
||||||
// completion
|
|
||||||
app.Post("/v1/completions", openai.CompletionEndpoint(cm, options)) |
|
||||||
app.Post("/completions", openai.CompletionEndpoint(cm, options)) |
|
||||||
app.Post("/v1/engines/:model/completions", openai.CompletionEndpoint(cm, options)) |
|
||||||
|
|
||||||
// embeddings
|
|
||||||
app.Post("/v1/embeddings", openai.EmbeddingsEndpoint(cm, options)) |
|
||||||
app.Post("/embeddings", openai.EmbeddingsEndpoint(cm, options)) |
|
||||||
app.Post("/v1/engines/:model/embeddings", openai.EmbeddingsEndpoint(cm, options)) |
|
||||||
|
|
||||||
// audio
|
|
||||||
app.Post("/v1/audio/transcriptions", openai.TranscriptEndpoint(cm, options)) |
|
||||||
app.Post("/tts", localai.TTSEndpoint(cm, options)) |
|
||||||
|
|
||||||
// images
|
|
||||||
app.Post("/v1/images/generations", openai.ImageEndpoint(cm, options)) |
|
||||||
|
|
||||||
if options.ImageDir != "" { |
|
||||||
app.Static("/generated-images", options.ImageDir) |
|
||||||
} |
|
||||||
|
|
||||||
if options.AudioDir != "" { |
|
||||||
app.Static("/generated-audio", options.AudioDir) |
|
||||||
} |
|
||||||
|
|
||||||
ok := func(c *fiber.Ctx) error { |
|
||||||
return c.SendStatus(200) |
|
||||||
} |
|
||||||
|
|
||||||
// Kubernetes health checks
|
|
||||||
app.Get("/healthz", ok) |
|
||||||
app.Get("/readyz", ok) |
|
||||||
|
|
||||||
// models
|
|
||||||
app.Get("/v1/models", openai.ListModelsEndpoint(options.Loader, cm)) |
|
||||||
app.Get("/models", openai.ListModelsEndpoint(options.Loader, cm)) |
|
||||||
|
|
||||||
// turn off any process that was started by GRPC if the context is canceled
|
|
||||||
go func() { |
|
||||||
<-options.Context.Done() |
|
||||||
log.Debug().Msgf("Context canceled, shutting down") |
|
||||||
options.Loader.StopGRPC() |
|
||||||
}() |
|
||||||
|
|
||||||
return app, nil |
|
||||||
} |
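The OpenAI-compatible routes registered above can be driven by any OpenAI client pointed at the local server, which is exactly what the test suite below does. A minimal sketch (not part of this change), assuming a running instance on 127.0.0.1:9090 with the gpt4all-j model from the gallery tests installed:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// Point the OpenAI client at the local server instead of api.openai.com.
	cfg := openai.DefaultConfig("")
	cfg.BaseURL = "http://127.0.0.1:9090/v1"
	client := openai.NewClientWithConfig(cfg)

	// Calls the /v1/chat/completions route registered above.
	resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{
		Model:    "gpt4all-j", // must match a model configured on the server
		Messages: []openai.ChatCompletionMessage{{Role: "user", Content: "How are you?"}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Choices[0].Message.Content)
}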
|
@ -1,773 +0,0 @@ |
|||||||
package api_test |
|
||||||
|
|
||||||
import ( |
|
||||||
"bytes" |
|
||||||
"context" |
|
||||||
"embed" |
|
||||||
"encoding/json" |
|
||||||
"errors" |
|
||||||
"fmt" |
|
||||||
"io" |
|
||||||
"io/ioutil" |
|
||||||
"net/http" |
|
||||||
"os" |
|
||||||
"path/filepath" |
|
||||||
"runtime" |
|
||||||
|
|
||||||
. "github.com/go-skynet/LocalAI/api" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/gallery" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/model" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/utils" |
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
. "github.com/onsi/ginkgo/v2" |
|
||||||
. "github.com/onsi/gomega" |
|
||||||
"gopkg.in/yaml.v3" |
|
||||||
|
|
||||||
openaigo "github.com/otiai10/openaigo" |
|
||||||
"github.com/sashabaranov/go-openai" |
|
||||||
"github.com/sashabaranov/go-openai/jsonschema" |
|
||||||
) |
|
||||||
|
|
||||||
type modelApplyRequest struct { |
|
||||||
ID string `json:"id"` |
|
||||||
URL string `json:"url"` |
|
||||||
Name string `json:"name"` |
|
||||||
Overrides map[string]string `json:"overrides"` |
|
||||||
} |
|
||||||
|
|
||||||
func getModelStatus(url string) (response map[string]interface{}) { |
|
||||||
// Create the HTTP request
|
|
||||||
resp, err := http.Get(url) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error creating request:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
defer resp.Body.Close() |
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(resp.Body) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error reading response body:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
// Unmarshal the response into a map[string]interface{}
|
|
||||||
err = json.Unmarshal(body, &response) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error unmarshaling JSON response:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
func getModels(url string) (response []gallery.GalleryModel) { |
|
||||||
utils.GetURI(url, func(url string, i []byte) error { |
|
||||||
// Unmarshal YAML data into a struct
|
|
||||||
return json.Unmarshal(i, &response) |
|
||||||
}) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
func postModelApplyRequest(url string, request modelApplyRequest) (response map[string]interface{}) { |
|
||||||
|
|
||||||
//url := "http://localhost:AI/models/apply"
|
|
||||||
|
|
||||||
// Create the request payload
|
|
||||||
|
|
||||||
payload, err := json.Marshal(request) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error marshaling JSON:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
// Create the HTTP request
|
|
||||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload)) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error creating request:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
req.Header.Set("Content-Type", "application/json") |
|
||||||
|
|
||||||
// Make the request
|
|
||||||
client := &http.Client{} |
|
||||||
resp, err := client.Do(req) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error making request:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
defer resp.Body.Close() |
|
||||||
|
|
||||||
body, err := ioutil.ReadAll(resp.Body) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error reading response body:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
// Unmarshal the response into a map[string]interface{}
|
|
||||||
err = json.Unmarshal(body, &response) |
|
||||||
if err != nil { |
|
||||||
fmt.Println("Error unmarshaling JSON response:", err) |
|
||||||
return |
|
||||||
} |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
//go:embed backend-assets/*
|
|
||||||
var backendAssets embed.FS |
|
||||||
|
|
||||||
var _ = Describe("API test", func() { |
|
||||||
|
|
||||||
var app *fiber.App |
|
||||||
var modelLoader *model.ModelLoader |
|
||||||
var client *openai.Client |
|
||||||
var client2 *openaigo.Client |
|
||||||
var c context.Context |
|
||||||
var cancel context.CancelFunc |
|
||||||
var tmpdir string |
|
||||||
|
|
||||||
commonOpts := []options.AppOption{ |
|
||||||
options.WithDebug(true), |
|
||||||
options.WithDisableMessage(true), |
|
||||||
} |
|
||||||
|
|
||||||
Context("API with ephemeral models", func() { |
|
||||||
BeforeEach(func() { |
|
||||||
var err error |
|
||||||
tmpdir, err = os.MkdirTemp("", "") |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
|
|
||||||
modelLoader = model.NewModelLoader(tmpdir) |
|
||||||
c, cancel = context.WithCancel(context.Background()) |
|
||||||
|
|
||||||
g := []gallery.GalleryModel{ |
|
||||||
{ |
|
||||||
Name: "bert", |
|
||||||
URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml", |
|
||||||
}, |
|
||||||
{ |
|
||||||
Name: "bert2", |
|
||||||
URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml", |
|
||||||
Overrides: map[string]interface{}{"foo": "bar"}, |
|
||||||
AdditionalFiles: []gallery.File{{Filename: "foo.yaml", URI: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml"}}, |
|
||||||
}, |
|
||||||
} |
|
||||||
out, err := yaml.Marshal(g) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
err = ioutil.WriteFile(filepath.Join(tmpdir, "gallery_simple.yaml"), out, 0644) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
|
|
||||||
galleries := []gallery.Gallery{ |
|
||||||
{ |
|
||||||
Name: "test", |
|
||||||
URL: "file://" + filepath.Join(tmpdir, "gallery_simple.yaml"), |
|
||||||
}, |
|
||||||
} |
|
||||||
|
|
||||||
app, err = App( |
|
||||||
append(commonOpts, |
|
||||||
options.WithContext(c), |
|
||||||
options.WithGalleries(galleries), |
|
||||||
options.WithModelLoader(modelLoader), options.WithBackendAssets(backendAssets), options.WithBackendAssetsOutput(tmpdir))...) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
go app.Listen("127.0.0.1:9090") |
|
||||||
|
|
||||||
defaultConfig := openai.DefaultConfig("") |
|
||||||
defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" |
|
||||||
|
|
||||||
client2 = openaigo.NewClient("") |
|
||||||
client2.BaseURL = defaultConfig.BaseURL |
|
||||||
|
|
||||||
// Wait for API to be ready
|
|
||||||
client = openai.NewClientWithConfig(defaultConfig) |
|
||||||
Eventually(func() error { |
|
||||||
_, err := client.ListModels(context.TODO()) |
|
||||||
return err |
|
||||||
}, "2m").ShouldNot(HaveOccurred()) |
|
||||||
}) |
|
||||||
|
|
||||||
AfterEach(func() { |
|
||||||
cancel() |
|
||||||
app.Shutdown() |
|
||||||
os.RemoveAll(tmpdir) |
|
||||||
}) |
|
||||||
|
|
||||||
Context("Applying models", func() { |
|
||||||
It("applies models from a gallery", func() { |
|
||||||
|
|
||||||
models := getModels("http://127.0.0.1:9090/models/available") |
|
||||||
Expect(len(models)).To(Equal(2), fmt.Sprint(models)) |
|
||||||
Expect(models[0].Installed).To(BeFalse(), fmt.Sprint(models)) |
|
||||||
Expect(models[1].Installed).To(BeFalse(), fmt.Sprint(models)) |
|
||||||
|
|
||||||
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ |
|
||||||
ID: "test@bert2", |
|
||||||
}) |
|
||||||
|
|
||||||
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) |
|
||||||
|
|
||||||
uuid := response["uuid"].(string) |
|
||||||
resp := map[string]interface{}{} |
|
||||||
Eventually(func() bool { |
|
||||||
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) |
|
||||||
fmt.Println(response) |
|
||||||
resp = response |
|
||||||
return response["processed"].(bool) |
|
||||||
}, "360s", "10s").Should(Equal(true)) |
|
||||||
Expect(resp["message"]).ToNot(ContainSubstring("error")) |
|
||||||
|
|
||||||
dat, err := os.ReadFile(filepath.Join(tmpdir, "bert2.yaml")) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
|
|
||||||
_, err = os.ReadFile(filepath.Join(tmpdir, "foo.yaml")) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
|
|
||||||
content := map[string]interface{}{} |
|
||||||
err = yaml.Unmarshal(dat, &content) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(content["backend"]).To(Equal("bert-embeddings")) |
|
||||||
Expect(content["foo"]).To(Equal("bar")) |
|
||||||
|
|
||||||
models = getModels("http://127.0.0.1:9090/models/available") |
|
||||||
Expect(len(models)).To(Equal(2), fmt.Sprint(models)) |
|
||||||
Expect(models[0].Name).To(Or(Equal("bert"), Equal("bert2"))) |
|
||||||
Expect(models[1].Name).To(Or(Equal("bert"), Equal("bert2"))) |
|
||||||
for _, m := range models { |
|
||||||
if m.Name == "bert2" { |
|
||||||
Expect(m.Installed).To(BeTrue()) |
|
||||||
} else { |
|
||||||
Expect(m.Installed).To(BeFalse()) |
|
||||||
} |
|
||||||
} |
|
||||||
}) |
|
||||||
It("overrides models", func() { |
|
||||||
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ |
|
||||||
URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml", |
|
||||||
Name: "bert", |
|
||||||
Overrides: map[string]string{ |
|
||||||
"backend": "llama", |
|
||||||
}, |
|
||||||
}) |
|
||||||
|
|
||||||
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) |
|
||||||
|
|
||||||
uuid := response["uuid"].(string) |
|
||||||
|
|
||||||
Eventually(func() bool { |
|
||||||
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) |
|
||||||
return response["processed"].(bool) |
|
||||||
}, "360s", "10s").Should(Equal(true)) |
|
||||||
|
|
||||||
dat, err := os.ReadFile(filepath.Join(tmpdir, "bert.yaml")) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
|
|
||||||
content := map[string]interface{}{} |
|
||||||
err = yaml.Unmarshal(dat, &content) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(content["backend"]).To(Equal("llama")) |
|
||||||
}) |
|
||||||
It("apply models without overrides", func() { |
|
||||||
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ |
|
||||||
URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml", |
|
||||||
Name: "bert", |
|
||||||
Overrides: map[string]string{}, |
|
||||||
}) |
|
||||||
|
|
||||||
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) |
|
||||||
|
|
||||||
uuid := response["uuid"].(string) |
|
||||||
|
|
||||||
Eventually(func() bool { |
|
||||||
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) |
|
||||||
return response["processed"].(bool) |
|
||||||
}, "360s", "10s").Should(Equal(true)) |
|
||||||
|
|
||||||
dat, err := os.ReadFile(filepath.Join(tmpdir, "bert.yaml")) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
|
|
||||||
content := map[string]interface{}{} |
|
||||||
err = yaml.Unmarshal(dat, &content) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(content["backend"]).To(Equal("bert-embeddings")) |
|
||||||
}) |
|
||||||
|
|
||||||
It("runs openllama", Label("llama"), func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ |
|
||||||
URL: "github:go-skynet/model-gallery/openllama_3b.yaml", |
|
||||||
Name: "openllama_3b", |
|
||||||
Overrides: map[string]string{"backend": "llama-grammar"}, |
|
||||||
}) |
|
||||||
|
|
||||||
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) |
|
||||||
|
|
||||||
uuid := response["uuid"].(string) |
|
||||||
|
|
||||||
Eventually(func() bool { |
|
||||||
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) |
|
||||||
return response["processed"].(bool) |
|
||||||
}, "360s", "10s").Should(Equal(true)) |
|
||||||
|
|
||||||
By("testing completion") |
|
||||||
resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "openllama_3b", Prompt: "Count up to five: one, two, three, four, "}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Text).To(ContainSubstring("five")) |
|
||||||
|
|
||||||
By("testing functions") |
|
||||||
resp2, err := client.CreateChatCompletion( |
|
||||||
context.TODO(), |
|
||||||
openai.ChatCompletionRequest{ |
|
||||||
Model: "openllama_3b", |
|
||||||
Messages: []openai.ChatCompletionMessage{ |
|
||||||
{ |
|
||||||
Role: "user", |
|
||||||
Content: "What is the weather like in San Francisco (celsius)?", |
|
||||||
}, |
|
||||||
}, |
|
||||||
Functions: []openai.FunctionDefinition{ |
|
||||||
openai.FunctionDefinition{ |
|
||||||
Name: "get_current_weather", |
|
||||||
Description: "Get the current weather", |
|
||||||
Parameters: jsonschema.Definition{ |
|
||||||
Type: jsonschema.Object, |
|
||||||
Properties: map[string]jsonschema.Definition{ |
|
||||||
"location": { |
|
||||||
Type: jsonschema.String, |
|
||||||
Description: "The city and state, e.g. San Francisco, CA", |
|
||||||
}, |
|
||||||
"unit": { |
|
||||||
Type: jsonschema.String, |
|
||||||
Enum: []string{"celcius", "fahrenheit"}, |
|
||||||
}, |
|
||||||
}, |
|
||||||
Required: []string{"location"}, |
|
||||||
}, |
|
||||||
}, |
|
||||||
}, |
|
||||||
}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp2.Choices)).To(Equal(1)) |
|
||||||
Expect(resp2.Choices[0].Message.FunctionCall).ToNot(BeNil()) |
|
||||||
Expect(resp2.Choices[0].Message.FunctionCall.Name).To(Equal("get_current_weather"), resp2.Choices[0].Message.FunctionCall.Name) |
|
||||||
|
|
||||||
var res map[string]string |
|
||||||
err = json.Unmarshal([]byte(resp2.Choices[0].Message.FunctionCall.Arguments), &res) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(res["location"]).To(Equal("San Francisco"), fmt.Sprint(res)) |
|
||||||
Expect(res["unit"]).To(Equal("celcius"), fmt.Sprint(res)) |
|
||||||
Expect(string(resp2.Choices[0].FinishReason)).To(Equal("function_call"), fmt.Sprint(resp2.Choices[0].FinishReason)) |
|
||||||
}) |
|
||||||
|
|
||||||
It("runs gpt4all", Label("gpt4all"), func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
|
|
||||||
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ |
|
||||||
URL: "github:go-skynet/model-gallery/gpt4all-j.yaml", |
|
||||||
Name: "gpt4all-j", |
|
||||||
Overrides: map[string]string{}, |
|
||||||
}) |
|
||||||
|
|
||||||
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) |
|
||||||
|
|
||||||
uuid := response["uuid"].(string) |
|
||||||
|
|
||||||
Eventually(func() bool { |
|
||||||
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) |
|
||||||
return response["processed"].(bool) |
|
||||||
}, "360s", "10s").Should(Equal(true)) |
|
||||||
|
|
||||||
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "gpt4all-j", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "How are you?"}}}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Message.Content).To(ContainSubstring("well")) |
|
||||||
}) |
|
||||||
|
|
||||||
}) |
|
||||||
}) |
|
||||||
|
|
||||||
Context("Model gallery", func() { |
|
||||||
BeforeEach(func() { |
|
||||||
var err error |
|
||||||
tmpdir, err = os.MkdirTemp("", "") |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
|
|
||||||
modelLoader = model.NewModelLoader(tmpdir) |
|
||||||
c, cancel = context.WithCancel(context.Background()) |
|
||||||
|
|
||||||
galleries := []gallery.Gallery{ |
|
||||||
{ |
|
||||||
Name: "model-gallery", |
|
||||||
URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/index.yaml", |
|
||||||
}, |
|
||||||
} |
|
||||||
|
|
||||||
app, err = App( |
|
||||||
append(commonOpts, |
|
||||||
options.WithContext(c), |
|
||||||
options.WithAudioDir(tmpdir), |
|
||||||
options.WithImageDir(tmpdir), |
|
||||||
options.WithGalleries(galleries), |
|
||||||
options.WithModelLoader(modelLoader), |
|
||||||
options.WithBackendAssets(backendAssets), |
|
||||||
options.WithBackendAssetsOutput(tmpdir))..., |
|
||||||
) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
go app.Listen("127.0.0.1:9090") |
|
||||||
|
|
||||||
defaultConfig := openai.DefaultConfig("") |
|
||||||
defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" |
|
||||||
|
|
||||||
client2 = openaigo.NewClient("") |
|
||||||
client2.BaseURL = defaultConfig.BaseURL |
|
||||||
|
|
||||||
// Wait for API to be ready
|
|
||||||
client = openai.NewClientWithConfig(defaultConfig) |
|
||||||
Eventually(func() error { |
|
||||||
_, err := client.ListModels(context.TODO()) |
|
||||||
return err |
|
||||||
}, "2m").ShouldNot(HaveOccurred()) |
|
||||||
}) |
|
||||||
|
|
||||||
AfterEach(func() { |
|
||||||
cancel() |
|
||||||
app.Shutdown() |
|
||||||
os.RemoveAll(tmpdir) |
|
||||||
}) |
|
||||||
It("installs and is capable to run tts", Label("tts"), func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
|
|
||||||
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ |
|
||||||
ID: "model-gallery@voice-en-us-kathleen-low", |
|
||||||
}) |
|
||||||
|
|
||||||
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) |
|
||||||
|
|
||||||
uuid := response["uuid"].(string) |
|
||||||
|
|
||||||
Eventually(func() bool { |
|
||||||
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) |
|
||||||
fmt.Println(response) |
|
||||||
return response["processed"].(bool) |
|
||||||
}, "360s", "10s").Should(Equal(true)) |
|
||||||
|
|
||||||
// An HTTP Post to the /tts endpoint should return a wav audio file
|
|
||||||
resp, err := http.Post("http://127.0.0.1:9090/tts", "application/json", bytes.NewBuffer([]byte(`{"input": "Hello world", "model": "en-us-kathleen-low.onnx"}`))) |
|
||||||
Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp)) |
|
||||||
dat, err := io.ReadAll(resp.Body) |
|
||||||
Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp)) |
|
||||||
|
|
||||||
Expect(resp.StatusCode).To(Equal(200), fmt.Sprint(string(dat))) |
|
||||||
Expect(resp.Header.Get("Content-Type")).To(Equal("audio/x-wav")) |
|
||||||
}) |
|
||||||
It("installs and is capable to generate images", Label("stablediffusion"), func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
|
|
||||||
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{ |
|
||||||
ID: "model-gallery@stablediffusion", |
|
||||||
}) |
|
||||||
|
|
||||||
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response)) |
|
||||||
|
|
||||||
uuid := response["uuid"].(string) |
|
||||||
|
|
||||||
Eventually(func() bool { |
|
||||||
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid) |
|
||||||
fmt.Println(response) |
|
||||||
return response["processed"].(bool) |
|
||||||
}, "360s", "10s").Should(Equal(true)) |
|
||||||
|
|
||||||
resp, err := http.Post( |
|
||||||
"http://127.0.0.1:9090/v1/images/generations", |
|
||||||
"application/json", |
|
||||||
bytes.NewBuffer([]byte(`{ |
|
||||||
"prompt": "floating hair, portrait, ((loli)), ((one girl)), cute face, hidden hands, asymmetrical bangs, beautiful detailed eyes, eye shadow, hair ornament, ribbons, bowties, buttons, pleated skirt, (((masterpiece))), ((best quality)), colorful|((part of the head)), ((((mutated hands and fingers)))), deformed, blurry, bad anatomy, disfigured, poorly drawn face, mutation, mutated, extra limb, ugly, poorly drawn hands, missing limb, blurry, floating limbs, disconnected limbs, malformed hands, blur, out of focus, long neck, long body, Octane renderer, lowres, bad anatomy, bad hands, text", |
|
||||||
"mode": 2, "seed":9000, |
|
||||||
"size": "256x256", "n":2}`))) |
|
||||||
// The response should contain an URL
|
|
||||||
Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp)) |
|
||||||
dat, err := io.ReadAll(resp.Body) |
|
||||||
Expect(err).ToNot(HaveOccurred(), string(dat)) |
|
||||||
Expect(string(dat)).To(ContainSubstring("http://127.0.0.1:9090/"), string(dat)) |
|
||||||
Expect(string(dat)).To(ContainSubstring(".png"), string(dat)) |
|
||||||
|
|
||||||
}) |
|
||||||
}) |
|
||||||
|
|
||||||
Context("API query", func() { |
|
||||||
BeforeEach(func() { |
|
||||||
modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH")) |
|
||||||
c, cancel = context.WithCancel(context.Background()) |
|
||||||
|
|
||||||
var err error |
|
||||||
app, err = App( |
|
||||||
append(commonOpts, |
|
||||||
options.WithExternalBackend("huggingface", os.Getenv("HUGGINGFACE_GRPC")), |
|
||||||
options.WithContext(c), |
|
||||||
options.WithModelLoader(modelLoader), |
|
||||||
)...) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
go app.Listen("127.0.0.1:9090") |
|
||||||
|
|
||||||
defaultConfig := openai.DefaultConfig("") |
|
||||||
defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" |
|
||||||
|
|
||||||
client2 = openaigo.NewClient("") |
|
||||||
client2.BaseURL = defaultConfig.BaseURL |
|
||||||
|
|
||||||
// Wait for API to be ready
|
|
||||||
client = openai.NewClientWithConfig(defaultConfig) |
|
||||||
Eventually(func() error { |
|
||||||
_, err := client.ListModels(context.TODO()) |
|
||||||
return err |
|
||||||
}, "2m").ShouldNot(HaveOccurred()) |
|
||||||
}) |
|
||||||
AfterEach(func() { |
|
||||||
cancel() |
|
||||||
app.Shutdown() |
|
||||||
}) |
|
||||||
It("returns the models list", func() { |
|
||||||
models, err := client.ListModels(context.TODO()) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(models.Models)).To(Equal(11)) |
|
||||||
}) |
|
||||||
It("can generate completions", func() { |
|
||||||
resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "testmodel", Prompt: "abcdedfghikl"}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Text).ToNot(BeEmpty()) |
|
||||||
}) |
|
||||||
|
|
||||||
It("can generate chat completions ", func() { |
|
||||||
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "testmodel", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "abcdedfghikl"}}}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Message.Content).ToNot(BeEmpty()) |
|
||||||
}) |
|
||||||
|
|
||||||
It("can generate completions from model configs", func() { |
|
||||||
resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "gpt4all", Prompt: "abcdedfghikl"}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Text).ToNot(BeEmpty()) |
|
||||||
}) |
|
||||||
|
|
||||||
It("can generate chat completions from model configs", func() { |
|
||||||
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "gpt4all-2", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "abcdedfghikl"}}}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Message.Content).ToNot(BeEmpty()) |
|
||||||
}) |
|
||||||
|
|
||||||
It("returns errors", func() { |
|
||||||
backends := len(model.AutoLoadBackends) + 1 // +1 for huggingface
|
|
||||||
_, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "foomodel", Prompt: "abcdedfghikl"}) |
|
||||||
Expect(err).To(HaveOccurred()) |
|
||||||
Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("error, status code: 500, message: could not load model - all backends returned error: %d errors occurred:", backends))) |
|
||||||
}) |
|
||||||
It("transcribes audio", func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
resp, err := client.CreateTranscription( |
|
||||||
context.Background(), |
|
||||||
openai.AudioRequest{ |
|
||||||
Model: openai.Whisper1, |
|
||||||
FilePath: filepath.Join(os.Getenv("TEST_DIR"), "audio.wav"), |
|
||||||
}, |
|
||||||
) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(resp.Text).To(ContainSubstring("This is the Micro Machine Man presenting")) |
|
||||||
}) |
|
||||||
|
|
||||||
It("calculate embeddings", func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
resp, err := client.CreateEmbeddings( |
|
||||||
context.Background(), |
|
||||||
openai.EmbeddingRequest{ |
|
||||||
Model: openai.AdaEmbeddingV2, |
|
||||||
Input: []string{"sun", "cat"}, |
|
||||||
}, |
|
||||||
) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Data[0].Embedding)).To(BeNumerically("==", 384)) |
|
||||||
Expect(len(resp.Data[1].Embedding)).To(BeNumerically("==", 384)) |
|
||||||
|
|
||||||
sunEmbedding := resp.Data[0].Embedding |
|
||||||
resp2, err := client.CreateEmbeddings( |
|
||||||
context.Background(), |
|
||||||
openai.EmbeddingRequest{ |
|
||||||
Model: openai.AdaEmbeddingV2, |
|
||||||
Input: []string{"sun"}, |
|
||||||
}, |
|
||||||
) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(resp2.Data[0].Embedding).To(Equal(sunEmbedding)) |
|
||||||
}) |
|
||||||
|
|
||||||
Context("External gRPC calls", func() { |
|
||||||
It("calculate embeddings with huggingface", func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
resp, err := client.CreateEmbeddings( |
|
||||||
context.Background(), |
|
||||||
openai.EmbeddingRequest{ |
|
||||||
Model: openai.AdaCodeSearchCode, |
|
||||||
Input: []string{"sun", "cat"}, |
|
||||||
}, |
|
||||||
) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Data[0].Embedding)).To(BeNumerically("==", 384)) |
|
||||||
Expect(len(resp.Data[1].Embedding)).To(BeNumerically("==", 384)) |
|
||||||
|
|
||||||
sunEmbedding := resp.Data[0].Embedding |
|
||||||
resp2, err := client.CreateEmbeddings( |
|
||||||
context.Background(), |
|
||||||
openai.EmbeddingRequest{ |
|
||||||
Model: openai.AdaCodeSearchCode, |
|
||||||
Input: []string{"sun"}, |
|
||||||
}, |
|
||||||
) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(resp2.Data[0].Embedding).To(Equal(sunEmbedding)) |
|
||||||
Expect(resp2.Data[0].Embedding).ToNot(Equal(resp.Data[1].Embedding)) |
|
||||||
}) |
|
||||||
}) |
|
||||||
|
|
||||||
Context("backends", func() { |
|
||||||
It("runs rwkv completion", func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "rwkv_test", Prompt: "Count up to five: one, two, three, four,"}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices) > 0).To(BeTrue()) |
|
||||||
Expect(resp.Choices[0].Text).To(ContainSubstring("five")) |
|
||||||
|
|
||||||
stream, err := client.CreateCompletionStream(context.TODO(), openai.CompletionRequest{ |
|
||||||
Model: "rwkv_test", Prompt: "Count up to five: one, two, three, four,", Stream: true, |
|
||||||
}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
defer stream.Close() |
|
||||||
|
|
||||||
tokens := 0 |
|
||||||
text := "" |
|
||||||
for { |
|
||||||
response, err := stream.Recv() |
|
||||||
if errors.Is(err, io.EOF) { |
|
||||||
break |
|
||||||
} |
|
||||||
|
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
text += response.Choices[0].Text |
|
||||||
tokens++ |
|
||||||
} |
|
||||||
Expect(text).ToNot(BeEmpty()) |
|
||||||
Expect(text).To(ContainSubstring("five")) |
|
||||||
Expect(tokens).ToNot(Or(Equal(1), Equal(0))) |
|
||||||
}) |
|
||||||
It("runs rwkv chat completion", func() { |
|
||||||
if runtime.GOOS != "linux" { |
|
||||||
Skip("test supported only on linux") |
|
||||||
} |
|
||||||
resp, err := client.CreateChatCompletion(context.TODO(), |
|
||||||
openai.ChatCompletionRequest{Model: "rwkv_test", Messages: []openai.ChatCompletionMessage{{Content: "Can you count up to five?", Role: "user"}}}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices) > 0).To(BeTrue()) |
|
||||||
Expect(resp.Choices[0].Message.Content).To(Or(ContainSubstring("Sure"), ContainSubstring("five"))) |
|
||||||
|
|
||||||
stream, err := client.CreateChatCompletionStream(context.TODO(), openai.ChatCompletionRequest{Model: "rwkv_test", Messages: []openai.ChatCompletionMessage{{Content: "Can you count up to five?", Role: "user"}}}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
defer stream.Close() |
|
||||||
|
|
||||||
tokens := 0 |
|
||||||
text := "" |
|
||||||
for { |
|
||||||
response, err := stream.Recv() |
|
||||||
if errors.Is(err, io.EOF) { |
|
||||||
break |
|
||||||
} |
|
||||||
|
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
text += response.Choices[0].Delta.Content |
|
||||||
tokens++ |
|
||||||
} |
|
||||||
Expect(text).ToNot(BeEmpty()) |
|
||||||
Expect(text).To(Or(ContainSubstring("Sure"), ContainSubstring("five"))) |
|
||||||
|
|
||||||
Expect(tokens).ToNot(Or(Equal(1), Equal(0))) |
|
||||||
}) |
|
||||||
}) |
|
||||||
}) |
|
||||||
|
|
||||||
Context("Config file", func() { |
|
||||||
BeforeEach(func() { |
|
||||||
modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH")) |
|
||||||
c, cancel = context.WithCancel(context.Background()) |
|
||||||
|
|
||||||
var err error |
|
||||||
app, err = App( |
|
||||||
append(commonOpts, |
|
||||||
options.WithContext(c), |
|
||||||
options.WithModelLoader(modelLoader), |
|
||||||
options.WithConfigFile(os.Getenv("CONFIG_FILE")))..., |
|
||||||
) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
go app.Listen("127.0.0.1:9090") |
|
||||||
|
|
||||||
defaultConfig := openai.DefaultConfig("") |
|
||||||
defaultConfig.BaseURL = "http://127.0.0.1:9090/v1" |
|
||||||
client2 = openaigo.NewClient("") |
|
||||||
client2.BaseURL = defaultConfig.BaseURL |
|
||||||
// Wait for API to be ready
|
|
||||||
client = openai.NewClientWithConfig(defaultConfig) |
|
||||||
Eventually(func() error { |
|
||||||
_, err := client.ListModels(context.TODO()) |
|
||||||
return err |
|
||||||
}, "2m").ShouldNot(HaveOccurred()) |
|
||||||
}) |
|
||||||
AfterEach(func() { |
|
||||||
cancel() |
|
||||||
app.Shutdown() |
|
||||||
}) |
|
||||||
It("can generate chat completions from config file", func() { |
|
||||||
models, err := client.ListModels(context.TODO()) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(models.Models)).To(Equal(13)) |
|
||||||
}) |
|
||||||
It("can generate chat completions from config file", func() { |
|
||||||
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "list1", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "abcdedfghikl"}}}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Message.Content).ToNot(BeEmpty()) |
|
||||||
}) |
|
||||||
It("can generate chat completions from config file", func() { |
|
||||||
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "list2", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "abcdedfghikl"}}}) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Message.Content).ToNot(BeEmpty()) |
|
||||||
}) |
|
||||||
It("can generate edit completions from config file", func() { |
|
||||||
request := openaigo.EditCreateRequestBody{ |
|
||||||
Model: "list2", |
|
||||||
Instruction: "foo", |
|
||||||
Input: "bar", |
|
||||||
} |
|
||||||
resp, err := client2.CreateEdit(context.Background(), request) |
|
||||||
Expect(err).ToNot(HaveOccurred()) |
|
||||||
Expect(len(resp.Choices)).To(Equal(1)) |
|
||||||
Expect(resp.Choices[0].Text).ToNot(BeEmpty()) |
|
||||||
}) |
|
||||||
|
|
||||||
}) |
|
||||||
}) |
|
@ -1,13 +0,0 @@ |
package api_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestLocalAI(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "LocalAI test suite")
}
|
@ -1,109 +0,0 @@ |
package backend

import (
	"fmt"
	"sync"

	config "github.com/go-skynet/LocalAI/api/config"
	"github.com/go-skynet/LocalAI/api/options"
	"github.com/go-skynet/LocalAI/pkg/grpc"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, c config.Config, o *options.Option) (func() ([]float32, error), error) {
	if !c.Embeddings {
		return nil, fmt.Errorf("endpoint disabled for this model by API configuration")
	}

	modelFile := c.Model

	grpcOpts := gRPCModelOpts(c)

	var inferenceModel interface{}
	var err error

	opts := []model.Option{
		model.WithLoadGRPCLLMModelOpts(grpcOpts),
		model.WithThreads(uint32(c.Threads)),
		model.WithAssetDir(o.AssetsDestination),
		model.WithModelFile(modelFile),
		model.WithContext(o.Context),
	}

	for k, v := range o.ExternalGRPCBackends {
		opts = append(opts, model.WithExternalBackend(k, v))
	}

	if c.Backend == "" {
		inferenceModel, err = loader.GreedyLoader(opts...)
	} else {
		opts = append(opts, model.WithBackendString(c.Backend))
		inferenceModel, err = loader.BackendLoader(opts...)
	}
	if err != nil {
		return nil, err
	}

	var fn func() ([]float32, error)
	switch model := inferenceModel.(type) {
	case *grpc.Client:
		fn = func() ([]float32, error) {
			predictOptions := gRPCPredictOpts(c, loader.ModelPath)
			if len(tokens) > 0 {
				embeds := []int32{}

				for _, t := range tokens {
					embeds = append(embeds, int32(t))
				}
				predictOptions.EmbeddingTokens = embeds

				res, err := model.Embeddings(o.Context, predictOptions)
				if err != nil {
					return nil, err
				}

				return res.Embeddings, nil
			}
			predictOptions.Embeddings = s

			res, err := model.Embeddings(o.Context, predictOptions)
			if err != nil {
				return nil, err
			}

			return res.Embeddings, nil
		}
	default:
		fn = func() ([]float32, error) {
			return nil, fmt.Errorf("embeddings not supported by the backend")
		}
	}

	return func() ([]float32, error) {
		// This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
		mutexMap.Lock()
		l, ok := mutexes[modelFile]
		if !ok {
			m := &sync.Mutex{}
			mutexes[modelFile] = m
			l = m
		}
		mutexMap.Unlock()
		l.Lock()
		defer l.Unlock()

		embeds, err := fn()
		if err != nil {
			return embeds, err
		}
		// Remove trailing 0s
		for i := len(embeds) - 1; i >= 0; i-- {
			if embeds[i] == 0.0 {
				embeds = embeds[:i]
			} else {
				break
			}
		}
		return embeds, nil
	}, nil
}
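ModelEmbedding only builds and returns a closure; no inference happens until the caller invokes it. A rough sketch of a direct caller, assuming the api/backend import path and a bert-embeddings model under MODELS_PATH (both hypothetical here), and reusing the options helpers that appear elsewhere in this diff:

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/go-skynet/LocalAI/api/backend"
	config "github.com/go-skynet/LocalAI/api/config"
	"github.com/go-skynet/LocalAI/api/options"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

func main() {
	loader := model.NewModelLoader(os.Getenv("MODELS_PATH"))
	opts := options.NewOptions()
	options.WithModelLoader(loader)(opts)
	options.WithContext(context.Background())(opts)

	// Hypothetical model configuration: name and backend must match an
	// embeddings-capable model available under MODELS_PATH.
	cfg := config.Config{Embeddings: true, Backend: "bert-embeddings"}
	cfg.Model = "bert"

	embedFn, err := backend.ModelEmbedding("the quick brown fox", nil, loader, cfg, opts)
	if err != nil {
		panic(err)
	}
	// The closure performs the actual gRPC call, serialized per model file
	// by the mutex map shown above.
	vector, err := embedFn()
	if err != nil {
		panic(err)
	}
	fmt.Printf("got a %d-dimensional embedding\n", len(vector))
}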
|
@ -1,68 +0,0 @@ |
package backend

import (
	"fmt"
	"sync"

	config "github.com/go-skynet/LocalAI/api/config"
	"github.com/go-skynet/LocalAI/api/options"
	"github.com/go-skynet/LocalAI/pkg/grpc/proto"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negative_prompt, dst string, loader *model.ModelLoader, c config.Config, o *options.Option) (func() error, error) {
	if c.Backend != model.StableDiffusionBackend {
		return nil, fmt.Errorf("endpoint only working with stablediffusion models")
	}

	opts := []model.Option{
		model.WithBackendString(c.Backend),
		model.WithAssetDir(o.AssetsDestination),
		model.WithThreads(uint32(c.Threads)),
		model.WithContext(o.Context),
		model.WithModelFile(c.ImageGenerationAssets),
	}

	for k, v := range o.ExternalGRPCBackends {
		opts = append(opts, model.WithExternalBackend(k, v))
	}

	inferenceModel, err := loader.BackendLoader(
		opts...,
	)
	if err != nil {
		return nil, err
	}

	fn := func() error {
		_, err := inferenceModel.GenerateImage(
			o.Context,
			&proto.GenerateImageRequest{
				Height:         int32(height),
				Width:          int32(width),
				Mode:           int32(mode),
				Step:           int32(step),
				Seed:           int32(seed),
				PositivePrompt: positive_prompt,
				NegativePrompt: negative_prompt,
				Dst:            dst,
			})
		return err
	}

	return func() error {
		// This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
		mutexMap.Lock()
		l, ok := mutexes[c.Backend]
		if !ok {
			m := &sync.Mutex{}
			mutexes[c.Backend] = m
			l = m
		}
		mutexMap.Unlock()
		l.Lock()
		defer l.Unlock()

		return fn()
	}, nil
}
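ImageGeneration is what backs the /v1/images/generations route registered earlier, and the stablediffusion test earlier in this diff exercises it over HTTP. A trimmed-down sketch of the same call (not part of this change), assuming a running instance with the stablediffusion model installed:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical prompt; size mirrors the test payload.
	body := []byte(`{"prompt": "a photo of a lighthouse at sunset", "size": "256x256"}`)
	resp, err := http.Post("http://127.0.0.1:9090/v1/images/generations", "application/json", bytes.NewBuffer(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The response carries URLs under /generated-images, which is served
	// statically from ImageDir by the route setup above.
	dat, _ := io.ReadAll(resp.Body)
	fmt.Println(string(dat))
}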
|
@ -1,124 +0,0 @@ |
package backend

import (
	"os"
	"regexp"
	"strings"
	"sync"

	config "github.com/go-skynet/LocalAI/api/config"
	"github.com/go-skynet/LocalAI/api/options"
	"github.com/go-skynet/LocalAI/pkg/gallery"
	"github.com/go-skynet/LocalAI/pkg/grpc"
	model "github.com/go-skynet/LocalAI/pkg/model"
	"github.com/go-skynet/LocalAI/pkg/utils"
)

func ModelInference(s string, loader *model.ModelLoader, c config.Config, o *options.Option, tokenCallback func(string) bool) (func() (string, error), error) {
	modelFile := c.Model

	grpcOpts := gRPCModelOpts(c)

	var inferenceModel *grpc.Client
	var err error

	opts := []model.Option{
		model.WithLoadGRPCLLMModelOpts(grpcOpts),
		model.WithThreads(uint32(c.Threads)), // some models use this to allocate threads during startup
		model.WithAssetDir(o.AssetsDestination),
		model.WithModelFile(modelFile),
		model.WithContext(o.Context),
	}

	for k, v := range o.ExternalGRPCBackends {
		opts = append(opts, model.WithExternalBackend(k, v))
	}

	if c.Backend != "" {
		opts = append(opts, model.WithBackendString(c.Backend))
	}

	// Check if the modelFile exists, if it doesn't try to load it from the gallery
	if o.AutoloadGalleries { // experimental
		if _, err := os.Stat(modelFile); os.IsNotExist(err) {
			utils.ResetDownloadTimers()
			// if we failed to load the model, we try to download it
			err := gallery.InstallModelFromGalleryByName(o.Galleries, modelFile, loader.ModelPath, gallery.GalleryModel{}, utils.DisplayDownloadFunction)
			if err != nil {
				return nil, err
			}
		}
	}

	if c.Backend == "" {
		inferenceModel, err = loader.GreedyLoader(opts...)
	} else {
		inferenceModel, err = loader.BackendLoader(opts...)
	}

	if err != nil {
		return nil, err
	}

	// in GRPC, the backend is supposed to answer to 1 single token if stream is not supported
	fn := func() (string, error) {
		opts := gRPCPredictOpts(c, loader.ModelPath)
		opts.Prompt = s
		if tokenCallback != nil {
			ss := ""
			err := inferenceModel.PredictStream(o.Context, opts, func(s string) {
				tokenCallback(s)
				ss += s
			})
			return ss, err
		} else {
			reply, err := inferenceModel.Predict(o.Context, opts)
			if err != nil {
				return "", err
			}
			return reply.Message, err
		}
	}

	return func() (string, error) {
		// This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
		mutexMap.Lock()
		l, ok := mutexes[modelFile]
		if !ok {
			m := &sync.Mutex{}
			mutexes[modelFile] = m
			l = m
		}
		mutexMap.Unlock()
		l.Lock()
		defer l.Unlock()

		return fn()
	}, nil
}

var cutstrings map[string]*regexp.Regexp = make(map[string]*regexp.Regexp)
var mu sync.Mutex = sync.Mutex{}

func Finetune(config config.Config, input, prediction string) string {
	if config.Echo {
		prediction = input + prediction
	}

	for _, c := range config.Cutstrings {
		mu.Lock()
		reg, ok := cutstrings[c]
		if !ok {
			cutstrings[c] = regexp.MustCompile(c)
			reg = cutstrings[c]
		}
		mu.Unlock()
		prediction = reg.ReplaceAllString(prediction, "")
	}

	for _, c := range config.TrimSpace {
		prediction = strings.TrimSpace(strings.TrimPrefix(prediction, c))
	}
	return prediction
}
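ModelInference likewise returns a closure, and the tokenCallback argument decides between PredictStream and a single Predict call. A sketch of a streaming caller (not part of this change), under the same assumptions as the embeddings example above:

package sketch

import (
	"fmt"

	"github.com/go-skynet/LocalAI/api/backend"
	config "github.com/go-skynet/LocalAI/api/config"
	"github.com/go-skynet/LocalAI/api/options"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

// streamCompletion shows the shape of a streaming caller: the callback is
// invoked once per token via PredictStream, and the returned closure blocks
// until generation finishes.
func streamCompletion(prompt string, loader *model.ModelLoader, cfg config.Config, opts *options.Option) (string, error) {
	infer, err := backend.ModelInference(prompt, loader, cfg, opts, func(token string) bool {
		fmt.Print(token) // stream each token to stdout as it arrives
		return true
	})
	if err != nil {
		return "", err
	}
	// Runs under the per-model mutex, exactly like the embeddings path.
	return infer()
}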
|
@ -1,22 +0,0 @@ |
package backend

import "sync"

// mutex still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
var mutexMap sync.Mutex
var mutexes map[string]*sync.Mutex = make(map[string]*sync.Mutex)

func Lock(s string) *sync.Mutex {
	// This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
	mutexMap.Lock()
	l, ok := mutexes[s]
	if !ok {
		m := &sync.Mutex{}
		mutexes[s] = m
		l = m
	}
	mutexMap.Unlock()
	l.Lock()

	return l
}
|
@ -1,72 +0,0 @@ |
package backend

import (
	"os"
	"path/filepath"

	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"

	config "github.com/go-skynet/LocalAI/api/config"
)

func gRPCModelOpts(c config.Config) *pb.ModelOptions {
	b := 512
	if c.Batch != 0 {
		b = c.Batch
	}
	return &pb.ModelOptions{
		ContextSize: int32(c.ContextSize),
		Seed:        int32(c.Seed),
		NBatch:      int32(b),
		F16Memory:   c.F16,
		MLock:       c.MMlock,
		NUMA:        c.NUMA,
		Embeddings:  c.Embeddings,
		LowVRAM:     c.LowVRAM,
		NGPULayers:  int32(c.NGPULayers),
		MMap:        c.MMap,
		MainGPU:     c.MainGPU,
		Threads:     int32(c.Threads),
		TensorSplit: c.TensorSplit,
	}
}

func gRPCPredictOpts(c config.Config, modelPath string) *pb.PredictOptions {
	promptCachePath := ""
	if c.PromptCachePath != "" {
		p := filepath.Join(modelPath, c.PromptCachePath)
		os.MkdirAll(filepath.Dir(p), 0755)
		promptCachePath = p
	}
	return &pb.PredictOptions{
		Temperature:     float32(c.Temperature),
		TopP:            float32(c.TopP),
		TopK:            int32(c.TopK),
		Tokens:          int32(c.Maxtokens),
		Threads:         int32(c.Threads),
		PromptCacheAll:  c.PromptCacheAll,
		PromptCacheRO:   c.PromptCacheRO,
		PromptCachePath: promptCachePath,
		F16KV:           c.F16,
		DebugMode:       c.Debug,
		Grammar:         c.Grammar,

		Mirostat:          int32(c.Mirostat),
		MirostatETA:       float32(c.MirostatETA),
		MirostatTAU:       float32(c.MirostatTAU),
		Debug:             c.Debug,
		StopPrompts:       c.StopWords,
		Repeat:            int32(c.RepeatPenalty),
		NKeep:             int32(c.Keep),
		Batch:             int32(c.Batch),
		IgnoreEOS:         c.IgnoreEOS,
		Seed:              int32(c.Seed),
		FrequencyPenalty:  float32(c.FrequencyPenalty),
		MLock:             c.MMlock,
		MMap:              c.MMap,
		MainGPU:           c.MainGPU,
		TensorSplit:       c.TensorSplit,
		TailFreeSamplingZ: float32(c.TFZ),
		TypicalP:          float32(c.TypicalP),
	}
}
|
@ -1,42 +0,0 @@ |
package backend

import (
	"context"
	"fmt"

	config "github.com/go-skynet/LocalAI/api/config"

	"github.com/go-skynet/LocalAI/api/options"
	"github.com/go-skynet/LocalAI/pkg/grpc/proto"
	"github.com/go-skynet/LocalAI/pkg/grpc/whisper/api"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

func ModelTranscription(audio, language string, loader *model.ModelLoader, c config.Config, o *options.Option) (*api.Result, error) {
	opts := []model.Option{
		model.WithBackendString(model.WhisperBackend),
		model.WithModelFile(c.Model),
		model.WithContext(o.Context),
		model.WithThreads(uint32(c.Threads)),
		model.WithAssetDir(o.AssetsDestination),
	}

	for k, v := range o.ExternalGRPCBackends {
		opts = append(opts, model.WithExternalBackend(k, v))
	}

	whisperModel, err := o.Loader.BackendLoader(opts...)
	if err != nil {
		return nil, err
	}

	if whisperModel == nil {
		return nil, fmt.Errorf("could not load whisper model")
	}

	return whisperModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{
		Dst:      audio,
		Language: language,
		Threads:  uint32(c.Threads),
	})
}
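ModelTranscription backs the /v1/audio/transcriptions route and is exercised earlier in this diff through the go-openai client. A minimal sketch (not part of this change), assuming a running instance with a whisper-1 model configured and a local audio.wav file:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	cfg := openai.DefaultConfig("")
	cfg.BaseURL = "http://127.0.0.1:9090/v1"
	client := openai.NewClientWithConfig(cfg)

	// audio.wav is any local wave file; the model name must match the server config.
	resp, err := client.CreateTranscription(context.Background(), openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "audio.wav",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Text)
}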
|
@ -1,72 +0,0 @@ |
package backend

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/go-skynet/LocalAI/api/options"
	"github.com/go-skynet/LocalAI/pkg/grpc/proto"
	model "github.com/go-skynet/LocalAI/pkg/model"
	"github.com/go-skynet/LocalAI/pkg/utils"
)

func generateUniqueFileName(dir, baseName, ext string) string {
	counter := 1
	fileName := baseName + ext

	for {
		filePath := filepath.Join(dir, fileName)
		_, err := os.Stat(filePath)
		if os.IsNotExist(err) {
			return fileName
		}

		counter++
		fileName = fmt.Sprintf("%s_%d%s", baseName, counter, ext)
	}
}

func ModelTTS(text, modelFile string, loader *model.ModelLoader, o *options.Option) (string, *proto.Result, error) {
	opts := []model.Option{
		model.WithBackendString(model.PiperBackend),
		model.WithModelFile(modelFile),
		model.WithContext(o.Context),
		model.WithAssetDir(o.AssetsDestination),
	}

	for k, v := range o.ExternalGRPCBackends {
		opts = append(opts, model.WithExternalBackend(k, v))
	}

	piperModel, err := o.Loader.BackendLoader(opts...)
	if err != nil {
		return "", nil, err
	}

	if piperModel == nil {
		return "", nil, fmt.Errorf("could not load piper model")
	}

	if err := os.MkdirAll(o.AudioDir, 0755); err != nil {
		return "", nil, fmt.Errorf("failed creating audio directory: %s", err)
	}

	fileName := generateUniqueFileName(o.AudioDir, "piper", ".wav")
	filePath := filepath.Join(o.AudioDir, fileName)

	modelPath := filepath.Join(o.Loader.ModelPath, modelFile)

	if err := utils.VerifyPath(modelPath, o.Loader.ModelPath); err != nil {
		return "", nil, err
	}

	res, err := piperModel.TTS(context.Background(), &proto.TTSRequest{
		Text:  text,
		Model: modelPath,
		Dst:   filePath,
	})

	return filePath, res, err
}
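ModelTTS is reached through the /tts route, and the tts test earlier in this diff posts exactly this payload. A small sketch (not part of this change) that saves the returned wave file, assuming the en-us-kathleen-low.onnx voice has been installed from the gallery:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	payload := []byte(`{"input": "Hello world", "model": "en-us-kathleen-low.onnx"}`)
	resp, err := http.Post("http://127.0.0.1:9090/tts", "application/json", bytes.NewBuffer(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// ModelTTS writes a piper*.wav file under AudioDir and the endpoint returns it as audio/x-wav.
	wav, _ := io.ReadAll(resp.Body)
	_ = os.WriteFile("hello.wav", wav, 0644)
	fmt.Printf("wrote %d bytes, content type %s\n", len(wav), resp.Header.Get("Content-Type"))
}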
|
@ -1,209 +0,0 @@ |
|||||||
package api_config |
|
||||||
|
|
||||||
import ( |
|
||||||
"fmt" |
|
||||||
"io/fs" |
|
||||||
"os" |
|
||||||
"path/filepath" |
|
||||||
"strings" |
|
||||||
"sync" |
|
||||||
|
|
||||||
"gopkg.in/yaml.v3" |
|
||||||
) |
|
||||||
|
|
||||||
type Config struct { |
|
||||||
PredictionOptions `yaml:"parameters"` |
|
||||||
Name string `yaml:"name"` |
|
||||||
StopWords []string `yaml:"stopwords"` |
|
||||||
Cutstrings []string `yaml:"cutstrings"` |
|
||||||
TrimSpace []string `yaml:"trimspace"` |
|
||||||
ContextSize int `yaml:"context_size"` |
|
||||||
F16 bool `yaml:"f16"` |
|
||||||
NUMA bool `yaml:"numa"` |
|
||||||
Threads int `yaml:"threads"` |
|
||||||
Debug bool `yaml:"debug"` |
|
||||||
Roles map[string]string `yaml:"roles"` |
|
||||||
Embeddings bool `yaml:"embeddings"` |
|
||||||
Backend string `yaml:"backend"` |
|
||||||
TemplateConfig TemplateConfig `yaml:"template"` |
|
||||||
MirostatETA float64 `yaml:"mirostat_eta"` |
|
||||||
MirostatTAU float64 `yaml:"mirostat_tau"` |
|
||||||
Mirostat int `yaml:"mirostat"` |
|
||||||
NGPULayers int `yaml:"gpu_layers"` |
|
||||||
MMap bool `yaml:"mmap"` |
|
||||||
MMlock bool `yaml:"mmlock"` |
|
||||||
LowVRAM bool `yaml:"low_vram"` |
|
||||||
|
|
||||||
TensorSplit string `yaml:"tensor_split"` |
|
||||||
MainGPU string `yaml:"main_gpu"` |
|
||||||
ImageGenerationAssets string `yaml:"asset_dir"` |
|
||||||
|
|
||||||
PromptCachePath string `yaml:"prompt_cache_path"` |
|
||||||
PromptCacheAll bool `yaml:"prompt_cache_all"` |
|
||||||
PromptCacheRO bool `yaml:"prompt_cache_ro"` |
|
||||||
|
|
||||||
Grammar string `yaml:"grammar"` |
|
||||||
|
|
||||||
PromptStrings, InputStrings []string |
|
||||||
InputToken [][]int |
|
||||||
functionCallString, functionCallNameString string |
|
||||||
|
|
||||||
FunctionsConfig Functions `yaml:"function"` |
|
||||||
} |
|
||||||
|
|
||||||
type Functions struct { |
|
	DisableNoAction          bool   `yaml:"disable_no_action"`
	NoActionFunctionName     string `yaml:"no_action_function_name"`
	NoActionDescriptionName  string `yaml:"no_action_description_name"`
}

type TemplateConfig struct {
	Completion string `yaml:"completion"`
	Functions  string `yaml:"function"`
	Chat       string `yaml:"chat"`
	Edit       string `yaml:"edit"`
}

type ConfigLoader struct {
	configs map[string]Config
	sync.Mutex
}

func (c *Config) SetFunctionCallString(s string) {
	c.functionCallString = s
}

func (c *Config) SetFunctionCallNameString(s string) {
	c.functionCallNameString = s
}

func (c *Config) ShouldUseFunctions() bool {
	return ((c.functionCallString != "none" || c.functionCallString == "") || c.ShouldCallSpecificFunction())
}

func (c *Config) ShouldCallSpecificFunction() bool {
	return len(c.functionCallNameString) > 0
}

func (c *Config) FunctionToCall() string {
	return c.functionCallNameString
}

func defaultPredictOptions(modelFile string) PredictionOptions {
	return PredictionOptions{
		TopP:        0.7,
		TopK:        80,
		Maxtokens:   512,
		Temperature: 0.9,
		Model:       modelFile,
	}
}

func DefaultConfig(modelFile string) *Config {
	return &Config{
		PredictionOptions: defaultPredictOptions(modelFile),
	}
}

func NewConfigLoader() *ConfigLoader {
	return &ConfigLoader{
		configs: make(map[string]Config),
	}
}

func ReadConfigFile(file string) ([]*Config, error) {
	c := &[]*Config{}
	f, err := os.ReadFile(file)
	if err != nil {
		return nil, fmt.Errorf("cannot read config file: %w", err)
	}
	if err := yaml.Unmarshal(f, c); err != nil {
		return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
	}

	return *c, nil
}

func ReadConfig(file string) (*Config, error) {
	c := &Config{}
	f, err := os.ReadFile(file)
	if err != nil {
		return nil, fmt.Errorf("cannot read config file: %w", err)
	}
	if err := yaml.Unmarshal(f, c); err != nil {
		return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
	}

	return c, nil
}

func (cm *ConfigLoader) LoadConfigFile(file string) error {
	cm.Lock()
	defer cm.Unlock()
	c, err := ReadConfigFile(file)
	if err != nil {
		return fmt.Errorf("cannot load config file: %w", err)
	}

	for _, cc := range c {
		cm.configs[cc.Name] = *cc
	}
	return nil
}

func (cm *ConfigLoader) LoadConfig(file string) error {
	cm.Lock()
	defer cm.Unlock()
	c, err := ReadConfig(file)
	if err != nil {
		return fmt.Errorf("cannot read config file: %w", err)
	}

	cm.configs[c.Name] = *c
	return nil
}

func (cm *ConfigLoader) GetConfig(m string) (Config, bool) {
	cm.Lock()
	defer cm.Unlock()
	v, exists := cm.configs[m]
	return v, exists
}

func (cm *ConfigLoader) ListConfigs() []string {
	cm.Lock()
	defer cm.Unlock()
	var res []string
	for k := range cm.configs {
		res = append(res, k)
	}
	return res
}

func (cm *ConfigLoader) LoadConfigs(path string) error {
	cm.Lock()
	defer cm.Unlock()
	entries, err := os.ReadDir(path)
	if err != nil {
		return err
	}
	files := make([]fs.FileInfo, 0, len(entries))
	for _, entry := range entries {
		info, err := entry.Info()
		if err != nil {
			return err
		}
		files = append(files, info)
	}
	for _, file := range files {
		// Skip anything that is not a YAML config file (templates, model binaries, .keep files)
		if !strings.Contains(file.Name(), ".yaml") {
			continue
		}
		c, err := ReadConfig(filepath.Join(path, file.Name()))
		if err == nil {
			cm.configs[c.Name] = *c
		}
	}

	return nil
}
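For reference, a minimal sketch of the kind of YAML that ReadConfigFile is meant to parse: a list of model configurations whose entries map onto the Config and PredictionOptions fields shown in this file. The nesting under "parameters" and "template" and the model file name are assumptions for illustration only (those top-level yaml keys are not visible in this diff); the entry names list1/list2 match the ones asserted in the test below.

    - name: list1
      parameters:
        model: testmodel
        top_p: 0.7
        top_k: 80
        temperature: 0.9
        max_tokens: 512
      template:
        completion: completion-template
        chat: chat-template
    - name: list2
      parameters:
        model: testmodel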
|
@ -1,56 +0,0 @@ |
|||||||
package api_config_test |
|
||||||
|
|
||||||
import ( |
|
||||||
"os" |
|
||||||
|
|
||||||
. "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/model" |
|
||||||
. "github.com/onsi/ginkgo/v2" |
|
||||||
. "github.com/onsi/gomega" |
|
||||||
) |
|
||||||
|
|
||||||
var _ = Describe("Test cases for config related functions", func() { |
|
||||||
|
|
||||||
var ( |
|
||||||
configFile string |
|
||||||
) |
|
||||||
|
|
||||||
Context("Test Read configuration functions", func() { |
|
||||||
configFile = os.Getenv("CONFIG_FILE") |
|
||||||
It("Test ReadConfigFile", func() { |
|
||||||
config, err := ReadConfigFile(configFile) |
|
||||||
Expect(err).To(BeNil()) |
|
||||||
Expect(config).ToNot(BeNil()) |
|
||||||
// two configs in config.yaml
|
|
||||||
Expect(config[0].Name).To(Equal("list1")) |
|
||||||
Expect(config[1].Name).To(Equal("list2")) |
|
||||||
}) |
|
||||||
|
|
||||||
It("Test LoadConfigs", func() { |
|
||||||
cm := NewConfigLoader() |
|
||||||
opts := options.NewOptions() |
|
||||||
modelLoader := model.NewModelLoader(os.Getenv("MODELS_PATH")) |
|
||||||
options.WithModelLoader(modelLoader)(opts) |
|
||||||
|
|
||||||
err := cm.LoadConfigs(opts.Loader.ModelPath) |
|
||||||
Expect(err).To(BeNil()) |
|
||||||
Expect(cm.ListConfigs()).ToNot(BeNil()) |
|
||||||
|
|
||||||
// config should include the gpt4all model's config
Expect(cm.ListConfigs()).To(ContainElements("gpt4all"))

// config should include the gpt4all-2 model's config
Expect(cm.ListConfigs()).To(ContainElements("gpt4all-2"))

// config should include the text-embedding-ada-002 model's config
Expect(cm.ListConfigs()).To(ContainElements("text-embedding-ada-002"))

// config should include the rwkv_test model's config
Expect(cm.ListConfigs()).To(ContainElements("rwkv_test"))

// config should include the whisper-1 model's config
Expect(cm.ListConfigs()).To(ContainElements("whisper-1"))
})
|
||||||
}) |
|
||||||
}) |
|
@ -1,37 +0,0 @@ |
package api_config

type PredictionOptions struct {

	// Also part of the OpenAI official spec
	Model string `json:"model" yaml:"model"`

	// Also part of the OpenAI official spec
	Language string `json:"language"`

	// Also part of the OpenAI official spec. Use it for returning multiple results
	N int `json:"n"`

	// Common options between all the API calls, part of the OpenAI spec
	TopP        float64 `json:"top_p" yaml:"top_p"`
	TopK        int     `json:"top_k" yaml:"top_k"`
	Temperature float64 `json:"temperature" yaml:"temperature"`
	Maxtokens   int     `json:"max_tokens" yaml:"max_tokens"`
	Echo        bool    `json:"echo"`

	// Custom parameters - not present in the OpenAI API
	Batch         int     `json:"batch" yaml:"batch"`
	F16           bool    `json:"f16" yaml:"f16"`
	IgnoreEOS     bool    `json:"ignore_eos" yaml:"ignore_eos"`
	RepeatPenalty float64 `json:"repeat_penalty" yaml:"repeat_penalty"`
	Keep          int     `json:"n_keep" yaml:"n_keep"`

	MirostatETA float64 `json:"mirostat_eta" yaml:"mirostat_eta"`
	MirostatTAU float64 `json:"mirostat_tau" yaml:"mirostat_tau"`
	Mirostat    int     `json:"mirostat" yaml:"mirostat"`

	FrequencyPenalty float64 `json:"frequency_penalty" yaml:"frequency_penalty"`
	TFZ              float64 `json:"tfz" yaml:"tfz"`

	TypicalP float64 `json:"typical_p" yaml:"typical_p"`
	Seed     int     `json:"seed" yaml:"seed"`
}
|
@ -1,224 +0,0 @@ |
|||||||
package localai |
|
||||||
|
|
||||||
import ( |
|
||||||
"context" |
|
||||||
"fmt" |
|
||||||
"os" |
|
||||||
"strings" |
|
||||||
"sync" |
|
||||||
|
|
||||||
json "github.com/json-iterator/go" |
|
||||||
|
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/gallery" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/utils" |
|
||||||
|
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/google/uuid" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
type galleryOp struct { |
|
||||||
req gallery.GalleryModel |
|
||||||
id string |
|
||||||
galleries []gallery.Gallery |
|
||||||
galleryName string |
|
||||||
} |
|
||||||
|
|
||||||
type galleryOpStatus struct { |
|
||||||
Error error `json:"error"` |
|
||||||
Processed bool `json:"processed"` |
|
||||||
Message string `json:"message"` |
|
||||||
Progress float64 `json:"progress"` |
|
||||||
TotalFileSize string `json:"file_size"` |
|
||||||
DownloadedFileSize string `json:"downloaded_size"` |
|
||||||
} |
|
||||||
|
|
||||||
type galleryApplier struct { |
|
||||||
modelPath string |
|
||||||
sync.Mutex |
|
||||||
C chan galleryOp |
|
||||||
statuses map[string]*galleryOpStatus |
|
||||||
} |
|
||||||
|
|
||||||
func NewGalleryService(modelPath string) *galleryApplier { |
|
||||||
return &galleryApplier{ |
|
||||||
modelPath: modelPath, |
|
||||||
C: make(chan galleryOp), |
|
||||||
statuses: make(map[string]*galleryOpStatus), |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// prepareModel fetches the gallery config for the requested model and installs it (plus any additional files) into the model path
|
|
||||||
func prepareModel(modelPath string, req gallery.GalleryModel, cm *config.ConfigLoader, downloadStatus func(string, string, string, float64)) error { |
|
||||||
|
|
||||||
config, err := gallery.GetGalleryConfigFromURL(req.URL) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
config.Files = append(config.Files, req.AdditionalFiles...) |
|
||||||
|
|
||||||
return gallery.InstallModel(modelPath, req.Name, &config, req.Overrides, downloadStatus) |
|
||||||
} |
|
||||||
|
|
||||||
func (g *galleryApplier) updateStatus(s string, op *galleryOpStatus) { |
|
||||||
g.Lock() |
|
||||||
defer g.Unlock() |
|
||||||
g.statuses[s] = op |
|
||||||
} |
|
||||||
|
|
||||||
func (g *galleryApplier) getStatus(s string) *galleryOpStatus { |
|
||||||
g.Lock() |
|
||||||
defer g.Unlock() |
|
||||||
|
|
||||||
return g.statuses[s] |
|
||||||
} |
|
||||||
|
|
||||||
func (g *galleryApplier) Start(c context.Context, cm *config.ConfigLoader) { |
|
||||||
go func() { |
|
||||||
for { |
|
||||||
select { |
|
||||||
case <-c.Done(): |
|
||||||
return |
|
||||||
case op := <-g.C: |
|
||||||
utils.ResetDownloadTimers() |
|
||||||
|
|
||||||
g.updateStatus(op.id, &galleryOpStatus{Message: "processing", Progress: 0}) |
|
||||||
|
|
||||||
// updates the status with an error
|
|
||||||
updateError := func(e error) { |
|
||||||
g.updateStatus(op.id, &galleryOpStatus{Error: e, Processed: true, Message: "error: " + e.Error()}) |
|
||||||
} |
|
||||||
|
|
||||||
// displayDownload displays the download progress
|
|
||||||
progressCallback := func(fileName string, current string, total string, percentage float64) { |
|
||||||
g.updateStatus(op.id, &galleryOpStatus{Message: "processing", Progress: percentage, TotalFileSize: total, DownloadedFileSize: current}) |
|
||||||
utils.DisplayDownloadFunction(fileName, current, total, percentage) |
|
||||||
} |
|
||||||
|
|
||||||
var err error |
|
||||||
// if the request contains a gallery name, we apply the gallery from the gallery list
|
|
||||||
if op.galleryName != "" { |
|
||||||
if strings.Contains(op.galleryName, "@") { |
|
||||||
err = gallery.InstallModelFromGallery(op.galleries, op.galleryName, g.modelPath, op.req, progressCallback) |
|
||||||
} else { |
|
||||||
err = gallery.InstallModelFromGalleryByName(op.galleries, op.galleryName, g.modelPath, op.req, progressCallback) |
|
||||||
} |
|
||||||
} else { |
|
||||||
err = prepareModel(g.modelPath, op.req, cm, progressCallback) |
|
||||||
} |
|
||||||
|
|
||||||
if err != nil { |
|
||||||
updateError(err) |
|
||||||
continue |
|
||||||
} |
|
||||||
|
|
||||||
// Reload models
|
|
||||||
err = cm.LoadConfigs(g.modelPath) |
|
||||||
if err != nil { |
|
||||||
updateError(err) |
|
||||||
continue |
|
||||||
} |
|
||||||
|
|
||||||
g.updateStatus(op.id, &galleryOpStatus{Processed: true, Message: "completed", Progress: 100}) |
|
||||||
} |
|
||||||
} |
|
||||||
}() |
|
||||||
} |
|
||||||
|
|
||||||
type galleryModel struct { |
|
||||||
gallery.GalleryModel |
|
||||||
ID string `json:"id"` |
|
||||||
} |
|
||||||
|
|
||||||
func ApplyGalleryFromFile(modelPath, s string, cm *config.ConfigLoader, galleries []gallery.Gallery) error { |
|
||||||
dat, err := os.ReadFile(s) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
return ApplyGalleryFromString(modelPath, string(dat), cm, galleries) |
|
||||||
} |
|
||||||
|
|
||||||
func ApplyGalleryFromString(modelPath, s string, cm *config.ConfigLoader, galleries []gallery.Gallery) error { |
|
||||||
var requests []galleryModel |
|
||||||
err := json.Unmarshal([]byte(s), &requests) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
for _, r := range requests { |
|
||||||
utils.ResetDownloadTimers() |
|
||||||
if r.ID == "" { |
|
||||||
err = prepareModel(modelPath, r.GalleryModel, cm, utils.DisplayDownloadFunction) |
|
||||||
} else { |
|
||||||
err = gallery.InstallModelFromGallery(galleries, r.ID, modelPath, r.GalleryModel, utils.DisplayDownloadFunction) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
// Endpoints
|
|
||||||
|
|
||||||
func GetOpStatusEndpoint(g *galleryApplier) func(c *fiber.Ctx) error { |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
|
|
||||||
status := g.getStatus(c.Params("uuid")) |
|
||||||
if status == nil { |
|
||||||
return fmt.Errorf("could not find any status for ID") |
|
||||||
} |
|
||||||
|
|
||||||
return c.JSON(status) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
type GalleryModel struct { |
|
||||||
ID string `json:"id"` |
|
||||||
gallery.GalleryModel |
|
||||||
} |
|
||||||
|
|
||||||
func ApplyModelGalleryEndpoint(modelPath string, cm *config.ConfigLoader, g chan galleryOp, galleries []gallery.Gallery) func(c *fiber.Ctx) error { |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
input := new(GalleryModel) |
|
||||||
// Get input data from the request body
|
|
||||||
if err := c.BodyParser(input); err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
uuid, err := uuid.NewUUID() |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
g <- galleryOp{ |
|
||||||
req: input.GalleryModel, |
|
||||||
id: uuid.String(), |
|
||||||
galleryName: input.ID, |
|
||||||
galleries: galleries, |
|
||||||
} |
|
||||||
return c.JSON(struct { |
|
||||||
ID string `json:"uuid"` |
|
||||||
StatusURL string `json:"status"` |
|
||||||
}{ID: uuid.String(), StatusURL: c.BaseURL() + "/models/jobs/" + uuid.String()}) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func ListModelFromGalleryEndpoint(galleries []gallery.Gallery, basePath string) func(c *fiber.Ctx) error { |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
log.Debug().Msgf("Listing models from galleries: %+v", galleries) |
|
||||||
|
|
||||||
models, err := gallery.AvailableGalleryModels(galleries, basePath) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
log.Debug().Msgf("Models found from galleries: %+v", models) |
|
||||||
for _, m := range models { |
|
||||||
log.Debug().Msgf("Model found from galleries: %+v", m) |
|
||||||
} |
|
||||||
dat, err := json.Marshal(models) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
return c.Send(dat) |
|
||||||
} |
|
||||||
} |
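A rough usage sketch of the two endpoints above. The mount points (/models/apply and /models/jobs/{uuid}) are assumptions, since the route registration is not part of this diff, and the gallery id is illustrative; the response fields do follow the structs above (uuid/status on apply, and the galleryOpStatus fields while polling).

    curl http://localhost:8080/models/apply \
      -H "Content-Type: application/json" \
      -d '{"id": "model-gallery@bert-embeddings"}'
    # => {"uuid":"<job-uuid>","status":"http://localhost:8080/models/jobs/<job-uuid>"}

    curl http://localhost:8080/models/jobs/<job-uuid>
    # => {"error":null,"processed":true,"message":"completed","progress":100,"file_size":"...","downloaded_size":"..."}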
|
@ -1,31 +0,0 @@ |
package localai

import (
	"github.com/go-skynet/LocalAI/api/backend"
	config "github.com/go-skynet/LocalAI/api/config"

	"github.com/go-skynet/LocalAI/api/options"
	"github.com/gofiber/fiber/v2"
)

type TTSRequest struct {
	Model string `json:"model" yaml:"model"`
	Input string `json:"input" yaml:"input"`
}

func TTSEndpoint(cm *config.ConfigLoader, o *options.Option) func(c *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {

		input := new(TTSRequest)
		// Get input data from the request body
		if err := c.BodyParser(input); err != nil {
			return err
		}

		filePath, _, err := backend.ModelTTS(input.Input, input.Model, o.Loader, o)
		if err != nil {
			return err
		}
		return c.Download(filePath)
	}
}
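As a hedged usage sketch of the handler above (the route path is not visible in this diff, /tts is only an assumption, and the model name is illustrative): the endpoint accepts a JSON body with model and input and responds with the generated audio file, which c.Download streams back.

    curl http://localhost:8080/tts \
      -H "Content-Type: application/json" \
      -d '{"model": "my-voice-model", "input": "Hello from LocalAI"}' \
      --output out.wav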
|
@ -1,105 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
|
|
||||||
"github.com/go-skynet/LocalAI/pkg/grammar" |
|
||||||
) |
|
||||||
|
|
||||||
// APIError provides error information returned by the OpenAI API.
|
|
||||||
type APIError struct { |
|
||||||
Code any `json:"code,omitempty"` |
|
||||||
Message string `json:"message"` |
|
||||||
Param *string `json:"param,omitempty"` |
|
||||||
Type string `json:"type"` |
|
||||||
} |
|
||||||
|
|
||||||
type ErrorResponse struct { |
|
||||||
Error *APIError `json:"error,omitempty"` |
|
||||||
} |
|
||||||
|
|
||||||
type OpenAIUsage struct { |
|
||||||
PromptTokens int `json:"prompt_tokens"` |
|
||||||
CompletionTokens int `json:"completion_tokens"` |
|
||||||
TotalTokens int `json:"total_tokens"` |
|
||||||
} |
|
||||||
|
|
||||||
type Item struct { |
|
||||||
Embedding []float32 `json:"embedding"` |
|
||||||
Index int `json:"index"` |
|
||||||
Object string `json:"object,omitempty"` |
|
||||||
|
|
||||||
// Images
|
|
||||||
URL string `json:"url,omitempty"` |
|
||||||
B64JSON string `json:"b64_json,omitempty"` |
|
||||||
} |
|
||||||
|
|
||||||
type OpenAIResponse struct { |
|
||||||
Created int `json:"created,omitempty"` |
|
||||||
Object string `json:"object,omitempty"` |
|
||||||
ID string `json:"id,omitempty"` |
|
||||||
Model string `json:"model,omitempty"` |
|
||||||
Choices []Choice `json:"choices,omitempty"` |
|
||||||
Data []Item `json:"data,omitempty"` |
|
||||||
|
|
||||||
Usage OpenAIUsage `json:"usage"` |
|
||||||
} |
|
||||||
|
|
||||||
type Choice struct { |
|
||||||
Index int `json:"index"` |
|
||||||
FinishReason string `json:"finish_reason,omitempty"` |
|
||||||
Message *Message `json:"message,omitempty"` |
|
||||||
Delta *Message `json:"delta,omitempty"` |
|
||||||
Text string `json:"text,omitempty"` |
|
||||||
} |
|
||||||
|
|
||||||
type Message struct { |
|
||||||
// The message role
|
|
||||||
Role string `json:"role,omitempty" yaml:"role"` |
|
||||||
// The message content
|
|
||||||
Content *string `json:"content" yaml:"content"` |
|
||||||
// A result of a function call
|
|
||||||
FunctionCall interface{} `json:"function_call,omitempty" yaml:"function_call,omitempty"` |
|
||||||
} |
|
||||||
|
|
||||||
type OpenAIModel struct { |
|
||||||
ID string `json:"id"` |
|
||||||
Object string `json:"object"` |
|
||||||
} |
|
||||||
|
|
||||||
type OpenAIRequest struct { |
|
||||||
config.PredictionOptions |
|
||||||
|
|
||||||
// whisper
|
|
||||||
File string `json:"file" validate:"required"` |
|
||||||
//whisper/image
|
|
||||||
ResponseFormat string `json:"response_format"` |
|
||||||
// image
|
|
||||||
Size string `json:"size"` |
|
||||||
// Prompt is read only by completion/image API calls
|
|
||||||
Prompt interface{} `json:"prompt" yaml:"prompt"` |
|
||||||
|
|
||||||
// Edit endpoint
|
|
||||||
Instruction string `json:"instruction" yaml:"instruction"` |
|
||||||
Input interface{} `json:"input" yaml:"input"` |
|
||||||
|
|
||||||
Stop interface{} `json:"stop" yaml:"stop"` |
|
||||||
|
|
||||||
// Messages is read only by chat/completion API calls
|
|
||||||
Messages []Message `json:"messages" yaml:"messages"` |
|
||||||
|
|
||||||
// A list of available functions to call
|
|
||||||
Functions []grammar.Function `json:"functions" yaml:"functions"` |
|
||||||
FunctionCall interface{} `json:"function_call" yaml:"function_call"` // might be a string or an object
|
|
||||||
|
|
||||||
Stream bool `json:"stream"` |
|
||||||
|
|
||||||
// Image (not supported by OpenAI)
|
|
||||||
Mode int `json:"mode"` |
|
||||||
Step int `json:"step"` |
|
||||||
|
|
||||||
// A grammar to constrain the LLM output
|
|
||||||
Grammar string `json:"grammar" yaml:"grammar"` |
|
||||||
|
|
||||||
JSONFunctionGrammarObject *grammar.JSONFunctionStructure `json:"grammar_json_functions" yaml:"grammar_json_functions"` |
|
||||||
} |
|
@ -1,322 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
"bufio" |
|
||||||
"bytes" |
|
||||||
"encoding/json" |
|
||||||
"fmt" |
|
||||||
"strings" |
|
||||||
|
|
||||||
"github.com/go-skynet/LocalAI/api/backend" |
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
"github.com/go-skynet/LocalAI/pkg/grammar" |
|
||||||
model "github.com/go-skynet/LocalAI/pkg/model" |
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
"github.com/valyala/fasthttp" |
|
||||||
) |
|
||||||
|
|
||||||
func ChatEndpoint(cm *config.ConfigLoader, o *options.Option) func(c *fiber.Ctx) error { |
|
||||||
emptyMessage := "" |
|
||||||
|
|
||||||
process := func(s string, req *OpenAIRequest, config *config.Config, loader *model.ModelLoader, responses chan OpenAIResponse) { |
|
||||||
initialMessage := OpenAIResponse{ |
|
||||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: []Choice{{Delta: &Message{Role: "assistant", Content: &emptyMessage}}}, |
|
||||||
Object: "chat.completion.chunk", |
|
||||||
} |
|
||||||
responses <- initialMessage |
|
||||||
|
|
||||||
ComputeChoices(s, req.N, config, o, loader, func(s string, c *[]Choice) {}, func(s string) bool { |
|
||||||
resp := OpenAIResponse{ |
|
||||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: []Choice{{Delta: &Message{Content: &s}, Index: 0}}, |
|
||||||
Object: "chat.completion.chunk", |
|
||||||
} |
|
||||||
|
|
||||||
responses <- resp |
|
||||||
return true |
|
||||||
}) |
|
||||||
close(responses) |
|
||||||
} |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
processFunctions := false |
|
||||||
funcs := grammar.Functions{} |
|
||||||
model, input, err := readInput(c, o.Loader, true) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
config, input, err := readConfig(model, input, cm, o.Loader, o.Debug, o.Threads, o.ContextSize, o.F16) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
log.Debug().Msgf("Configuration read: %+v", config) |
|
||||||
|
|
||||||
// Allow the user to set custom actions via config file
|
|
||||||
// to be "embedded" in each model
|
|
||||||
noActionName := "answer" |
|
||||||
noActionDescription := "use this action to answer without performing any action" |
|
||||||
|
|
||||||
if config.FunctionsConfig.NoActionFunctionName != "" { |
|
||||||
noActionName = config.FunctionsConfig.NoActionFunctionName |
|
||||||
} |
|
||||||
if config.FunctionsConfig.NoActionDescriptionName != "" { |
|
||||||
noActionDescription = config.FunctionsConfig.NoActionDescriptionName |
|
||||||
} |
|
||||||
|
|
||||||
// process functions if we have any defined or if we have a function call string
|
|
||||||
if len(input.Functions) > 0 && config.ShouldUseFunctions() { |
|
||||||
log.Debug().Msgf("Response needs to process functions") |
|
||||||
|
|
||||||
processFunctions = true |
|
||||||
|
|
||||||
noActionGrammar := grammar.Function{ |
|
||||||
Name: noActionName, |
|
||||||
Description: noActionDescription, |
|
||||||
Parameters: map[string]interface{}{ |
|
||||||
"properties": map[string]interface{}{ |
|
||||||
"message": map[string]interface{}{ |
|
||||||
"type": "string", |
|
||||||
"description": "The message to reply the user with", |
|
||||||
}}, |
|
||||||
}, |
|
||||||
} |
|
||||||
|
|
||||||
// Append the no action function
|
|
||||||
funcs = append(funcs, input.Functions...) |
|
||||||
if !config.FunctionsConfig.DisableNoAction { |
|
||||||
funcs = append(funcs, noActionGrammar) |
|
||||||
} |
|
||||||
|
|
||||||
// Force picking one of the functions by the request
|
|
||||||
if config.FunctionToCall() != "" { |
|
||||||
funcs = funcs.Select(config.FunctionToCall()) |
|
||||||
} |
|
||||||
|
|
||||||
// Update input grammar
|
|
||||||
jsStruct := funcs.ToJSONStructure() |
|
||||||
config.Grammar = jsStruct.Grammar("") |
|
||||||
} else if input.JSONFunctionGrammarObject != nil { |
|
||||||
config.Grammar = input.JSONFunctionGrammarObject.Grammar("") |
|
||||||
} |
|
||||||
|
|
||||||
// functions are not supported in stream mode (yet?)
|
|
||||||
toStream := input.Stream && !processFunctions |
|
||||||
|
|
||||||
log.Debug().Msgf("Parameters: %+v", config) |
|
||||||
|
|
||||||
var predInput string |
|
||||||
|
|
||||||
mess := []string{} |
|
||||||
for _, i := range input.Messages { |
|
||||||
var content string |
|
||||||
role := i.Role |
|
||||||
// If this is a function call we might want to customize the role, to make it clearer that the assistant invoked a JSON action.
// If an "assistant_function_call" role is defined in the config we use it, otherwise we keep the role passed in the request.
|
|
||||||
if i.FunctionCall != nil && i.Role == "assistant" { |
|
||||||
roleFn := "assistant_function_call" |
|
||||||
r := config.Roles[roleFn] |
|
||||||
if r != "" { |
|
||||||
role = roleFn |
|
||||||
} |
|
||||||
} |
|
||||||
r := config.Roles[role] |
|
||||||
contentExists := i.Content != nil && *i.Content != "" |
|
||||||
if r != "" { |
|
||||||
if contentExists { |
|
||||||
content = fmt.Sprint(r, " ", *i.Content) |
|
||||||
} |
|
||||||
if i.FunctionCall != nil { |
|
||||||
j, err := json.Marshal(i.FunctionCall) |
|
||||||
if err == nil { |
|
||||||
if contentExists { |
|
||||||
content += "\n" + fmt.Sprint(r, " ", string(j)) |
|
||||||
} else { |
|
||||||
content = fmt.Sprint(r, " ", string(j)) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
} else { |
|
||||||
if contentExists { |
|
||||||
content = fmt.Sprint(*i.Content) |
|
||||||
} |
|
||||||
if i.FunctionCall != nil { |
|
||||||
j, err := json.Marshal(i.FunctionCall) |
|
||||||
if err == nil { |
|
||||||
if contentExists { |
|
||||||
content += "\n" + string(j) |
|
||||||
} else { |
|
||||||
content = string(j) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
mess = append(mess, content) |
|
||||||
} |
|
||||||
|
|
||||||
predInput = strings.Join(mess, "\n") |
|
||||||
log.Debug().Msgf("Prompt (before templating): %s", predInput) |
|
||||||
|
|
||||||
if toStream { |
|
||||||
log.Debug().Msgf("Stream request received") |
|
||||||
c.Context().SetContentType("text/event-stream") |
|
||||||
//c.Response().Header.SetContentType(fiber.MIMETextHTMLCharsetUTF8)
|
|
||||||
// c.Set("Content-Type", "text/event-stream")
|
|
||||||
c.Set("Cache-Control", "no-cache") |
|
||||||
c.Set("Connection", "keep-alive") |
|
||||||
c.Set("Transfer-Encoding", "chunked") |
|
||||||
} |
|
||||||
|
|
||||||
templateFile := config.Model |
|
||||||
|
|
||||||
if config.TemplateConfig.Chat != "" && !processFunctions { |
|
||||||
templateFile = config.TemplateConfig.Chat |
|
||||||
} |
|
||||||
|
|
||||||
if config.TemplateConfig.Functions != "" && processFunctions { |
|
||||||
templateFile = config.TemplateConfig.Functions |
|
||||||
} |
|
||||||
|
|
||||||
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
|
|
||||||
templatedInput, err := o.Loader.TemplatePrefix(templateFile, struct { |
|
||||||
Input string |
|
||||||
Functions []grammar.Function |
|
||||||
}{ |
|
||||||
Input: predInput, |
|
||||||
Functions: funcs, |
|
||||||
}) |
|
||||||
if err == nil { |
|
||||||
predInput = templatedInput |
|
||||||
log.Debug().Msgf("Template found, input modified to: %s", predInput) |
|
||||||
} else { |
|
||||||
log.Debug().Msgf("Template failed loading: %s", err.Error()) |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("Prompt (after templating): %s", predInput) |
|
||||||
if processFunctions { |
|
||||||
log.Debug().Msgf("Grammar: %+v", config.Grammar) |
|
||||||
} |
|
||||||
|
|
||||||
if toStream { |
|
||||||
responses := make(chan OpenAIResponse) |
|
||||||
|
|
||||||
go process(predInput, input, config, o.Loader, responses) |
|
||||||
|
|
||||||
c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) { |
|
||||||
|
|
||||||
for ev := range responses { |
|
||||||
var buf bytes.Buffer |
|
||||||
enc := json.NewEncoder(&buf) |
|
||||||
enc.Encode(ev) |
|
||||||
|
|
||||||
log.Debug().Msgf("Sending chunk: %s", buf.String()) |
|
||||||
fmt.Fprintf(w, "data: %v\n", buf.String()) |
|
||||||
w.Flush() |
|
||||||
} |
|
||||||
|
|
||||||
resp := &OpenAIResponse{ |
|
||||||
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: []Choice{ |
|
||||||
{ |
|
||||||
FinishReason: "stop", |
|
||||||
Index: 0, |
|
||||||
Delta: &Message{Content: &emptyMessage}, |
|
||||||
}}, |
|
||||||
Object: "chat.completion.chunk", |
|
||||||
} |
|
||||||
respData, _ := json.Marshal(resp) |
|
||||||
|
|
||||||
w.WriteString(fmt.Sprintf("data: %s\n\n", respData)) |
|
||||||
w.WriteString("data: [DONE]\n\n") |
|
||||||
w.Flush() |
|
||||||
})) |
|
||||||
return nil |
|
||||||
} |
|
||||||
|
|
||||||
result, err := ComputeChoices(predInput, input.N, config, o, o.Loader, func(s string, c *[]Choice) { |
|
||||||
if processFunctions { |
|
||||||
// As we have to change the result before processing, we can't stream the answer (yet?)
|
|
||||||
ss := map[string]interface{}{} |
|
||||||
json.Unmarshal([]byte(s), &ss) |
|
||||||
log.Debug().Msgf("Function return: %s %+v", s, ss) |
|
||||||
|
|
||||||
// The grammar defines the function name as "function", while OpenAI returns "name"
|
|
||||||
func_name := ss["function"] |
|
||||||
// Similarly, while here arguments is a map[string]interface{}, OpenAI actually wants a stringified object
|
|
||||||
args := ss["arguments"] // arguments needs to be a string, but we return an object from the grammar result (TODO: fix)
|
|
||||||
d, _ := json.Marshal(args) |
|
||||||
|
|
||||||
ss["arguments"] = string(d) |
|
||||||
ss["name"] = func_name |
|
||||||
|
|
||||||
// if do nothing, reply with a message
|
|
||||||
if func_name == noActionName { |
|
||||||
log.Debug().Msgf("nothing to do, computing a reply") |
|
||||||
|
|
||||||
// If there is a message that the LLM already sends as part of the JSON reply, use it
|
|
||||||
arguments := map[string]interface{}{} |
|
||||||
json.Unmarshal([]byte(d), &arguments) |
|
||||||
m, exists := arguments["message"] |
|
||||||
if exists { |
|
||||||
switch message := m.(type) { |
|
||||||
case string: |
|
||||||
if message != "" { |
|
||||||
log.Debug().Msgf("Reply received from LLM: %s", message) |
|
||||||
message = backend.Finetune(*config, predInput, message) |
|
||||||
log.Debug().Msgf("Reply received from LLM(finetuned): %s", message) |
|
||||||
|
|
||||||
*c = append(*c, Choice{Message: &Message{Role: "assistant", Content: &message}}) |
|
||||||
return |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("No action received from LLM, without a message, computing a reply") |
|
||||||
// Otherwise ask the LLM to understand the JSON output and the context, and return a message
|
|
||||||
// Note: This costs (in term of CPU) another computation
|
|
||||||
config.Grammar = "" |
|
||||||
predFunc, err := backend.ModelInference(predInput, o.Loader, *config, o, nil) |
|
||||||
if err != nil { |
|
||||||
log.Error().Msgf("inference error: %s", err.Error()) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
prediction, err := predFunc() |
|
||||||
if err != nil { |
|
||||||
log.Error().Msgf("inference error: %s", err.Error()) |
|
||||||
return |
|
||||||
} |
|
||||||
|
|
||||||
prediction = backend.Finetune(*config, predInput, prediction) |
|
||||||
*c = append(*c, Choice{Message: &Message{Role: "assistant", Content: &prediction}}) |
|
||||||
} else { |
|
||||||
// otherwise reply with the function call
|
|
||||||
*c = append(*c, Choice{ |
|
||||||
FinishReason: "function_call", |
|
||||||
Message: &Message{Role: "assistant", FunctionCall: ss}, |
|
||||||
}) |
|
||||||
} |
|
||||||
|
|
||||||
return |
|
||||||
} |
|
||||||
*c = append(*c, Choice{FinishReason: "stop", Index: 0, Message: &Message{Role: "assistant", Content: &s}}) |
|
||||||
}, nil) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
resp := &OpenAIResponse{ |
|
||||||
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: result, |
|
||||||
Object: "chat.completion", |
|
||||||
} |
|
||||||
respData, _ := json.Marshal(resp) |
|
||||||
log.Debug().Msgf("Response: %s", respData) |
|
||||||
|
|
||||||
// Return the prediction in the response body
|
|
||||||
return c.JSON(resp) |
|
||||||
} |
|
||||||
} |
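To make the function-calling flow above concrete, here is a hedged request sketch. The /v1/chat/completions path and the model and function names are assumptions for illustration; the shape of the reply follows the code above (a choice with finish_reason "function_call" and a message whose function_call carries the selected name plus stringified arguments).

    curl http://localhost:8080/v1/chat/completions \
      -H "Content-Type: application/json" \
      -d '{
        "model": "ggml-gpt4all-j",
        "messages": [{"role": "user", "content": "What is the weather like in Rome?"}],
        "functions": [{
          "name": "get_weather",
          "description": "Get the current weather for a city",
          "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"]
          }
        }]
      }'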
|
@ -1,159 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
"bufio" |
|
||||||
"bytes" |
|
||||||
"encoding/json" |
|
||||||
"errors" |
|
||||||
"fmt" |
|
||||||
|
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
model "github.com/go-skynet/LocalAI/pkg/model" |
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
"github.com/valyala/fasthttp" |
|
||||||
) |
|
||||||
|
|
||||||
// https://platform.openai.com/docs/api-reference/completions
|
|
||||||
func CompletionEndpoint(cm *config.ConfigLoader, o *options.Option) func(c *fiber.Ctx) error { |
|
||||||
process := func(s string, req *OpenAIRequest, config *config.Config, loader *model.ModelLoader, responses chan OpenAIResponse) { |
|
||||||
ComputeChoices(s, req.N, config, o, loader, func(s string, c *[]Choice) {}, func(s string) bool { |
|
||||||
resp := OpenAIResponse{ |
|
||||||
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: []Choice{ |
|
||||||
{ |
|
||||||
Index: 0, |
|
||||||
Text: s, |
|
||||||
}, |
|
||||||
}, |
|
||||||
Object: "text_completion", |
|
||||||
} |
|
||||||
log.Debug().Msgf("Sending goroutine: %s", s) |
|
||||||
|
|
||||||
responses <- resp |
|
||||||
return true |
|
||||||
}) |
|
||||||
close(responses) |
|
||||||
} |
|
||||||
|
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
model, input, err := readInput(c, o.Loader, true) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("`input`: %+v", input) |
|
||||||
|
|
||||||
config, input, err := readConfig(model, input, cm, o.Loader, o.Debug, o.Threads, o.ContextSize, o.F16) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("Parameter Config: %+v", config) |
|
||||||
|
|
||||||
if input.Stream { |
|
||||||
log.Debug().Msgf("Stream request received") |
|
||||||
c.Context().SetContentType("text/event-stream") |
|
||||||
//c.Response().Header.SetContentType(fiber.MIMETextHTMLCharsetUTF8)
|
|
||||||
//c.Set("Content-Type", "text/event-stream")
|
|
||||||
c.Set("Cache-Control", "no-cache") |
|
||||||
c.Set("Connection", "keep-alive") |
|
||||||
c.Set("Transfer-Encoding", "chunked") |
|
||||||
} |
|
||||||
|
|
||||||
templateFile := config.Model |
|
||||||
|
|
||||||
if config.TemplateConfig.Completion != "" { |
|
||||||
templateFile = config.TemplateConfig.Completion |
|
||||||
} |
|
||||||
|
|
||||||
if input.Stream { |
|
||||||
if len(config.PromptStrings) > 1 { |
|
||||||
return errors.New("cannot handle more than 1 `PromptStrings` when Streaming") |
|
||||||
} |
|
||||||
|
|
||||||
predInput := config.PromptStrings[0] |
|
||||||
|
|
||||||
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
|
|
||||||
templatedInput, err := o.Loader.TemplatePrefix(templateFile, struct { |
|
||||||
Input string |
|
||||||
}{ |
|
||||||
Input: predInput, |
|
||||||
}) |
|
||||||
if err == nil { |
|
||||||
predInput = templatedInput |
|
||||||
log.Debug().Msgf("Template found, input modified to: %s", predInput) |
|
||||||
} |
|
||||||
|
|
||||||
responses := make(chan OpenAIResponse) |
|
||||||
|
|
||||||
go process(predInput, input, config, o.Loader, responses) |
|
||||||
|
|
||||||
c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) { |
|
||||||
|
|
||||||
for ev := range responses { |
|
||||||
var buf bytes.Buffer |
|
||||||
enc := json.NewEncoder(&buf) |
|
||||||
enc.Encode(ev) |
|
||||||
|
|
||||||
log.Debug().Msgf("Sending chunk: %s", buf.String()) |
|
||||||
fmt.Fprintf(w, "data: %v\n", buf.String()) |
|
||||||
w.Flush() |
|
||||||
} |
|
||||||
|
|
||||||
resp := &OpenAIResponse{ |
|
||||||
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: []Choice{ |
|
||||||
{ |
|
||||||
Index: 0, |
|
||||||
FinishReason: "stop", |
|
||||||
}, |
|
||||||
}, |
|
||||||
Object: "text_completion", |
|
||||||
} |
|
||||||
respData, _ := json.Marshal(resp) |
|
||||||
|
|
||||||
w.WriteString(fmt.Sprintf("data: %s\n\n", respData)) |
|
||||||
w.WriteString("data: [DONE]\n\n") |
|
||||||
w.Flush() |
|
||||||
})) |
|
||||||
return nil |
|
||||||
} |
|
||||||
|
|
||||||
var result []Choice |
|
||||||
for k, i := range config.PromptStrings { |
|
||||||
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
|
|
||||||
templatedInput, err := o.Loader.TemplatePrefix(templateFile, struct { |
|
||||||
Input string |
|
||||||
}{ |
|
||||||
Input: i, |
|
||||||
}) |
|
||||||
if err == nil { |
|
||||||
i = templatedInput |
|
||||||
log.Debug().Msgf("Template found, input modified to: %s", i) |
|
||||||
} |
|
||||||
|
|
||||||
r, err := ComputeChoices(i, input.N, config, o, o.Loader, func(s string, c *[]Choice) { |
|
||||||
*c = append(*c, Choice{Text: s, FinishReason: "stop", Index: k}) |
|
||||||
}, nil) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
result = append(result, r...) |
|
||||||
} |
|
||||||
|
|
||||||
resp := &OpenAIResponse{ |
|
||||||
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: result, |
|
||||||
Object: "text_completion", |
|
||||||
} |
|
||||||
|
|
||||||
jsonResult, _ := json.Marshal(resp) |
|
||||||
log.Debug().Msgf("Response: %s", jsonResult) |
|
||||||
|
|
||||||
// Return the prediction in the response body
|
|
||||||
return c.JSON(resp) |
|
||||||
} |
|
||||||
} |
|
@ -1,67 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
"encoding/json" |
|
||||||
"fmt" |
|
||||||
|
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
func EditEndpoint(cm *config.ConfigLoader, o *options.Option) func(c *fiber.Ctx) error { |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
model, input, err := readInput(c, o.Loader, true) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
config, input, err := readConfig(model, input, cm, o.Loader, o.Debug, o.Threads, o.ContextSize, o.F16) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("Parameter Config: %+v", config) |
|
||||||
|
|
||||||
templateFile := config.Model |
|
||||||
|
|
||||||
if config.TemplateConfig.Edit != "" { |
|
||||||
templateFile = config.TemplateConfig.Edit |
|
||||||
} |
|
||||||
|
|
||||||
var result []Choice |
|
||||||
for _, i := range config.InputStrings { |
|
||||||
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
|
|
||||||
templatedInput, err := o.Loader.TemplatePrefix(templateFile, struct { |
|
||||||
Input string |
|
||||||
Instruction string |
|
||||||
}{Input: i}) |
|
||||||
if err == nil { |
|
||||||
i = templatedInput |
|
||||||
log.Debug().Msgf("Template found, input modified to: %s", i) |
|
||||||
} |
|
||||||
|
|
||||||
r, err := ComputeChoices(i, input.N, config, o, o.Loader, func(s string, c *[]Choice) { |
|
||||||
*c = append(*c, Choice{Text: s}) |
|
||||||
}, nil) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
result = append(result, r...) |
|
||||||
} |
|
||||||
|
|
||||||
resp := &OpenAIResponse{ |
|
||||||
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Choices: result, |
|
||||||
Object: "edit", |
|
||||||
} |
|
||||||
|
|
||||||
jsonResult, _ := json.Marshal(resp) |
|
||||||
log.Debug().Msgf("Response: %s", jsonResult) |
|
||||||
|
|
||||||
// Return the prediction in the response body
|
|
||||||
return c.JSON(resp) |
|
||||||
} |
|
||||||
} |
|
@ -1,70 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
"encoding/json" |
|
||||||
"fmt" |
|
||||||
|
|
||||||
"github.com/go-skynet/LocalAI/api/backend" |
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
// https://platform.openai.com/docs/api-reference/embeddings
|
|
||||||
func EmbeddingsEndpoint(cm *config.ConfigLoader, o *options.Option) func(c *fiber.Ctx) error { |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
model, input, err := readInput(c, o.Loader, true) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
config, input, err := readConfig(model, input, cm, o.Loader, o.Debug, o.Threads, o.ContextSize, o.F16) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("Parameter Config: %+v", config) |
|
||||||
items := []Item{} |
|
||||||
|
|
||||||
for i, s := range config.InputToken { |
|
||||||
// get the model function to call for the result
|
|
||||||
embedFn, err := backend.ModelEmbedding("", s, o.Loader, *config, o) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
embeddings, err := embedFn() |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
items = append(items, Item{Embedding: embeddings, Index: i, Object: "embedding"}) |
|
||||||
} |
|
||||||
|
|
||||||
for i, s := range config.InputStrings { |
|
||||||
// get the model function to call for the result
|
|
||||||
embedFn, err := backend.ModelEmbedding(s, []int{}, o.Loader, *config, o) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
embeddings, err := embedFn() |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
items = append(items, Item{Embedding: embeddings, Index: i, Object: "embedding"}) |
|
||||||
} |
|
||||||
|
|
||||||
resp := &OpenAIResponse{ |
|
||||||
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
|
|
||||||
Data: items, |
|
||||||
Object: "list", |
|
||||||
} |
|
||||||
|
|
||||||
jsonResult, _ := json.Marshal(resp) |
|
||||||
log.Debug().Msgf("Response: %s", jsonResult) |
|
||||||
|
|
||||||
// Return the prediction in the response body
|
|
||||||
return c.JSON(resp) |
|
||||||
} |
|
||||||
} |
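A short usage sketch for the handler above. The /v1/embeddings route is an assumption (registration is outside this diff); the model name matches the one referenced in the tests earlier in this diff. Both plain strings and token arrays are accepted as input, mirroring the InputStrings/InputToken branches in the code.

    curl http://localhost:8080/v1/embeddings \
      -H "Content-Type: application/json" \
      -d '{"model": "text-embedding-ada-002", "input": "A sentence to embed"}'
    # => {"object":"list","data":[{"embedding":[...],"index":0,"object":"embedding"}],...}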
|
@ -1,158 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
"encoding/base64" |
|
||||||
"encoding/json" |
|
||||||
"fmt" |
|
||||||
"io/ioutil" |
|
||||||
"os" |
|
||||||
"path/filepath" |
|
||||||
"strconv" |
|
||||||
"strings" |
|
||||||
|
|
||||||
"github.com/go-skynet/LocalAI/api/backend" |
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
model "github.com/go-skynet/LocalAI/pkg/model" |
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
// https://platform.openai.com/docs/api-reference/images/create
|
|
||||||
|
|
||||||
/* |
|
||||||
* |
|
||||||
|
|
||||||
curl http://localhost:8080/v1/images/generations \
|
|
||||||
-H "Content-Type: application/json" \
|
|
||||||
-d '{ |
|
||||||
"prompt": "A cute baby sea otter", |
|
||||||
"n": 1, |
|
||||||
"size": "512x512" |
|
||||||
}' |
|
||||||
|
|
||||||
* |
|
||||||
*/ |
|
||||||
func ImageEndpoint(cm *config.ConfigLoader, o *options.Option) func(c *fiber.Ctx) error { |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
m, input, err := readInput(c, o.Loader, false) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
if m == "" { |
|
||||||
m = model.StableDiffusionBackend |
|
||||||
} |
|
||||||
log.Debug().Msgf("Loading model: %+v", m) |
|
||||||
|
|
||||||
config, input, err := readConfig(m, input, cm, o.Loader, o.Debug, 0, 0, false) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request:%w", err) |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("Parameter Config: %+v", config) |
|
||||||
|
|
||||||
// XXX: Only stablediffusion is supported for now
|
|
||||||
if config.Backend == "" { |
|
||||||
config.Backend = model.StableDiffusionBackend |
|
||||||
} |
|
||||||
|
|
||||||
sizeParts := strings.Split(input.Size, "x") |
|
||||||
if len(sizeParts) != 2 { |
|
||||||
return fmt.Errorf("Invalid value for 'size'") |
|
||||||
} |
|
||||||
width, err := strconv.Atoi(sizeParts[0]) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("Invalid value for 'size'") |
|
||||||
} |
|
||||||
height, err := strconv.Atoi(sizeParts[1]) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("Invalid value for 'size'") |
|
||||||
} |
|
||||||
|
|
||||||
b64JSON := false |
|
||||||
if input.ResponseFormat == "b64_json" { |
|
||||||
b64JSON = true |
|
||||||
} |
|
||||||
|
|
||||||
var result []Item |
|
||||||
for _, i := range config.PromptStrings { |
|
||||||
n := input.N |
|
||||||
if input.N == 0 { |
|
||||||
n = 1 |
|
||||||
} |
|
||||||
for j := 0; j < n; j++ { |
|
||||||
prompts := strings.Split(i, "|") |
|
||||||
positive_prompt := prompts[0] |
|
||||||
negative_prompt := "" |
|
||||||
if len(prompts) > 1 { |
|
||||||
negative_prompt = prompts[1] |
|
||||||
} |
|
||||||
|
|
||||||
mode := 0 |
|
||||||
step := 15 |
|
||||||
|
|
||||||
if input.Mode != 0 { |
|
||||||
mode = input.Mode |
|
||||||
} |
|
||||||
|
|
||||||
if input.Step != 0 { |
|
||||||
step = input.Step |
|
||||||
} |
|
||||||
|
|
||||||
tempDir := "" |
|
||||||
if !b64JSON { |
|
||||||
tempDir = o.ImageDir |
|
||||||
} |
|
||||||
// Create a temporary file
|
|
||||||
outputFile, err := ioutil.TempFile(tempDir, "b64") |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
outputFile.Close() |
|
||||||
output := outputFile.Name() + ".png" |
|
||||||
// Rename the temporary file
|
|
||||||
err = os.Rename(outputFile.Name(), output) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
baseURL := c.BaseURL() |
|
||||||
|
|
||||||
fn, err := backend.ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, o.Loader, *config, o) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
if err := fn(); err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
item := &Item{} |
|
||||||
|
|
||||||
if b64JSON { |
|
||||||
defer os.RemoveAll(output) |
|
||||||
data, err := os.ReadFile(output) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
item.B64JSON = base64.StdEncoding.EncodeToString(data) |
|
||||||
} else { |
|
||||||
base := filepath.Base(output) |
|
||||||
item.URL = baseURL + "/generated-images/" + base |
|
||||||
} |
|
||||||
|
|
||||||
result = append(result, *item) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
resp := &OpenAIResponse{ |
|
||||||
Data: result, |
|
||||||
} |
|
||||||
|
|
||||||
jsonResult, _ := json.Marshal(resp) |
|
||||||
log.Debug().Msgf("Response: %s", jsonResult) |
|
||||||
|
|
||||||
// Return the prediction in the response body
|
|
||||||
return c.JSON(resp) |
|
||||||
} |
|
||||||
} |
|
@ -1,36 +0,0 @@ |
package openai

import (
	"github.com/go-skynet/LocalAI/api/backend"
	config "github.com/go-skynet/LocalAI/api/config"
	"github.com/go-skynet/LocalAI/api/options"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

func ComputeChoices(predInput string, n int, config *config.Config, o *options.Option, loader *model.ModelLoader, cb func(string, *[]Choice), tokenCallback func(string) bool) ([]Choice, error) {
	result := []Choice{}

	if n == 0 {
		n = 1
	}

	// get the model function to call for the result
	predFunc, err := backend.ModelInference(predInput, loader, *config, o, tokenCallback)
	if err != nil {
		return result, err
	}

	for i := 0; i < n; i++ {
		prediction, err := predFunc()
		if err != nil {
			return result, err
		}

		prediction = backend.Finetune(*config, predInput, prediction)
		cb(prediction, &result)

		//result = append(result, Choice{Text: prediction})
	}
	return result, err
}
|
@ -1,37 +0,0 @@ |
package openai

import (
	config "github.com/go-skynet/LocalAI/api/config"
	model "github.com/go-skynet/LocalAI/pkg/model"
	"github.com/gofiber/fiber/v2"
)

func ListModelsEndpoint(loader *model.ModelLoader, cm *config.ConfigLoader) func(ctx *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {
		models, err := loader.ListModels()
		if err != nil {
			return err
		}
		var mm map[string]interface{} = map[string]interface{}{}

		dataModels := []OpenAIModel{}
		for _, m := range models {
			mm[m] = nil
			dataModels = append(dataModels, OpenAIModel{ID: m, Object: "model"})
		}

		for _, k := range cm.ListConfigs() {
			if _, exists := mm[k]; !exists {
				dataModels = append(dataModels, OpenAIModel{ID: k, Object: "model"})
			}
		}

		return c.JSON(struct {
			Object string        `json:"object"`
			Data   []OpenAIModel `json:"data"`
		}{
			Object: "list",
			Data:   dataModels,
		})
	}
}
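Assuming the handler above is registered on the usual /v1/models route (the registration is not shown in this diff), listing the merged set of model files and config names looks roughly like this; the ids are illustrative and taken from the test names earlier in this diff:

    curl http://localhost:8080/v1/models
    # => {"object":"list","data":[{"id":"gpt4all","object":"model"},{"id":"gpt4all-2","object":"model"},...]}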
|
@ -1,234 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
"encoding/json" |
|
||||||
"fmt" |
|
||||||
"os" |
|
||||||
"path/filepath" |
|
||||||
"strings" |
|
||||||
|
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
model "github.com/go-skynet/LocalAI/pkg/model" |
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
func readInput(c *fiber.Ctx, loader *model.ModelLoader, randomModel bool) (string, *OpenAIRequest, error) { |
|
||||||
input := new(OpenAIRequest) |
|
||||||
// Get input data from the request body
|
|
||||||
if err := c.BodyParser(input); err != nil { |
|
||||||
return "", nil, err |
|
||||||
} |
|
||||||
|
|
||||||
modelFile := input.Model |
|
||||||
|
|
||||||
if c.Params("model") != "" { |
|
||||||
modelFile = c.Params("model") |
|
||||||
} |
|
||||||
|
|
||||||
received, _ := json.Marshal(input) |
|
||||||
|
|
||||||
log.Debug().Msgf("Request received: %s", string(received)) |
|
||||||
|
|
||||||
// Set model from bearer token, if available
|
|
||||||
bearer := strings.TrimPrefix(c.Get("authorization"), "Bearer ")
|
||||||
bearerExists := bearer != "" && loader.ExistsInModelPath(bearer) |
|
||||||
|
|
||||||
// If no model was specified, take the first available
|
|
||||||
if modelFile == "" && !bearerExists && randomModel { |
|
||||||
models, _ := loader.ListModels() |
|
||||||
if len(models) > 0 { |
|
||||||
modelFile = models[0] |
|
||||||
log.Debug().Msgf("No model specified, using: %s", modelFile) |
|
||||||
} else { |
|
||||||
log.Debug().Msgf("No model specified, returning error") |
|
||||||
return "", nil, fmt.Errorf("no model specified") |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// If a model is found in the bearer token, it takes precedence
|
|
||||||
if bearerExists { |
|
||||||
log.Debug().Msgf("Using model from bearer token: %s", bearer) |
|
||||||
modelFile = bearer |
|
||||||
} |
|
||||||
return modelFile, input, nil |
|
||||||
} |
|
||||||
|
|
||||||
func updateConfig(config *config.Config, input *OpenAIRequest) { |
|
||||||
if input.Echo { |
|
||||||
config.Echo = input.Echo |
|
||||||
} |
|
||||||
if input.TopK != 0 { |
|
||||||
config.TopK = input.TopK |
|
||||||
} |
|
||||||
if input.TopP != 0 { |
|
||||||
config.TopP = input.TopP |
|
||||||
} |
|
||||||
|
|
||||||
if input.Grammar != "" { |
|
||||||
config.Grammar = input.Grammar |
|
||||||
} |
|
||||||
|
|
||||||
if input.Temperature != 0 { |
|
||||||
config.Temperature = input.Temperature |
|
||||||
} |
|
||||||
|
|
||||||
if input.Maxtokens != 0 { |
|
||||||
config.Maxtokens = input.Maxtokens |
|
||||||
} |
|
||||||
|
|
||||||
switch stop := input.Stop.(type) { |
|
||||||
case string: |
|
||||||
if stop != "" { |
|
||||||
config.StopWords = append(config.StopWords, stop) |
|
||||||
} |
|
||||||
case []interface{}: |
|
||||||
for _, pp := range stop { |
|
||||||
if s, ok := pp.(string); ok { |
|
||||||
config.StopWords = append(config.StopWords, s) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
if input.RepeatPenalty != 0 { |
|
||||||
config.RepeatPenalty = input.RepeatPenalty |
|
||||||
} |
|
||||||
|
|
||||||
if input.Keep != 0 { |
|
||||||
config.Keep = input.Keep |
|
||||||
} |
|
||||||
|
|
||||||
if input.Batch != 0 { |
|
||||||
config.Batch = input.Batch |
|
||||||
} |
|
||||||
|
|
||||||
if input.F16 { |
|
||||||
config.F16 = input.F16 |
|
||||||
} |
|
||||||
|
|
||||||
if input.IgnoreEOS { |
|
||||||
config.IgnoreEOS = input.IgnoreEOS |
|
||||||
} |
|
||||||
|
|
||||||
if input.Seed != 0 { |
|
||||||
config.Seed = input.Seed |
|
||||||
} |
|
||||||
|
|
||||||
if input.Mirostat != 0 { |
|
||||||
config.Mirostat = input.Mirostat |
|
||||||
} |
|
||||||
|
|
||||||
if input.MirostatETA != 0 { |
|
||||||
config.MirostatETA = input.MirostatETA |
|
||||||
} |
|
||||||
|
|
||||||
if input.MirostatTAU != 0 { |
|
||||||
config.MirostatTAU = input.MirostatTAU |
|
||||||
} |
|
||||||
|
|
||||||
if input.TypicalP != 0 { |
|
||||||
config.TypicalP = input.TypicalP |
|
||||||
} |
|
||||||
|
|
||||||
switch inputs := input.Input.(type) { |
|
||||||
case string: |
|
||||||
if inputs != "" { |
|
||||||
config.InputStrings = append(config.InputStrings, inputs) |
|
||||||
} |
|
||||||
case []interface{}: |
|
||||||
for _, pp := range inputs { |
|
||||||
switch i := pp.(type) { |
|
||||||
case string: |
|
||||||
config.InputStrings = append(config.InputStrings, i) |
|
||||||
case []interface{}: |
|
||||||
tokens := []int{} |
|
||||||
for _, ii := range i { |
|
||||||
tokens = append(tokens, int(ii.(float64))) |
|
||||||
} |
|
||||||
config.InputToken = append(config.InputToken, tokens) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Can be either a string or an object
|
|
||||||
switch fnc := input.FunctionCall.(type) { |
|
||||||
case string: |
|
||||||
if fnc != "" { |
|
||||||
config.SetFunctionCallString(fnc) |
|
||||||
} |
|
||||||
case map[string]interface{}: |
|
||||||
var name string |
|
||||||
n, exists := fnc["name"] |
|
||||||
if exists { |
|
||||||
nn, ok := n.(string)
if ok {
|
||||||
name = nn |
|
||||||
} |
|
||||||
} |
|
||||||
config.SetFunctionCallNameString(name) |
|
||||||
} |
|
||||||
|
|
||||||
switch p := input.Prompt.(type) { |
|
||||||
case string: |
|
||||||
config.PromptStrings = append(config.PromptStrings, p) |
|
||||||
case []interface{}: |
|
||||||
for _, pp := range p { |
|
||||||
if s, ok := pp.(string); ok { |
|
||||||
config.PromptStrings = append(config.PromptStrings, s) |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func readConfig(modelFile string, input *OpenAIRequest, cm *config.ConfigLoader, loader *model.ModelLoader, debug bool, threads, ctx int, f16 bool) (*config.Config, *OpenAIRequest, error) { |
|
||||||
// Load a config file if present after the model name
|
|
||||||
modelConfig := filepath.Join(loader.ModelPath, modelFile+".yaml") |
|
||||||
|
|
||||||
var cfg *config.Config |
|
||||||
|
|
||||||
defaults := func() { |
|
||||||
cfg = config.DefaultConfig(modelFile) |
|
||||||
cfg.ContextSize = ctx |
|
||||||
cfg.Threads = threads |
|
||||||
cfg.F16 = f16 |
|
||||||
cfg.Debug = debug |
|
||||||
} |
|
||||||
|
|
||||||
cfgExisting, exists := cm.GetConfig(modelFile) |
|
||||||
if !exists { |
|
||||||
if _, err := os.Stat(modelConfig); err == nil { |
|
||||||
if err := cm.LoadConfig(modelConfig); err != nil { |
|
||||||
return nil, nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error()) |
|
||||||
} |
|
||||||
cfgExisting, exists = cm.GetConfig(modelFile) |
|
||||||
if exists { |
|
||||||
cfg = &cfgExisting |
|
||||||
} else { |
|
||||||
defaults() |
|
||||||
} |
|
||||||
} else { |
|
||||||
defaults() |
|
||||||
} |
|
||||||
} else { |
|
||||||
cfg = &cfgExisting |
|
||||||
} |
|
||||||
|
|
||||||
// Set the parameters for the language model prediction
|
|
||||||
updateConfig(cfg, input) |
|
||||||
|
|
||||||
// Don't allow 0 as setting
|
|
||||||
if cfg.Threads == 0 { |
|
||||||
if threads != 0 { |
|
||||||
cfg.Threads = threads |
|
||||||
} else { |
|
||||||
cfg.Threads = 4 |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Enforce debug flag if passed from CLI
|
|
||||||
if debug { |
|
||||||
cfg.Debug = true |
|
||||||
} |
|
||||||
|
|
||||||
return cfg, input, nil |
|
||||||
} |
|
@ -1,71 +0,0 @@ |
|||||||
package openai |
|
||||||
|
|
||||||
import ( |
|
||||||
"fmt" |
|
||||||
"io" |
|
||||||
"net/http" |
|
||||||
"os" |
|
||||||
"path" |
|
||||||
"path/filepath" |
|
||||||
|
|
||||||
"github.com/go-skynet/LocalAI/api/backend" |
|
||||||
config "github.com/go-skynet/LocalAI/api/config" |
|
||||||
"github.com/go-skynet/LocalAI/api/options" |
|
||||||
|
|
||||||
"github.com/gofiber/fiber/v2" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
// https://platform.openai.com/docs/api-reference/audio/create
|
|
||||||
func TranscriptEndpoint(cm *config.ConfigLoader, o *options.Option) func(c *fiber.Ctx) error { |
|
||||||
return func(c *fiber.Ctx) error { |
|
||||||
m, input, err := readInput(c, o.Loader, false) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request: %w", err)
|
||||||
} |
|
||||||
|
|
||||||
config, input, err := readConfig(m, input, cm, o.Loader, o.Debug, o.Threads, o.ContextSize, o.F16) |
|
||||||
if err != nil { |
|
||||||
return fmt.Errorf("failed reading parameters from request: %w", err)
|
||||||
} |
|
||||||
// retrieve the file data from the request
|
|
||||||
file, err := c.FormFile("file") |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
f, err := file.Open() |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
defer f.Close() |
|
||||||
|
|
||||||
dir, err := os.MkdirTemp("", "whisper") |
|
||||||
|
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
defer os.RemoveAll(dir) |
|
||||||
|
|
||||||
dst := filepath.Join(dir, path.Base(file.Filename)) |
|
||||||
dstFile, err := os.Create(dst) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
if _, err := io.Copy(dstFile, f); err != nil { |
|
||||||
log.Debug().Msgf("Audio file copying error %+v - %+v - err %+v", file.Filename, dst, err) |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("Audio file copied to: %+v", dst) |
|
||||||
|
|
||||||
tr, err := backend.ModelTranscription(dst, input.Language, o.Loader, *config, o) |
|
||||||
if err != nil { |
|
||||||
return err |
|
||||||
} |
|
||||||
|
|
||||||
log.Debug().Msgf("Transcribed: %+v", tr)
|
||||||
// TODO: handle different outputs here
|
|
||||||
return c.Status(http.StatusOK).JSON(tr) |
|
||||||
} |
|
||||||
} |
|
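The transcription handler above reads the upload from the multipart `file` form field, stores it in a temporary directory, and returns the transcription result as JSON, mirroring the OpenAI audio API it links to. As a rough sketch only (the host, port, endpoint path, and `model` field are assumptions about a typical deployment, not taken from this diff), a Go client could exercise such an endpoint like this:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	// Hypothetical endpoint; adjust host, port, and path to your deployment.
	url := "http://localhost:8080/v1/audio/transcriptions"

	f, err := os.Open("sample.wav") // assumed local audio file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	var body bytes.Buffer
	w := multipart.NewWriter(&body)

	// The handler reads the upload from the "file" form field.
	part, err := w.CreateFormFile("file", "sample.wav")
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(part, f); err != nil {
		panic(err)
	}
	// The model name is an assumption; it must match a model known to the server.
	if err := w.WriteField("model", "whisper-1"); err != nil {
		panic(err)
	}
	w.Close()

	req, err := http.NewRequest(http.MethodPost, url, &body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", w.FormDataContentType())

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // raw JSON transcription result
}
```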
@ -1,186 +0,0 @@ |
|||||||
package options |
|
||||||
|
|
||||||
import ( |
|
||||||
"context" |
|
||||||
"embed" |
|
||||||
"encoding/json" |
|
||||||
|
|
||||||
"github.com/go-skynet/LocalAI/pkg/gallery" |
|
||||||
model "github.com/go-skynet/LocalAI/pkg/model" |
|
||||||
"github.com/rs/zerolog/log" |
|
||||||
) |
|
||||||
|
|
||||||
type Option struct { |
|
||||||
Context context.Context |
|
||||||
ConfigFile string |
|
||||||
Loader *model.ModelLoader |
|
||||||
UploadLimitMB, Threads, ContextSize int |
|
||||||
F16 bool |
|
||||||
Debug, DisableMessage bool |
|
||||||
ImageDir string |
|
||||||
AudioDir string |
|
||||||
CORS bool |
|
||||||
PreloadJSONModels string |
|
||||||
PreloadModelsFromPath string |
|
||||||
CORSAllowOrigins string |
|
||||||
|
|
||||||
Galleries []gallery.Gallery |
|
||||||
|
|
||||||
BackendAssets embed.FS |
|
||||||
AssetsDestination string |
|
||||||
|
|
||||||
ExternalGRPCBackends map[string]string |
|
||||||
|
|
||||||
AutoloadGalleries bool |
|
||||||
} |
|
||||||
|
|
||||||
type AppOption func(*Option) |
|
||||||
|
|
||||||
func NewOptions(o ...AppOption) *Option { |
|
||||||
opt := &Option{ |
|
||||||
Context: context.Background(), |
|
||||||
UploadLimitMB: 15, |
|
||||||
Threads: 1, |
|
||||||
ContextSize: 512, |
|
||||||
Debug: true, |
|
||||||
DisableMessage: true, |
|
||||||
} |
|
||||||
for _, oo := range o { |
|
||||||
oo(opt) |
|
||||||
} |
|
||||||
return opt |
|
||||||
} |
|
||||||
|
|
||||||
func WithCors(b bool) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.CORS = b |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
var EnableGalleriesAutoload = func(o *Option) { |
|
||||||
o.AutoloadGalleries = true |
|
||||||
} |
|
||||||
|
|
||||||
func WithExternalBackend(name string, uri string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
if o.ExternalGRPCBackends == nil { |
|
||||||
o.ExternalGRPCBackends = make(map[string]string) |
|
||||||
} |
|
||||||
o.ExternalGRPCBackends[name] = uri |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithCorsAllowOrigins(b string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.CORSAllowOrigins = b |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithBackendAssetsOutput(out string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.AssetsDestination = out |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithBackendAssets(f embed.FS) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.BackendAssets = f |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithStringGalleries(galls string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
if galls == "" { |
|
||||||
log.Debug().Msgf("no galleries to load") |
|
||||||
return |
|
||||||
} |
|
||||||
var galleries []gallery.Gallery |
|
||||||
if err := json.Unmarshal([]byte(galls), &galleries); err != nil { |
|
||||||
log.Error().Msgf("failed loading galleries: %s", err.Error()) |
|
||||||
} |
|
||||||
o.Galleries = append(o.Galleries, galleries...) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithGalleries(galleries []gallery.Gallery) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.Galleries = append(o.Galleries, galleries...) |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithContext(ctx context.Context) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.Context = ctx |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithYAMLConfigPreload(configFile string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.PreloadModelsFromPath = configFile |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithJSONStringPreload(configFile string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.PreloadJSONModels = configFile |
|
||||||
} |
|
||||||
} |
|
||||||
func WithConfigFile(configFile string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.ConfigFile = configFile |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithModelLoader(loader *model.ModelLoader) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.Loader = loader |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithUploadLimitMB(limit int) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.UploadLimitMB = limit |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithThreads(threads int) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.Threads = threads |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithContextSize(ctxSize int) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.ContextSize = ctxSize |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithF16(f16 bool) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.F16 = f16 |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithDebug(debug bool) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.Debug = debug |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithDisableMessage(disableMessage bool) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.DisableMessage = disableMessage |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithAudioDir(audioDir string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.AudioDir = audioDir |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
func WithImageDir(imageDir string) AppOption { |
|
||||||
return func(o *Option) { |
|
||||||
o.ImageDir = imageDir |
|
||||||
} |
|
||||||
} |
|
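The `options` package removed above is a plain functional-options setup: `NewOptions` builds an `Option` with defaults and then applies each `AppOption` in turn. Below is a minimal sketch of how a caller might compose these options; the import paths are the ones used elsewhere in this diff, while the concrete values and the nil loader are purely illustrative:

```go
package main

import (
	"context"

	"github.com/go-skynet/LocalAI/api/options"
	model "github.com/go-skynet/LocalAI/pkg/model"
)

func main() {
	// Illustrative only: a real caller would construct a model.ModelLoader
	// pointing at its models directory instead of passing nil.
	var loader *model.ModelLoader

	o := options.NewOptions(
		options.WithContext(context.Background()),
		options.WithModelLoader(loader),
		options.WithThreads(4),
		options.WithContextSize(1024),
		options.WithF16(false),
		options.WithDebug(true),
		options.WithUploadLimitMB(20),
	)
	_ = o // a real caller would pass this on to whatever consumes *options.Option
}
```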
@ -1,6 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
import "embed" |
|
||||||
|
|
||||||
//go:embed backend-assets/*
|
|
||||||
var backendAssets embed.FS |
|
@ -1,22 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
bert "github.com/go-skynet/LocalAI/pkg/grpc/llm/bert" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &bert.Embeddings{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
bloomz "github.com/go-skynet/LocalAI/pkg/grpc/llm/bloomz" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &bloomz.LLM{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.Dolly{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.Falcon{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,25 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// GRPC Falcon server
|
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
falcon "github.com/go-skynet/LocalAI/pkg/grpc/llm/falcon" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &falcon.LLM{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.GPT2{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
gpt4all "github.com/go-skynet/LocalAI/pkg/grpc/llm/gpt4all" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &gpt4all.LLM{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.GPTJ{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.GPTNeoX{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
langchain "github.com/go-skynet/LocalAI/pkg/grpc/llm/langchain" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &langchain.LLM{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,25 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// GRPC llama-grammar server
|
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
llama "github.com/go-skynet/LocalAI/pkg/grpc/llm/llama-grammar" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &llama.LLM{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,25 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// GRPC llama server
|
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
llama "github.com/go-skynet/LocalAI/pkg/grpc/llm/llama" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &llama.LLM{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.MPT{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
tts "github.com/go-skynet/LocalAI/pkg/grpc/tts" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &tts.Piper{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.Replit{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
rwkv "github.com/go-skynet/LocalAI/pkg/grpc/llm/rwkv" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &rwkv.LLM{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
image "github.com/go-skynet/LocalAI/pkg/grpc/image" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &image.StableDiffusion{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transformers "github.com/go-skynet/LocalAI/pkg/grpc/llm/transformers" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transformers.Starcoder{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,23 +0,0 @@ |
|||||||
package main |
|
||||||
|
|
||||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
|
||||||
|
|
||||||
import ( |
|
||||||
"flag" |
|
||||||
|
|
||||||
transcribe "github.com/go-skynet/LocalAI/pkg/grpc/transcribe" |
|
||||||
|
|
||||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc" |
|
||||||
) |
|
||||||
|
|
||||||
var ( |
|
||||||
addr = flag.String("addr", "localhost:50051", "the address to connect to") |
|
||||||
) |
|
||||||
|
|
||||||
func main() { |
|
||||||
flag.Parse() |
|
||||||
|
|
||||||
if err := grpc.StartServer(*addr, &transcribe.Whisper{}); err != nil { |
|
||||||
panic(err) |
|
||||||
} |
|
||||||
} |
|
@ -1,15 +0,0 @@ |
|||||||
version: '3.6' |
|
||||||
|
|
||||||
services: |
|
||||||
api: |
|
||||||
image: quay.io/go-skynet/local-ai:latest |
|
||||||
build: |
|
||||||
context: . |
|
||||||
dockerfile: Dockerfile |
|
||||||
ports: |
|
||||||
- 8080:8080 |
|
||||||
env_file: |
|
||||||
- .env |
|
||||||
volumes: |
|
||||||
- ./models:/models:cached |
|
||||||
command: ["/usr/bin/local-ai" ] |
|
@ -1,21 +0,0 @@ |
|||||||
#!/bin/bash |
|
||||||
set -e |
|
||||||
|
|
||||||
cd /build |
|
||||||
|
|
||||||
if [ "$REBUILD" != "false" ]; then |
|
||||||
rm -rf ./local-ai |
|
||||||
ESPEAK_DATA=/build/lib/Linux-$(uname -m)/piper_phonemize/lib/espeak-ng-data make build -j${BUILD_PARALLELISM:-1} |
|
||||||
else |
|
||||||
echo "@@@@@" |
|
||||||
echo "Skipping rebuild" |
|
||||||
echo "@@@@@" |
|
||||||
echo "If you are experiencing issues with the pre-compiled builds, try setting REBUILD=true" |
|
||||||
echo "If you are still experiencing issues with the build, try setting CMAKE_ARGS and disable the instructions set as needed:" |
|
||||||
echo 'CMAKE_ARGS="-DLLAMA_F16C=OFF -DLLAMA_AVX512=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF"' |
|
||||||
echo "see the documentation at: https://localai.io/basics/build/index.html" |
|
||||||
echo "Note: See also https://github.com/go-skynet/LocalAI/issues/288" |
|
||||||
echo "@@@@@" |
|
||||||
fi |
|
||||||
|
|
||||||
./local-ai "$@" |
|
@ -1,153 +0,0 @@ |
|||||||
# Examples |
|
||||||
|
|
||||||
Here is a list of projects that can easily be integrated with the LocalAI backend. |
|
||||||
|
|
||||||
### Projects |
|
||||||
|
|
||||||
### AutoGPT |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
This example shows how to use AutoGPT with LocalAI. |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/autoGPT/) |
|
||||||
|
|
||||||
### Chatbot-UI |
|
||||||
|
|
||||||
_by [@mkellerman](https://github.com/mkellerman)_ |
|
||||||
|
|
||||||
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) |
|
||||||
|
|
||||||
This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui). |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/) |
|
||||||
|
|
||||||
There is also a separate example to show how to manually setup a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/) |
|
||||||
|
|
||||||
### K8sGPT |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
This example shows how to use LocalAI inside Kubernetes with [k8sgpt](https://k8sgpt.ai).
|
||||||
|
|
||||||
![Screenshot from 2023-06-19 23-58-47](https://github.com/go-skynet/go-ggml-transformers.cpp/assets/2420543/cab87409-ee68-44ae-8d53-41627fb49509) |
|
||||||
|
|
||||||
### Flowise |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI. |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/) |
|
||||||
|
|
||||||
### Discord bot |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
Run a Discord bot that lets you talk directly with a model.
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/discord-bot/), or for a live demo you can talk with our bot in #random-bot in our discord server. |
|
||||||
|
|
||||||
### Langchain |
|
||||||
|
|
||||||
_by [@dave-gray101](https://github.com/dave-gray101)_ |
|
||||||
|
|
||||||
A ready-to-use example showing end to end how to integrate LocalAI with LangChain.
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain/) |
|
||||||
|
|
||||||
### Langchain Python |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
A ready-to-use example showing end to end how to integrate LocalAI with LangChain from Python.
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-python/) |
|
||||||
|
|
||||||
### LocalAI functions |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
A ready-to-use example showing how to use OpenAI functions with LocalAI.
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/functions/) |
|
||||||
|
|
||||||
### LocalAI WebUI |
|
||||||
|
|
||||||
_by [@dhruvgera](https://github.com/dhruvgera)_ |
|
||||||
|
|
||||||
![image](https://user-images.githubusercontent.com/42107491/235344183-44b5967d-ba22-4331-804c-8da7004a5d35.png) |
|
||||||
|
|
||||||
A light, community-maintained web interface for LocalAI |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/localai-webui/) |
|
||||||
|
|
||||||
### How to run rwkv models |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
A full example on how to run RWKV models with LocalAI |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv/) |
|
||||||
|
|
||||||
### PrivateGPT |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
A full example on how to run PrivateGPT with LocalAI |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/privateGPT/) |
|
||||||
|
|
||||||
### Slack bot |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
Run a Slack bot that lets you talk directly with a model.
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-bot/) |
|
||||||
|
|
||||||
### Slack bot (Question answering) |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
Run a Slack bot, ideal for teams, that lets you ask questions about a documentation website or a GitHub repository.
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-qa-bot/) |
|
||||||
|
|
||||||
### Question answering on documents with llama-index |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
Shows how to integrate with [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents. |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/query_data/) |
|
||||||
|
|
||||||
### Question answering on documents with langchain and chroma |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_ |
|
||||||
|
|
||||||
Shows how to integrate with `Langchain` and `Chroma` to enable question answering on a set of documents. |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-chroma/) |
|
||||||
|
|
||||||
### Telegram bot |
|
||||||
|
|
||||||
_by [@mudler](https://github.com/mudler)_
|
||||||
|
|
||||||
![Screenshot from 2023-06-09 00-36-26](https://github.com/go-skynet/LocalAI/assets/2420543/e98b4305-fa2d-41cf-9d2f-1bb2d75ca902) |
|
||||||
|
|
||||||
Use LocalAI to power a Telegram bot assistant, with Image generation and audio support! |
|
||||||
|
|
||||||
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/telegram-bot/) |
|
||||||
|
|
||||||
### Template for Runpod.io |
|
||||||
|
|
||||||
_by [@fHachenberg](https://github.com/fHachenberg)_ |
|
||||||
|
|
||||||
Allows running any LocalAI-compatible model as a backend on the servers of https://runpod.io.
|
||||||
|
|
||||||
[Check it out here](https://runpod.io/gsc?template=uv9mtqnrd0&ref=984wlcra) |
|
||||||
|
|
||||||
## Want to contribute? |
|
||||||
|
|
||||||
Create an issue, and put `Example: <description>` in the title! We will post your examples here. |
|
@ -1,5 +0,0 @@ |
|||||||
OPENAI_API_KEY=sk---anystringhere |
|
||||||
OPENAI_API_BASE=http://api:8080/v1 |
|
||||||
# Models to preload at start |
|
||||||
# Here we configure gpt4all as gpt-3.5-turbo and bert as embeddings |
|
||||||
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}] |
|
@ -1,32 +0,0 @@ |
|||||||
# AutoGPT |
|
||||||
|
|
||||||
Example of integration with [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT). |
|
||||||
|
|
||||||
## Run |
|
||||||
|
|
||||||
```bash |
|
||||||
# Clone LocalAI |
|
||||||
git clone https://github.com/go-skynet/LocalAI |
|
||||||
|
|
||||||
cd LocalAI/examples/autoGPT |
|
||||||
|
|
||||||
docker-compose run --rm auto-gpt |
|
||||||
``` |
|
||||||
|
|
||||||
Note: the example automatically downloads the `gpt4all` model because it is under a permissive license, but that model does not seem capable enough to drive AutoGPT reliably; WizardLM-7B-uncensored seems to perform better (with `f16: true`).
|
||||||
|
|
||||||
See the `.env` configuration file to set a different model with the [model-gallery](https://github.com/go-skynet/model-gallery) by editing `PRELOAD_MODELS`. |
|
||||||
|
|
||||||
## Without docker |
|
||||||
|
|
||||||
Run AutoGPT with `OPENAI_API_BASE` pointing to the LocalAI endpoint. If you run it locally for instance: |
|
||||||
|
|
||||||
``` |
|
||||||
OPENAI_API_BASE=http://localhost:8080 python ... |
|
||||||
``` |
|
||||||
|
|
||||||
Note: you need models named `gpt-3.5-turbo` and `text-embedding-ada-002`. You can preload them at LocalAI startup by setting the following in the environment:
|
||||||
|
|
||||||
``` |
|
||||||
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}] |
|
||||||
``` |
|
@ -1,42 +0,0 @@ |
|||||||
version: "3.9" |
|
||||||
services: |
|
||||||
api: |
|
||||||
image: quay.io/go-skynet/local-ai:latest |
|
||||||
ports: |
|
||||||
- 8080:8080 |
|
||||||
env_file: |
|
||||||
- .env |
|
||||||
environment: |
|
||||||
- DEBUG=true |
|
||||||
- MODELS_PATH=/models |
|
||||||
volumes: |
|
||||||
- ./models:/models:cached |
|
||||||
command: ["/usr/bin/local-ai" ] |
|
||||||
auto-gpt: |
|
||||||
image: significantgravitas/auto-gpt |
|
||||||
depends_on: |
|
||||||
api: |
|
||||||
condition: service_healthy |
|
||||||
redis: |
|
||||||
condition: service_started |
|
||||||
env_file: |
|
||||||
- .env |
|
||||||
environment: |
|
||||||
MEMORY_BACKEND: ${MEMORY_BACKEND:-redis} |
|
||||||
REDIS_HOST: ${REDIS_HOST:-redis} |
|
||||||
profiles: ["exclude-from-up"] |
|
||||||
volumes: |
|
||||||
- ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace |
|
||||||
- ./data:/app/data |
|
||||||
## allow auto-gpt to write logs to disk |
|
||||||
- ./logs:/app/logs |
|
||||||
## uncomment following lines if you want to make use of these files |
|
||||||
## you must have them existing in the same folder as this docker-compose.yml |
|
||||||
#- type: bind |
|
||||||
# source: ./azure.yaml |
|
||||||
# target: /app/azure.yaml |
|
||||||
#- type: bind |
|
||||||
# source: ./ai_settings.yaml |
|
||||||
# target: /app/ai_settings.yaml |
|
||||||
redis: |
|
||||||
image: "redis/redis-stack-server:latest" |
|
@ -1,48 +0,0 @@ |
|||||||
# chatbot-ui |
|
||||||
|
|
||||||
Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui). |
|
||||||
|
|
||||||
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) |
|
||||||
|
|
||||||
## Setup |
|
||||||
|
|
||||||
```bash |
|
||||||
# Clone LocalAI |
|
||||||
git clone https://github.com/go-skynet/LocalAI |
|
||||||
|
|
||||||
cd LocalAI/examples/chatbot-ui |
|
||||||
|
|
||||||
# (optional) Checkout a specific LocalAI tag |
|
||||||
# git checkout -b build <TAG> |
|
||||||
|
|
||||||
# Download gpt4all-j to models/ |
|
||||||
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j |
|
||||||
|
|
||||||
# start with docker-compose |
|
||||||
docker-compose up -d --pull always |
|
||||||
# or you can build the images with: |
|
||||||
# docker-compose up -d --build |
|
||||||
``` |
|
||||||
|
|
||||||
## Pointing chatbot-ui to a separately managed LocalAI service |
|
||||||
|
|
||||||
If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose` file so that it looks like the below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<<LOCALAI_IP>>` below): |
|
||||||
``` |
|
||||||
version: '3.6' |
|
||||||
|
|
||||||
services: |
|
||||||
chatgpt: |
|
||||||
image: ghcr.io/mckaywrigley/chatbot-ui:main |
|
||||||
ports: |
|
||||||
- 3000:3000 |
|
||||||
environment: |
|
||||||
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' |
|
||||||
- 'OPENAI_API_HOST=http://<<LOCALAI_IP>>:8080' |
|
||||||
``` |
|
||||||
|
|
||||||
Once you've edited the docker-compose file, you can start it with `docker compose up`, then browse to `http://localhost:3000`.
|
||||||
|
|
||||||
## Accessing chatbot-ui |
|
||||||
|
|
||||||
Open http://localhost:3000 for the Web UI. |
|
||||||
|
|
@ -1,24 +0,0 @@ |
|||||||
version: '3.6' |
|
||||||
|
|
||||||
services: |
|
||||||
api: |
|
||||||
image: quay.io/go-skynet/local-ai:latest |
|
||||||
build: |
|
||||||
context: ../../ |
|
||||||
dockerfile: Dockerfile |
|
||||||
ports: |
|
||||||
- 8080:8080 |
|
||||||
environment: |
|
||||||
- DEBUG=true |
|
||||||
- MODELS_PATH=/models |
|
||||||
volumes: |
|
||||||
- ./models:/models:cached |
|
||||||
command: ["/usr/bin/local-ai" ] |
|
||||||
|
|
||||||
chatgpt: |
|
||||||
image: ghcr.io/mckaywrigley/chatbot-ui:main |
|
||||||
ports: |
|
||||||
- 3000:3000 |
|
||||||
environment: |
|
||||||
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' |
|
||||||
- 'OPENAI_API_HOST=http://api:8080' |
|
@ -1 +0,0 @@ |
|||||||
{{.Input}} |
|
@ -1,16 +0,0 @@ |
|||||||
name: gpt-3.5-turbo |
|
||||||
parameters: |
|
||||||
model: ggml-gpt4all-j |
|
||||||
top_k: 80 |
|
||||||
temperature: 0.2 |
|
||||||
top_p: 0.7 |
|
||||||
context_size: 1024 |
|
||||||
stopwords: |
|
||||||
- "HUMAN:" |
|
||||||
- "GPT:" |
|
||||||
roles: |
|
||||||
user: " " |
|
||||||
system: " " |
|
||||||
template: |
|
||||||
completion: completion |
|
||||||
chat: gpt4all |
|
@ -1,4 +0,0 @@ |
|||||||
The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response. |
|
||||||
### Prompt: |
|
||||||
{{.Input}} |
|
||||||
### Response: |
|
@ -1,44 +0,0 @@ |
|||||||
# chatbot-ui |
|
||||||
|
|
||||||
Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui). |
|
||||||
|
|
||||||
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) |
|
||||||
|
|
||||||
## Run |
|
||||||
|
|
||||||
In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo"; see `docker-compose.yaml`.
|
||||||
```bash |
|
||||||
# Clone LocalAI |
|
||||||
git clone https://github.com/go-skynet/LocalAI |
|
||||||
|
|
||||||
cd LocalAI/examples/chatbot-ui |
|
||||||
|
|
||||||
# start with docker-compose |
|
||||||
docker-compose up --pull always |
|
||||||
|
|
||||||
# or you can build the images with: |
|
||||||
# docker-compose up -d --build |
|
||||||
``` |
|
||||||
|
|
||||||
## Pointing chatbot-ui to a separately managed LocalAI service |
|
||||||
|
|
||||||
If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose` file so that it looks like the below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<<LOCALAI_IP>>` below): |
|
||||||
``` |
|
||||||
version: '3.6' |
|
||||||
|
|
||||||
services: |
|
||||||
chatgpt: |
|
||||||
image: ghcr.io/mckaywrigley/chatbot-ui:main |
|
||||||
ports: |
|
||||||
- 3000:3000 |
|
||||||
environment: |
|
||||||
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' |
|
||||||
- 'OPENAI_API_HOST=http://<<LOCALAI_IP>>:8080' |
|
||||||
``` |
|
||||||
|
|
||||||
Once you've edited the docker-compose file, you can start it with `docker compose up`, then browse to `http://localhost:3000`.
|
||||||
|
|
||||||
## Accessing chatbot-ui |
|
||||||
|
|
||||||
Open http://localhost:3000 for the Web UI. |
|
||||||
|
|
@ -1,37 +0,0 @@ |
|||||||
version: '3.6' |
|
||||||
|
|
||||||
services: |
|
||||||
api: |
|
||||||
image: quay.io/go-skynet/local-ai:latest |
|
||||||
# As initially LocalAI will download the models defined in PRELOAD_MODELS |
|
||||||
# you might need to tweak the healthcheck values here according to your network connection. |
|
||||||
# Here we give a timespan of 20m to download all the required files. |
|
||||||
healthcheck: |
|
||||||
test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"] |
|
||||||
interval: 1m |
|
||||||
timeout: 20m |
|
||||||
retries: 20 |
|
||||||
build: |
|
||||||
context: ../../ |
|
||||||
dockerfile: Dockerfile |
|
||||||
ports: |
|
||||||
- 8080:8080 |
|
||||||
environment: |
|
||||||
- DEBUG=true |
|
||||||
- MODELS_PATH=/models |
|
||||||
# You can preload different models here as well. |
|
||||||
# See: https://github.com/go-skynet/model-gallery |
|
||||||
- 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]' |
|
||||||
volumes: |
|
||||||
- ./models:/models:cached |
|
||||||
command: ["/usr/bin/local-ai" ] |
|
||||||
chatgpt: |
|
||||||
depends_on: |
|
||||||
api: |
|
||||||
condition: service_healthy |
|
||||||
image: ghcr.io/mckaywrigley/chatbot-ui:main |
|
||||||
ports: |
|
||||||
- 3000:3000 |
|
||||||
environment: |
|
||||||
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX' |
|
||||||
- 'OPENAI_API_HOST=http://api:8080' |
|
@ -1,6 +0,0 @@ |
|||||||
OPENAI_API_KEY=x |
|
||||||
DISCORD_BOT_TOKEN=x |
|
||||||
DISCORD_CLIENT_ID=x |
|
||||||
OPENAI_API_BASE=http://api:8080 |
|
||||||
ALLOWED_SERVER_IDS=x |
|
||||||
SERVER_TO_MODERATION_CHANNEL=1:1 |
|
@ -1,76 +0,0 @@ |
|||||||
# discord-bot |
|
||||||
|
|
||||||
![Screenshot from 2023-05-01 07-58-19](https://user-images.githubusercontent.com/2420543/235413924-0cb2e75b-f2d6-4119-8610-44386e44afb8.png) |
|
||||||
|
|
||||||
## Setup |
|
||||||
|
|
||||||
```bash |
|
||||||
# Clone LocalAI |
|
||||||
git clone https://github.com/go-skynet/LocalAI |
|
||||||
|
|
||||||
cd LocalAI/examples/discord-bot |
|
||||||
|
|
||||||
# (optional) Checkout a specific LocalAI tag |
|
||||||
# git checkout -b build <TAG> |
|
||||||
|
|
||||||
# Download gpt4all-j to models/ |
|
||||||
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j |
|
||||||
|
|
||||||
# Set the discord bot options (see: https://github.com/go-skynet/gpt-discord-bot#setup) |
|
||||||
cp -rfv .env.example .env |
|
||||||
vim .env |
|
||||||
|
|
||||||
# start with docker-compose |
|
||||||
docker-compose up -d --build |
|
||||||
``` |
|
||||||
|
|
||||||
Note: see setup options here: https://github.com/go-skynet/gpt-discord-bot#setup |
|
||||||
|
|
||||||
Open the URL printed in the console and grant the bot permission in your server, then start a thread with `/chat ..`
|
||||||
|
|
||||||
## Kubernetes |
|
||||||
|
|
||||||
- install the local-ai chart first |
|
||||||
- change OPENAI_API_BASE to point to the API address and apply the discord-bot manifest: |
|
||||||
|
|
||||||
```yaml |
|
||||||
apiVersion: v1 |
|
||||||
kind: Namespace |
|
||||||
metadata: |
|
||||||
name: discord-bot |
|
||||||
--- |
|
||||||
apiVersion: apps/v1 |
|
||||||
kind: Deployment |
|
||||||
metadata: |
|
||||||
name: localai |
|
||||||
namespace: discord-bot |
|
||||||
labels: |
|
||||||
app: localai |
|
||||||
spec: |
|
||||||
selector: |
|
||||||
matchLabels: |
|
||||||
app: localai |
|
||||||
replicas: 1 |
|
||||||
template: |
|
||||||
metadata: |
|
||||||
labels: |
|
||||||
app: localai |
|
||||||
name: localai |
|
||||||
spec: |
|
||||||
containers: |
|
||||||
- name: localai-discord |
|
||||||
env: |
|
||||||
- name: OPENAI_API_KEY |
|
||||||
value: "x" |
|
||||||
- name: DISCORD_BOT_TOKEN |
|
||||||
value: "" |
|
||||||
- name: DISCORD_CLIENT_ID |
|
||||||
value: "" |
|
||||||
- name: OPENAI_API_BASE |
|
||||||
value: "http://local-ai.default.svc.cluster.local:8080" |
|
||||||
- name: ALLOWED_SERVER_IDS |
|
||||||
value: "xx" |
|
||||||
- name: SERVER_TO_MODERATION_CHANNEL |
|
||||||
value: "1:1" |
|
||||||
image: quay.io/go-skynet/gpt-discord-bot:main |
|
||||||
``` |
|
@ -1,21 +0,0 @@ |
|||||||
version: '3.6' |
|
||||||
|
|
||||||
services: |
|
||||||
api: |
|
||||||
image: quay.io/go-skynet/local-ai:latest |
|
||||||
build: |
|
||||||
context: ../../ |
|
||||||
dockerfile: Dockerfile |
|
||||||
ports: |
|
||||||
- 8080:8080 |
|
||||||
environment: |
|
||||||
- DEBUG=true |
|
||||||
- MODELS_PATH=/models |
|
||||||
volumes: |
|
||||||
- ./models:/models:cached |
|
||||||
command: ["/usr/bin/local-ai" ] |
|
||||||
|
|
||||||
bot: |
|
||||||
image: quay.io/go-skynet/gpt-discord-bot:main |
|
||||||
env_file: |
|
||||||
- .env |
|
@ -1 +0,0 @@ |
|||||||
../chatbot-ui/models/ |
|
@ -1,30 +0,0 @@ |
|||||||
# flowise |
|
||||||
|
|
||||||
Example of integration with [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise). |
|
||||||
|
|
||||||
![Screenshot from 2023-05-30 18-01-03](https://github.com/go-skynet/LocalAI/assets/2420543/02458782-0549-4131-971c-95ee56ec1af8) |
|
||||||
|
|
||||||
You can check a demo video in the Flowise PR: https://github.com/FlowiseAI/Flowise/pull/123 |
|
||||||
|
|
||||||
## Run |
|
||||||
|
|
||||||
In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo"; see `docker-compose.yaml`.
|
||||||
```bash |
|
||||||
# Clone LocalAI |
|
||||||
git clone https://github.com/go-skynet/LocalAI |
|
||||||
|
|
||||||
cd LocalAI/examples/flowise |
|
||||||
|
|
||||||
# start with docker-compose |
|
||||||
docker-compose up --pull always |
|
||||||
|
|
||||||
``` |
|
||||||
|
|
||||||
## Accessing flowise |
|
||||||
|
|
||||||
Open http://localhost:3000. |
|
||||||
|
|
||||||
## Using LocalAI |
|
||||||
|
|
||||||
Search for LocalAI in the integrations, and use `http://api:8080/` as the URL.
|
||||||
|
|
@ -1,37 +0,0 @@ |
|||||||
version: '3.6' |
|
||||||
|
|
||||||
services: |
|
||||||
api: |
|
||||||
image: quay.io/go-skynet/local-ai:latest |
|
||||||
# As initially LocalAI will download the models defined in PRELOAD_MODELS |
|
||||||
# you might need to tweak the healthcheck values here according to your network connection. |
|
||||||
# Here we give a timespan of 20m to download all the required files. |
|
||||||
healthcheck: |
|
||||||
test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"] |
|
||||||
interval: 1m |
|
||||||
timeout: 20m |
|
||||||
retries: 20 |
|
||||||
build: |
|
||||||
context: ../../ |
|
||||||
dockerfile: Dockerfile |
|
||||||
ports: |
|
||||||
- 8080:8080 |
|
||||||
environment: |
|
||||||
- DEBUG=true |
|
||||||
- MODELS_PATH=/models |
|
||||||
# You can preload different models here as well. |
|
||||||
# See: https://github.com/go-skynet/model-gallery |
|
||||||
- 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]' |
|
||||||
volumes: |
|
||||||
- ./models:/models:cached |
|
||||||
command: ["/usr/bin/local-ai" ] |
|
||||||
flowise: |
|
||||||
depends_on: |
|
||||||
api: |
|
||||||
condition: service_healthy |
|
||||||
image: flowiseai/flowise |
|
||||||
ports: |
|
||||||
- 3000:3000 |
|
||||||
volumes: |
|
||||||
- ~/.flowise:/root/.flowise |
|
||||||
command: /bin/sh -c "sleep 3; flowise start" |
|
@ -1,9 +0,0 @@ |
|||||||
OPENAI_API_KEY=sk---anystringhere |
|
||||||
OPENAI_API_BASE=http://api:8080/v1 |
|
||||||
# Models to preload at start |
|
||||||
# Here we configure gpt4all as gpt-3.5-turbo and bert as embeddings |
|
||||||
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/openllama-7b-open-instruct.yaml", "name": "gpt-3.5-turbo"}] |
|
||||||
|
|
||||||
## Change the default number of threads |
|
||||||
#THREADS=14 |
|
||||||
|
|
@ -1,5 +0,0 @@ |
|||||||
FROM python:3.10-bullseye |
|
||||||
COPY . /app |
|
||||||
WORKDIR /app |
|
||||||
RUN pip install --no-cache-dir -r requirements.txt |
|
||||||
ENTRYPOINT [ "python", "./functions-openai.py" ]; |
|
@ -1,18 +0,0 @@ |
|||||||
# LocalAI functions |
|
||||||
|
|
||||||
Example of using LocalAI functions; see the [OpenAI](https://openai.com/blog/function-calling-and-other-api-updates) blog post.
|
||||||
|
|
||||||
## Run |
|
||||||
|
|
||||||
```bash |
|
||||||
# Clone LocalAI |
|
||||||
git clone https://github.com/go-skynet/LocalAI |
|
||||||
|
|
||||||
cd LocalAI/examples/functions |
|
||||||
|
|
||||||
docker-compose run --rm functions |
|
||||||
``` |
|
||||||
|
|
||||||
Note: The example automatically downloads the `openllama` model as it is under a permissive license. |
|
||||||
|
|
||||||
See the `.env` configuration file to set a different model with the [model-gallery](https://github.com/go-skynet/model-gallery) by editing `PRELOAD_MODELS`. |
|
@ -1,23 +0,0 @@ |
|||||||
version: "3.9" |
|
||||||
services: |
|
||||||
api: |
|
||||||
image: quay.io/go-skynet/local-ai:master |
|
||||||
ports: |
|
||||||
- 8080:8080 |
|
||||||
env_file: |
|
||||||
- .env |
|
||||||
environment: |
|
||||||
- DEBUG=true |
|
||||||
- MODELS_PATH=/models |
|
||||||
volumes: |
|
||||||
- ./models:/models:cached |
|
||||||
command: ["/usr/bin/local-ai" ] |
|
||||||
functions: |
|
||||||
build: |
|
||||||
context: . |
|
||||||
dockerfile: Dockerfile |
|
||||||
depends_on: |
|
||||||
api: |
|
||||||
condition: service_healthy |
|
||||||
env_file: |
|
||||||
- .env |
|
@ -1,76 +0,0 @@ |
|||||||
import openai |
|
||||||
import json |
|
||||||
|
|
||||||
# Example dummy function hard coded to return the same weather |
|
||||||
# In production, this could be your backend API or an external API |
|
||||||
def get_current_weather(location, unit="fahrenheit"): |
|
||||||
"""Get the current weather in a given location""" |
|
||||||
weather_info = { |
|
||||||
"location": location, |
|
||||||
"temperature": "72", |
|
||||||
"unit": unit, |
|
||||||
"forecast": ["sunny", "windy"], |
|
||||||
} |
|
||||||
return json.dumps(weather_info) |
|
||||||
|
|
||||||
|
|
||||||
def run_conversation(): |
|
||||||
# Step 1: send the conversation and available functions to GPT |
|
||||||
messages = [{"role": "user", "content": "What's the weather like in Boston?"}] |
|
||||||
functions = [ |
|
||||||
{ |
|
||||||
"name": "get_current_weather", |
|
||||||
"description": "Get the current weather in a given location", |
|
||||||
"parameters": { |
|
||||||
"type": "object", |
|
||||||
"properties": { |
|
||||||
"location": { |
|
||||||
"type": "string", |
|
||||||
"description": "The city and state, e.g. San Francisco, CA", |
|
||||||
}, |
|
||||||
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, |
|
||||||
}, |
|
||||||
"required": ["location"], |
|
||||||
}, |
|
||||||
} |
|
||||||
] |
|
||||||
response = openai.ChatCompletion.create( |
|
||||||
model="gpt-3.5-turbo", |
|
||||||
messages=messages, |
|
||||||
functions=functions, |
|
||||||
function_call="auto", # auto is default, but we'll be explicit |
|
||||||
) |
|
||||||
response_message = response["choices"][0]["message"] |
|
||||||
|
|
||||||
# Step 2: check if GPT wanted to call a function |
|
||||||
if response_message.get("function_call"): |
|
||||||
# Step 3: call the function |
|
||||||
# Note: the JSON response may not always be valid; be sure to handle errors |
|
||||||
available_functions = { |
|
||||||
"get_current_weather": get_current_weather, |
|
||||||
} # only one function in this example, but you can have multiple |
|
||||||
function_name = response_message["function_call"]["name"] |
|
||||||
function_to_call = available_functions[function_name]
|
||||||
function_args = json.loads(response_message["function_call"]["arguments"]) |
|
||||||
function_response = function_to_call(
|
||||||
location=function_args.get("location"), |
|
||||||
unit=function_args.get("unit"), |
|
||||||
) |
|
||||||
|
|
||||||
# Step 4: send the info on the function call and function response to GPT |
|
||||||
messages.append(response_message) # extend conversation with assistant's reply |
|
||||||
messages.append( |
|
||||||
{ |
|
||||||
"role": "function", |
|
||||||
"name": function_name, |
|
||||||
"content": function_response, |
|
||||||
} |
|
||||||
) # extend conversation with function response |
|
||||||
second_response = openai.ChatCompletion.create( |
|
||||||
model="gpt-3.5-turbo", |
|
||||||
messages=messages, |
|
||||||
) # get a new response from GPT where it can see the function response |
|
||||||
return second_response |
|
||||||
|
|
||||||
|
|
||||||
print(run_conversation()) |
|
@ -1,2 +0,0 @@ |
|||||||
langchain==0.0.234 |
|
||||||
openai==0.27.8 |
|
@ -1,70 +0,0 @@ |
|||||||
# k8sgpt example |
|
||||||
|
|
||||||
This example shows how to use LocalAI with k8sgpt.
|
||||||
|
|
||||||
![Screenshot from 2023-06-19 23-58-47](https://github.com/go-skynet/go-ggml-transformers.cpp/assets/2420543/cab87409-ee68-44ae-8d53-41627fb49509) |
|
||||||
|
|
||||||
## Create the cluster locally with Kind (optional) |
|
||||||
|
|
||||||
If you want to test this locally without a remote Kubernetes cluster, you can use kind. |
|
||||||
|
|
||||||
Install [kind](https://kind.sigs.k8s.io/) and create a cluster: |
|
||||||
|
|
||||||
``` |
|
||||||
kind create cluster |
|
||||||
``` |
|
||||||
|
|
||||||
## Setup LocalAI |
|
||||||
|
|
||||||
We will use [helm](https://helm.sh/docs/intro/install/): |
|
||||||
|
|
||||||
``` |
|
||||||
helm repo add go-skynet https://go-skynet.github.io/helm-charts/ |
|
||||||
helm repo update |
|
||||||
|
|
||||||
# Clone LocalAI |
|
||||||
git clone https://github.com/go-skynet/LocalAI |
|
||||||
|
|
||||||
cd LocalAI/examples/k8sgpt |
|
||||||
|
|
||||||
# modify values.yaml preload_models with the models you want to install. |
|
||||||
# CHANGE the URL to a model in huggingface. |
|
||||||
helm install local-ai go-skynet/local-ai --create-namespace --namespace local-ai --values values.yaml |
|
||||||
``` |
|
||||||
|
|
||||||
## Setup K8sGPT |
|
||||||
|
|
||||||
``` |
|
||||||
# Install k8sgpt |
|
||||||
helm repo add k8sgpt https://charts.k8sgpt.ai/ |
|
||||||
helm repo update |
|
||||||
helm install release k8sgpt/k8sgpt-operator -n k8sgpt-operator-system --create-namespace |
|
||||||
``` |
|
||||||
|
|
||||||
Apply the k8sgpt-operator configuration: |
|
||||||
|
|
||||||
``` |
|
||||||
kubectl apply -f - << EOF |
|
||||||
apiVersion: core.k8sgpt.ai/v1alpha1 |
|
||||||
kind: K8sGPT |
|
||||||
metadata: |
|
||||||
name: k8sgpt-local-ai |
|
||||||
namespace: default |
|
||||||
spec: |
|
||||||
backend: localai |
|
||||||
baseUrl: http://local-ai.local-ai.svc.cluster.local:8080/v1 |
|
||||||
noCache: false |
|
||||||
model: gpt-3.5-turbo |
|
||||||
|
||||||
version: v0.3.0 |
|
||||||
enableAI: true |
|
||||||
EOF |
|
||||||
``` |
|
||||||
|
|
||||||
## Test |
|
||||||
|
|
||||||
Apply a broken pod: |
|
||||||
|
|
||||||
``` |
|
||||||
kubectl apply -f broken-pod.yaml |
|
||||||
``` |
|
@ -1,14 +0,0 @@ |
|||||||
apiVersion: v1 |
|
||||||
kind: Pod |
|
||||||
metadata: |
|
||||||
name: broken-pod |
|
||||||
spec: |
|
||||||
containers: |
|
||||||
- name: broken-pod |
|
||||||
image: nginx:1.a.b.c |
|
||||||
livenessProbe: |
|
||||||
httpGet: |
|
||||||
path: / |
|
||||||
port: 90 |
|
||||||
initialDelaySeconds: 3 |
|
||||||
periodSeconds: 3 |
|
@ -1,95 +0,0 @@ |
|||||||
replicaCount: 1 |
|
||||||
|
|
||||||
deployment: |
|
||||||
# https://quay.io/repository/go-skynet/local-ai?tab=tags |
|
||||||
image: quay.io/go-skynet/local-ai:latest |
|
||||||
env: |
|
||||||
threads: 4 |
|
||||||
debug: "true" |
|
||||||
context_size: 512 |
|
||||||
preload_models: '[{ "url": "github:go-skynet/model-gallery/wizard.yaml", "name": "gpt-3.5-turbo", "overrides": { "parameters": { "model": "WizardLM-7B-uncensored.ggmlv3.q5_1" }},"files": [ { "uri": "https://huggingface.co//WizardLM-7B-uncensored-GGML/resolve/main/WizardLM-7B-uncensored.ggmlv3.q5_1.bin", "sha256": "d92a509d83a8ea5e08ba4c2dbaf08f29015932dc2accd627ce0665ac72c2bb2b", "filename": "WizardLM-7B-uncensored.ggmlv3.q5_1" }]}]' |
|
||||||
modelsPath: "/models" |
|
||||||
|
|
||||||
resources: |
|
||||||
{} |
|
||||||
# We usually recommend not to specify default resources and to leave this as a conscious |
|
||||||
# choice for the user. This also increases chances charts run on environments with little |
|
||||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following |
|
||||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'. |
|
||||||
# limits: |
|
||||||
# cpu: 100m |
|
||||||
# memory: 128Mi |
|
||||||
# requests: |
|
||||||
# cpu: 100m |
|
||||||
# memory: 128Mi |
|
||||||
|
|
||||||
# Prompt templates to include |
|
||||||
# Note: the keys of this map will be the names of the prompt template files |
|
||||||
promptTemplates: |
|
||||||
{} |
|
||||||
# ggml-gpt4all-j.tmpl: | |
|
||||||
# The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response. |
|
||||||
# ### Prompt: |
|
||||||
# {{.Input}} |
|
||||||
# ### Response: |
|
||||||
|
|
||||||
# Models to download at runtime |
|
||||||
models: |
|
||||||
# Whether to force download models even if they already exist |
|
||||||
forceDownload: false |
|
||||||
|
|
||||||
# The list of URLs to download models from |
|
||||||
# Note: the name of the file will be the name of the loaded model |
|
||||||
list: |
|
||||||
#- url: "https://gpt4all.io/models/ggml-gpt4all-j.bin" |
|
||||||
# basicAuth: base64EncodedCredentials |
|
||||||
|
|
||||||
# Persistent storage for models and prompt templates. |
|
||||||
# PVC and HostPath are mutually exclusive. If both are enabled, |
|
||||||
# PVC configuration takes precedence. If neither are enabled, ephemeral |
|
||||||
# storage is used. |
|
||||||
persistence: |
|
||||||
pvc: |
|
||||||
enabled: false |
|
||||||
size: 6Gi |
|
||||||
accessModes: |
|
||||||
- ReadWriteOnce |
|
||||||
|
|
||||||
annotations: {} |
|
||||||
|
|
||||||
# Optional |
|
||||||
storageClass: ~ |
|
||||||
|
|
||||||
hostPath: |
|
||||||
enabled: false |
|
||||||
path: "/models" |
|
||||||
|
|
||||||
service: |
|
||||||
type: ClusterIP |
|
||||||
port: 8080 |
|
||||||
annotations: {} |
|
||||||
# If using an AWS load balancer, you'll need to override the default 60s load balancer idle timeout |
|
||||||
# service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1200" |
|
||||||
|
|
||||||
ingress: |
|
||||||
enabled: false |
|
||||||
className: "" |
|
||||||
annotations: |
|
||||||
{} |
|
||||||
# kubernetes.io/ingress.class: nginx |
|
||||||
# kubernetes.io/tls-acme: "true" |
|
||||||
hosts: |
|
||||||
- host: chart-example.local |
|
||||||
paths: |
|
||||||
- path: / |
|
||||||
pathType: ImplementationSpecific |
|
||||||
tls: [] |
|
||||||
# - secretName: chart-example-tls |
|
||||||
# hosts: |
|
||||||
# - chart-example.local |
|
||||||
|
|
||||||
nodeSelector: {} |
|
||||||
|
|
||||||
tolerations: [] |
|
||||||
|
|
||||||
affinity: {} |
|