feat: aichat service integration, log levels, ollama modelfiles
av committed Sep 4, 2024
1 parent eb12061 commit 521e224
Showing 22 changed files with 496 additions and 151 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -152,7 +152,7 @@ harbor open

##### Satellites

[SearXNG](https://github.com/av/harbor/wiki/Services#searxng) ⦁︎ [Perplexica](https://github.com/av/harbor/wiki/Services#perplexica) ⦁︎ [Dify](https://github.com/av/harbor/wiki/Services#dify) ⦁︎ [Plandex](https://github.com/av/harbor/wiki/Services#plandex) ⦁︎ [LiteLLM](https://github.com/av/harbor/wiki/Services#-litellm) ⦁︎ [LangFuse](https://github.com/av/harbor/wiki/Services#langfuse) ⦁︎ [Open Interpreter](https://github.com/av/harbor/wiki/Services#-open-interpreter) ⦁︎ [cloudflared](https://github.com/av/harbor/wiki/Services#cloudflared) ⦁︎ [cmdh](https://github.com/av/harbor/wiki/Services#cmdh) ⦁︎ [fabric](https://github.com/av/harbor/wiki/Services#fabric) ⦁︎ [txtai RAG](https://github.com/av/harbor/wiki/Services#txtai-rag) ⦁︎ [TextGrad](https://github.com/av/harbor/wiki/Services#textgrad) ⦁︎ [Aider](https://github.com/av/harbor/wiki/Services#aider)
[SearXNG](https://github.com/av/harbor/wiki/Services#searxng) ⦁︎ [Perplexica](https://github.com/av/harbor/wiki/Services#perplexica) ⦁︎ [Dify](https://github.com/av/harbor/wiki/Services#dify) ⦁︎ [Plandex](https://github.com/av/harbor/wiki/Services#plandex) ⦁︎ [LiteLLM](https://github.com/av/harbor/wiki/Services#-litellm) ⦁︎ [LangFuse](https://github.com/av/harbor/wiki/Services#langfuse) ⦁︎ [Open Interpreter](https://github.com/av/harbor/wiki/Services#-open-interpreter) ⦁︎ [cloudflared](https://github.com/av/harbor/wiki/Services#cloudflared) ⦁︎ [cmdh](https://github.com/av/harbor/wiki/Services#cmdh) ⦁︎ [fabric](https://github.com/av/harbor/wiki/Services#fabric) ⦁︎ [txtai RAG](https://github.com/av/harbor/wiki/Services#txtai-rag) ⦁︎ [TextGrad](https://github.com/av/harbor/wiki/Services#textgrad) ⦁︎ [Aider](https://github.com/av/harbor/wiki/Services#aider) ⦁︎ [aichat](https://github.com/av/harbor/wiki/Services#aichat)

## Why?

10 changes: 10 additions & 0 deletions aichat/Dockerfile
@@ -0,0 +1,10 @@
FROM python:3.11
SHELL ["/bin/bash", "-c"]

WORKDIR /app
RUN pip install pyyaml

RUN curl https://zyedidia.github.io/eget.sh | sh
RUN ./eget sigoden/aichat

ENTRYPOINT [ "/app/aichat" ]
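
The image can be smoke-tested on its own before wiring it into compose; the tag below is an arbitrary local name, not something this commit defines:

docker build -t harbor-aichat ./aichat
docker run --rm harbor-aichat --version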
8 changes: 8 additions & 0 deletions aichat/configs/aichat.airllm.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: airllm
    api_base: http://airllm:5000/v1
    api_key: sk-airllm
    models:
      - name: ${HARBOR_AICHAT_MODEL}

8 changes: 8 additions & 0 deletions aichat/configs/aichat.aphrodite.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: aphrodite
    api_base: http://aphrodite:7860/v1
    api_key: sk-aphrodite
    models:
      - name: ${HARBOR_AICHAT_MODEL}

115 changes: 115 additions & 0 deletions aichat/configs/aichat.config.yml
@@ -0,0 +1,115 @@
# This is where other configs will be merged into
# See official example: https://github.com/sigoden/aichat/blob/main/config.example.yaml

# ---- llm ----

# This field requires a prefix matching
# the name of the openai-compatible backend,
# so it's set from the "cross" files, e.g. aichat.ollama.yml
# model: ollama:${HARBOR_AICHAT_MODEL}

temperature: null # Set default temperature parameter
top_p: null # Set default top-p parameter, range (0, 1)

# ---- behavior ----
stream: true # Controls whether to use the stream-style API.
save: true # Indicates whether to persist the message
keybindings: emacs # Choose keybinding style (emacs, vi)
editor: null # Specifies the command used to edit the input buffer or session (e.g. vim, emacs, nano)
wrap: 'auto' # Controls text wrapping (no, auto, <max-width>)
wrap_code: false # Enables or disables wrapping of code blocks

# ---- prelude ----
prelude: null # Set a default role or session to start with (e.g. role:<name>, session:<name>)
repl_prelude: null # Overrides the `prelude` setting specifically for conversations started in REPL
agent_prelude: null # Set a session to use when starting an agent (e.g. temp, default)

# ---- session ----
# Controls session persistence: if true, auto-save; if false, don't save; if null, ask the user
save_session: null
# Compress session when token count reaches or exceeds this threshold
compress_threshold: 4000
# Text prompt used for creating a concise summary of the session messages
summarize_prompt: 'Summarize the discussion briefly in 200 words or less to use as a prompt for future context.'
# Text prompt used for including the summary of the entire session
summary_prompt: 'This is a summary of the chat history as a recap: '

# ---- function-calling ----
# Visit https://github.com/sigoden/llm-functions for setup instructions
function_calling: true # Enables or disables function calling (Globally).
mapping_tools: # Alias for a tool or toolset
  fs: 'fs_cat,fs_ls,fs_mkdir,fs_rm,fs_write'
use_tools: null # Which tools to use by default. (e.g. 'fs,web_search')

# ---- RAG ----
# See [RAG-Guide](https://github.com/sigoden/aichat/wiki/RAG-Guide) for more details.
rag_embedding_model: null # Specifies the embedding model to use
rag_reranker_model: null # Specifies the rerank model to use
rag_top_k: 4 # Specifies the number of documents to retrieve
rag_chunk_size: null # Specifies the chunk size
rag_chunk_overlap: null # Specifies the chunk overlap
rag_min_score_vector_search: 0 # Specifies the minimum relevance score for vector-based searching
rag_min_score_keyword_search: 0 # Specifies the minimum relevance score for keyword-based searching
rag_min_score_rerank: 0 # Specifies the minimum relevance score for reranking
# Defines the query structure using variables like __CONTEXT__ and __INPUT__ to tailor searches to specific needs
rag_template: |
  Use the following context as your learned knowledge, inside <context></context> XML tags.
  <context>
  __CONTEXT__
  </context>
  When answering the user:
  - If you don't know, just say that you don't know.
  - If you are not sure, ask for clarification.
  Avoid mentioning that you obtained the information from the context,
  and answer in the language of the user's question.
  Given the context information, answer the query.
  Query: __INPUT__
# Define document loaders to control how RAG and `.file`/`--file` load files of specific formats.
document_loaders:
  # You can add custom loaders using the following syntax:
  # <file-extension>: <command-to-load-the-file>
  # Note: Use `$1` for the input file and `$2` for the output file. If `$2` is omitted, stdout is used as output.
  pdf: 'pdftotext $1 -' # Load .pdf files; see https://poppler.freedesktop.org to set up pdftotext
  docx: 'pandoc --to plain $1' # Load .docx files; see https://pandoc.org to set up pandoc

# ---- appearance ----
highlight: true # Controls syntax highlighting
light_theme: false # Activates a light color theme when true. env: AICHAT_LIGHT_THEME
# Custom REPL left/right prompts, see https://github.com/sigoden/aichat/wiki/Custom-REPL-Prompt for more details
left_prompt:
  '{color.green}{?session {?agent {agent}>}{session}{?role /}}{!session {?agent {agent}>}}{role}{?rag @{rag}}{color.cyan}{?session )}{!session >}{color.reset} '
right_prompt:
  '{color.purple}{?session {?consume_tokens {consume_tokens}({consume_percent}%)}{!consume_tokens {consume_tokens}}}{color.reset}'

# ---- clients ----
clients: []
# All clients have the following configuration:
# - type: xxxx
#   name: xxxx # Only use it to distinguish clients with the same client type. Optional
#   models:
#     - name: xxxx # Chat model
#       max_input_tokens: 100000
#       supports_vision: true
#       supports_function_calling: true
#     - name: xxxx # Embedding model
#       type: embedding
#       max_input_tokens: 2048
#       max_tokens_per_chunk: 2048
#       default_chunk_size: 1500
#       max_batch_size: 100
#     - name: xxxx # Reranker model
#       type: reranker
#       max_input_tokens: 2048
#   patch: # Patch api
#     chat_completions: # Api type, possible values: chat_completions, embeddings, and rerank
#       <regex>: # The regex to match model names, e.g. '.*' 'gpt-4o' 'gpt-4o|gpt-4-.*'
#         url: '' # Patch request url
#         body: # Patch request body
#           <json>
#         headers: # Patch request headers
#           <key>: <value>
#   extra:
#     proxy: socks5://127.0.0.1:1080 # Set proxy
#     connect_timeout: 10 # Set timeout in seconds for connect to api
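
For reference: the comment at the top of this file implies that a "cross" file contributes the prefixed model line (e.g. model: ollama:${HARBOR_AICHAT_MODEL}). Under that assumption, the merged /root/.config/aichat/config.yaml for the ollama case would look roughly like this sketch:

# Illustrative merged result, not a file in this commit
model: ollama:${HARBOR_AICHAT_MODEL}
clients:
  - type: openai-compatible
    name: ollama
    api_base: http://ollama:11434/v1
    api_key: sk-ollama
    models:
      - name: ${HARBOR_AICHAT_MODEL}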
8 changes: 8 additions & 0 deletions aichat/configs/aichat.dify.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: dify
    api_base: http://dify-openai:3000/v1
    api_key: "${HARBOR_DIFY_OPENAI_WORKFLOW}"
    models:
      - name: ${HARBOR_AICHAT_MODEL}

8 changes: 8 additions & 0 deletions aichat/configs/aichat.litellm.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: litellm
    api_base: http://litellm:3000/v1
    api_key: "${HARBOR_LITELLM_MASTER_KEY}"
    models:
      - name: ${HARBOR_AICHAT_MODEL}

8 changes: 8 additions & 0 deletions aichat/configs/aichat.llamacpp.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: llamacpp
    api_base: http://llamacpp:8080/v1
    api_key: sk-llamacpp
    models:
      - name: ${HARBOR_AICHAT_MODEL}

8 changes: 8 additions & 0 deletions aichat/configs/aichat.mistralrs.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: mistralrs
    api_base: http://mistralrs:8021/v1
    api_key: sk-mistralrs
    models:
      - name: ${HARBOR_AICHAT_MODEL}

8 changes: 8 additions & 0 deletions aichat/configs/aichat.ollama.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: ollama
    api_base: http://ollama:11434/v1
    api_key: sk-ollama
    models:
      - name: ${HARBOR_AICHAT_MODEL}

8 changes: 8 additions & 0 deletions aichat/configs/aichat.tabbyapi.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: tabbyapi
    api_base: http://tabbyapi:5000/v1
    api_key: "${HARBOR_TABBYAPI_ADMIN_KEY}"
    models:
      - name: ${HARBOR_AICHAT_MODEL}

8 changes: 8 additions & 0 deletions aichat/configs/aichat.vllm.yml
@@ -0,0 +1,8 @@
clients:
  - type: openai-compatible
    name: vllm
    api_base: http://vllm:8000/v1
    api_key: sk-vllm
    models:
      - name: ${HARBOR_AICHAT_MODEL}

19 changes: 19 additions & 0 deletions aichat/start_aichat.sh
@@ -0,0 +1,19 @@
#!/bin/bash

log() {
  if [ "$HARBOR_LOG_LEVEL" == "DEBUG" ]; then
    echo "$1"
  fi
}

log "Harbor: custom aichat entrypoint"

log "YAML Merger is starting..."
mkdir -p /root/.config/aichat
python /app/yaml_config_merger.py --pattern ".yml" --output "/root/.config/aichat/config.yaml" --directory "/app/configs"

log "Merged Configs:"
log "$(cat /root/.config/aichat/config.yaml)"

log "Starting aichat with args: '$*'"
/app/aichat "$@"
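
shared/yaml_config_merger.py is reused from elsewhere in the repo and is not part of this diff. Judging by the flags above, it globs the mounted configs and merges them into one file; a minimal sketch of that idea follows (assumptions: recursive dict merge, lists concatenated, files processed in sorted order, which would be why the base config is mounted as z.config.yml):

# Sketch only; not the actual shared/yaml_config_merger.py
import argparse, glob, os
import yaml  # provided by `pip install pyyaml` in the Dockerfile

def deep_merge(base, override):
    # Dicts merge recursively, lists concatenate, scalars from `override` win.
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_merge(base[key], value)
        elif isinstance(value, list) and isinstance(base.get(key), list):
            base[key] = base[key] + value
        else:
            base[key] = value
    return base

parser = argparse.ArgumentParser()
parser.add_argument("--pattern")
parser.add_argument("--output")
parser.add_argument("--directory")
args = parser.parse_args()

merged = {}
for path in sorted(glob.glob(os.path.join(args.directory, f"*{args.pattern}"))):
    with open(path) as f:
        deep_merge(merged, yaml.safe_load(f) or {})

with open(args.output, "w") as f:
    yaml.safe_dump(merged, f)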
20 changes: 20 additions & 0 deletions compose.aichat.yml
@@ -0,0 +1,20 @@
services:
  aichat:
    container_name: ${HARBOR_CONTAINER_PREFIX}.aichat
    build:
      context: ./aichat
      dockerfile: Dockerfile
    env_file:
      - ./.env
    networks:
      - harbor-network
    volumes:
      # Base config
      - ./aichat/configs/aichat.config.yml:/app/configs/z.config.yml
      # Custom entrypoint for config merging
      - ./aichat/start_aichat.sh:/app/start_aichat.sh
      - ./shared/yaml_config_merger.py:/app/yaml_config_merger.py
      - ${HARBOR_AICHAT_CONFIG_PATH}:/root/.config/aichat
    ports:
      - ${HARBOR_AICHAT_HOST_PORT}:${HARBOR_AICHAT_HOST_PORT}
    entrypoint: ["/app/start_aichat.sh"]
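
With this file in place, the service starts like any other Harbor service. The command below assumes Harbor's existing `harbor up <service>` convention; a `harbor aichat` CLI passthrough would match the pattern of other satellites but is hypothetical here:

harbor up aichat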
1 change: 1 addition & 0 deletions compose.ollama.yml
@@ -4,6 +4,7 @@ services:
    env_file: ./.env
    volumes:
      - ${HARBOR_OLLAMA_CACHE}:/root/.ollama
      - ./ollama/modelfiles:/modelfiles
    tty: true
    image: ollama/ollama:${HARBOR_OLLAMA_VERSION}
    ports:
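
This mount is the "ollama modelfiles" part of the commit message: Modelfiles placed under ./ollama/modelfiles on the host become visible inside the container and can be registered with ollama create. A sketch, assuming the default HARBOR_CONTAINER_PREFIX of "harbor" and a hypothetical Modelfile name:

docker exec harbor.ollama ollama create my-model -f /modelfiles/my-model.Modelfile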
4 changes: 4 additions & 0 deletions compose.x.aichat.ollama.yml
@@ -0,0 +1,4 @@
services:
  aichat:
    volumes:
      - ./aichat/configs/aichat.ollama.yml:/app/configs/ollama.yml
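
The compose.x.<a>.<b>.yml naming marks a "cross" file: an overlay applied only when both services are part of the run, which is how the ollama-specific client config ends up in aichat's /app/configs. In raw compose terms, the layering looks roughly like this (file list illustrative; Harbor assembles it automatically):

docker compose \
  -f compose.aichat.yml \
  -f compose.ollama.yml \
  -f compose.x.aichat.ollama.yml \
  up aichat ollama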
6 changes: 6 additions & 0 deletions default.env
@@ -56,6 +56,7 @@ HARBOR_CONTAINER_PREFIX="harbor"
HARBOR_CLI_NAME="harbor"
HARBOR_CLI_SHORT="h"
HARBOR_CLI_PATH="~/.local/bin"
HARBOR_LOG_LEVEL="INFO"

# OpenAI
# ---------------------
@@ -246,6 +247,11 @@ HARBOR_COMFYUI_PROVISIONING="https://raw.githubusercontent.com/av/harbor/main/co
HARBOR_PERPLEXICA_HOST_PORT=34041
HARBOR_PERPLEXICA_BACKEND_HOST_PORT=34042

# Aichat
HARBOR_AICHAT_HOST_PORT=34051
HARBOR_AICHAT_MODEL="llama3.1:8b"
HARBOR_AICHAT_CONFIG_PATH="~/.config/aichat"
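
As with other Harbor defaults, these are meant to be overridden in the user's .env rather than edited in default.env; for example (values illustrative):

# .env
HARBOR_LOG_LEVEL="DEBUG"
HARBOR_AICHAT_MODEL="llama3.1:70b"

Note that HARBOR_AICHAT_CONFIG_PATH is also where the merged config ends up on the host, via the volume mount in compose.aichat.yml.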

# ============================================
# Service Configuration.
# You can specify any of the service's own environment variables here.
