From f168a6cb2e0dbc1d3dacc7bcf2305e4b67e9cc03 Mon Sep 17 00:00:00 2001 From: Javier Martinez Date: Mon, 5 Aug 2024 11:38:36 +0200 Subject: [PATCH 01/11] chore: update docker-compose with profiles --- .docker/router.yml | 16 +++++++++ docker-compose.yaml | 88 ++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 100 insertions(+), 4 deletions(-) create mode 100644 .docker/router.yml diff --git a/.docker/router.yml b/.docker/router.yml new file mode 100644 index 000000000..3b55df9ef --- /dev/null +++ b/.docker/router.yml @@ -0,0 +1,16 @@ +http: + services: + ollama: + loadBalancer: + healthCheck: + interval: 5s + path: / + servers: + - url: http://ollama-cpu:11434 + - url: http://ollama-cuda:11434 + - url: http://host.docker.internal:11434 + + routers: + ollama-router: + rule: "PathPrefix(`/`)" + service: ollama \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml index 517af6590..63913678f 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -1,19 +1,99 @@ services: - private-gpt: + + #----------------------------------- + #---- Private-GPT services --------- + #----------------------------------- + + # Private-GPT service for the Ollama CPU and GPU modes + # This service builds from an external Dockerfile and runs the Ollama mode. + private-gpt-ollama: build: + context: . dockerfile: Dockerfile.external volumes: - ./local_data/:/home/worker/app/local_data ports: - - 8001:8001 + - "8001:8001" environment: PORT: 8001 PGPT_PROFILES: docker PGPT_MODE: ollama PGPT_EMBED_MODE: ollama + PGPT_OLLAMA_API_BASE: http://ollama:11434 + profiles: + - "" + - ollama + - ollama-cuda + - ollama-host + + # Private-GPT service for the local mode + # This service builds from a local Dockerfile and runs the application in local mode. + private-gpt-local: + build: + context: . + dockerfile: Dockerfile.local + volumes: + - ./local_data/:/home/worker/app/local_data + - ./models/:/home/worker/app/models + entrypoint: sh -c ".venv/bin/python scripts/setup && .venv/bin/python -m private_gpt" + ports: + - "8001:8001" + environment: + PORT: 8001 + PGPT_PROFILES: local + HF_TOKEN: ${HF_TOKEN} + profiles: + - local + + #----------------------------------- + #---- Ollama services -------------- + #----------------------------------- + + # Traefik reverse proxy for the Ollama service + # This will route requests to the Ollama service based on the profile. 
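+  # Traefik health-checks every backend listed in .docker/router.yml and only
+  # keeps the ones that respond in rotation, so http://ollama:11434 resolves to
+  # whichever ollama-* service (or the host's own Ollama) the active profile runs.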
ollama: - image: ollama/ollama:latest + image: traefik:v2.10 ports: - - 11434:11434 + - "11435:11434" + - "8081:8080" + command: + - "--providers.file.filename=/etc/router.yml" + - "--log.level=ERROR" + - "--api.insecure=true" + - "--providers.docker=true" + - "--providers.docker.exposedbydefault=false" + - "--entrypoints.web.address=:11434" + volumes: + - /var/run/docker.sock:/var/run/docker.sock:ro + - ./.docker/router.yml:/etc/router.yml:ro + extra_hosts: + - "host.docker.internal:host-gateway" + profiles: + - "" + - ollama + - ollama-cuda + - ollama-host + + # Ollama service for the CPU mode + ollama-cpu: + image: ollama/ollama:latest + volumes: + - ./models:/root/.ollama + profiles: + - "" + - ollama + + # Ollama service for the CUDA mode + ollama-cuda: + image: ollama/ollama:latest volumes: - ./models:/root/.ollama + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] + profiles: + - ollama-cuda \ No newline at end of file From 9fba59501e618c046bb96212184fd4866d116bb8 Mon Sep 17 00:00:00 2001 From: Javier Martinez Date: Mon, 5 Aug 2024 11:41:29 +0200 Subject: [PATCH 02/11] docs: add quick start doc --- fern/docs.yml | 9 +++ fern/docs/pages/quickstart/quickstart.mdx | 85 +++++++++++++++++++++++ 2 files changed, 94 insertions(+) create mode 100644 fern/docs/pages/quickstart/quickstart.mdx diff --git a/fern/docs.yml b/fern/docs.yml index e0a5c423b..d13822c84 100644 --- a/fern/docs.yml +++ b/fern/docs.yml @@ -10,6 +10,9 @@ tabs: overview: display-name: Overview icon: "fa-solid fa-home" + quickstart: + display-name: Quickstart + icon: "fa-solid fa-rocket" installation: display-name: Installation icon: "fa-solid fa-download" @@ -32,6 +35,12 @@ navigation: contents: - page: Introduction path: ./docs/pages/overview/welcome.mdx + - tab: quickstart + layout: + - section: Getting started + contents: + - page: Quickstart + path: ./docs/pages/quickstart/quickstart.mdx # How to install PrivateGPT, with FAQ and troubleshooting - tab: installation layout: diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx new file mode 100644 index 000000000..702d8ed75 --- /dev/null +++ b/fern/docs/pages/quickstart/quickstart.mdx @@ -0,0 +1,85 @@ +This guide provides a quick start for running different profiles of PrivateGPT using Docker Compose. +The profiles cater to various environments, including Ollama setups (CPU, CUDA, MacOS) and fully Local setup. + +If you want to run PrivateGPT locally without Docker, refer to the [Local Installation Guide](/installation). + +#### Prerequisites +- **Docker and Docker Compose:** Ensure both are installed on your system. +[Installation Guide for Docker](https://docs.docker.com/get-docker/), [Installation Guide for Docker Compose](https://docs.docker.com/compose/install/). +- **Clone PrivateGPT Repository:** Clone the PrivateGPT repository to your machine and navigate to the directory: + ```sh + git clone https://github.com/zylon-ai/private-gpt.git + cd private-gpt + ``` + +--- + +## Ollama Setups (Recommended) + +Ollama setups are recommended for their ease of use and optimized configurations. Ollama offers different profiles depending on your hardware capabilities and operating system. + +### 1. Default/Ollama CPU + +**Description:** +This profile runs the Ollama service using CPU resources. It is the standard configuration for running Ollama-based Private-GPT services without GPU acceleration. 
+
+**Run:**
+To start the services, use either of the following commands:
+```sh
+docker-compose up
+```
+or
+```sh
+docker-compose --profile ollama up
+```
+
+### 2. Ollama Nvidia CUDA
+
+**Description:**
+This profile leverages GPU acceleration with CUDA support, suitable for computationally intensive tasks that benefit from GPU resources.
+
+**Requirements:**
+- Ensure that your system has compatible GPU hardware and the necessary NVIDIA drivers installed. The installation process is detailed [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
+
+**Run:**
+To start the services with CUDA support, use:
+```sh
+docker-compose --profile ollama-cuda up
+```
+
+### 3. Ollama Host
+
+**Description:**
+This profile is designed for running PrivateGPT using Ollama installed on the host machine. This setup is particularly useful for macOS users, as Docker does not yet support Metal GPUs.
+
+**Requirements:**
+- Install Ollama on your machine by following the instructions at [ollama.ai](https://ollama.ai/).
+- Start the Ollama service with the command:
+```sh
+OLLAMA_HOST=0.0.0.0 ollama serve
+```
+
+**Run:**
+To start the services with the host configuration, use:
+```sh
+docker-compose --profile ollama-host up
+```
+
+---
+
+## Fully Local Setups
+
+### LlamaCPP + HuggingFace Embeddings
+
+**Description:**
+This profile runs the Private-GPT services locally using `llama-cpp` and Hugging Face models.
+
+**Requirements:**
+- **Hugging Face Token (HF_TOKEN):** Required for accessing Hugging Face models. Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).
+
+**Run:**
+Start the services with your Hugging Face token:
+```sh
+HF_TOKEN=<your_hf_token> docker-compose up --profile local
+```
+Replace `<your_hf_token>` with your actual Hugging Face token.
\ No newline at end of file

From ec6149ac2b29387212c44322ccd7088fa10845ca Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 11:50:19 +0200
Subject: [PATCH 03/11] chore: generate docker release when new version is released

---
 .github/workflows/docker.yml           | 45 --------------
 .github/workflows/generate-release.yml | 83 ++++++++++++++++++++++++++
 2 files changed, 83 insertions(+), 45 deletions(-)
 delete mode 100644 .github/workflows/docker.yml
 create mode 100644 .github/workflows/generate-release.yml

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
deleted file mode 100644
index 259cf5da2..000000000
--- a/.github/workflows/docker.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-name: docker
-
-on:
-  release:
-    types: [ published ]
-  workflow_dispatch:
-
-env:
-  REGISTRY: ghcr.io
-  IMAGE_NAME: ${{ github.repository }}
-
-jobs:
-  build-and-push-image:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: read
-      packages: write
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-      - name: Log in to the Container registry
-        uses: docker/login-action@v3
-        with:
-          registry: ${{ env.REGISTRY }}
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Extract metadata (tags, labels) for Docker
-        id: meta
-        uses: docker/metadata-action@v5
-        with:
-          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
-          tags: |
-            type=ref,event=branch
-            type=ref,event=pr
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=sha
-      - name: Build and push Docker image
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          file: Dockerfile.external
-          push: true
-          tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
diff --git a/.github/workflows/generate-release.yml b/.github/workflows/generate-release.yml
new file mode 100644
index 000000000..b84c3fa4d
--- /dev/null
+++ b/.github/workflows/generate-release.yml
@@ -0,0 +1,83 @@
+name: generate-release
+
+on:
+  release:
+    types: [ published ]
+  workflow_dispatch:
+
+env:
+  REGISTRY: docker.io
+  IMAGE_NAME: ${{ github.repository }}
+  platforms: linux/amd64,linux/arm64
+  DEFAULT_TYPE: "external"
+
+jobs:
+  build-and-push-image:
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        type: [ local, external ]
+
+    permissions:
+      contents: read
+      packages: write
+
+    outputs:
+      version: ${{ steps.version.outputs.version }}
+
+    steps:
+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@main
+        with:
+          tool-cache: false
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: true
+          docker-images: false
+          swap-storage: true
+
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+
+      - name: Log in to Docker Hub
+        uses: docker/login-action@v3
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Extract metadata (tags, labels) for Docker
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          tags: |
+            type=semver,pattern={{version}},enable=${{ matrix.type == env.DEFAULT_TYPE }}
+            type=semver,pattern={{version}}-${{ matrix.type }}
+            type=semver,pattern={{major}}.{{minor}},enable=${{ matrix.type == env.DEFAULT_TYPE }}
+            type=semver,pattern={{major}}.{{minor}}-${{ matrix.type }}
+            type=raw,value=latest,enable=${{ matrix.type == env.DEFAULT_TYPE }}
+            type=sha
+          flavor: |
+            latest=false
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: Dockerfile.${{ matrix.type }}
+          platforms: ${{ env.platforms }}
+          push: true
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+
+      - name: Version output
+        id: version
+        run: echo "version=${{ steps.meta.outputs.version }}" >> "$GITHUB_OUTPUT"
\ No newline at end of file

From af380929965c4234de9705b1d64d5d1e9f33748b Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 12:11:39 +0200
Subject: [PATCH 04/11] chore: add dockerhub image in docker-compose

---
 docker-compose.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/docker-compose.yaml b/docker-compose.yaml
index 63913678f..a3d516726 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -7,6 +7,7 @@ services:
   # Private-GPT service for the Ollama CPU and GPU modes
   # This service builds from an external Dockerfile and runs the Ollama mode.
   private-gpt-ollama:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.0}-external
     build:
       context: .
      dockerfile: Dockerfile.external
@@ -29,6 +30,7 @@ services:
   # Private-GPT service for the local mode
   # This service builds from a local Dockerfile and runs the application in local mode.
   private-gpt-local:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.0}-local
    build:
      context: .
      dockerfile: Dockerfile.local

From 4958cccecdba43abd7d8341ee42e8fee783bb918 Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 12:11:52 +0200
Subject: [PATCH 05/11] docs: update quickstart with local/remote images

---
 fern/docs/pages/quickstart/quickstart.mdx | 72 +++++++++++++++--------
 1 file changed, 46 insertions(+), 26 deletions(-)

diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx
index 702d8ed75..d308cc92a 100644
--- a/fern/docs/pages/quickstart/quickstart.mdx
+++ b/fern/docs/pages/quickstart/quickstart.mdx
@@ -1,85 +1,105 @@
 This guide provides a quick start for running different profiles of PrivateGPT using Docker Compose.
-The profiles cater to various environments, including Ollama setups (CPU, CUDA, MacOS) and fully Local setup.
+The profiles cater to various environments, including Ollama setups (CPU, CUDA, macOS), and a fully local setup.
+
+By default, Docker Compose will download pre-built images from a remote registry when starting the services. However, you have the option to build the images locally if needed. Details on building the Docker images locally are provided at the end of this guide.
 
 If you want to run PrivateGPT locally without Docker, refer to the [Local Installation Guide](/installation).
 
-#### Prerequisites
+## Prerequisites
 - **Docker and Docker Compose:** Ensure both are installed on your system.
-[Installation Guide for Docker](https://docs.docker.com/get-docker/), [Installation Guide for Docker Compose](https://docs.docker.com/compose/install/).
+  [Installation Guide for Docker](https://docs.docker.com/get-docker/), [Installation Guide for Docker Compose](https://docs.docker.com/compose/install/).
 - **Clone PrivateGPT Repository:** Clone the PrivateGPT repository to your machine and navigate to the directory:
   ```sh
   git clone https://github.com/zylon-ai/private-gpt.git
   cd private-gpt
   ```
 
----
-
-## Ollama Setups (Recommended)
+## Setups
 
-Ollama setups are recommended for their ease of use and optimized configurations. Ollama offers different profiles depending on your hardware capabilities and operating system.
+### Ollama Setups (Recommended)
 
-### 1. Default/Ollama CPU
+#### 1. Default/Ollama CPU
 
 **Description:**
 This profile runs the Ollama service using CPU resources. It is the standard configuration for running Ollama-based Private-GPT services without GPU acceleration.
 
 **Run:**
-To start the services, use either of the following commands:
+To start the services using pre-built images, run:
 ```sh
 docker-compose up
 ```
-or
+or with a specific profile:
 ```sh
 docker-compose --profile ollama up
 ```
 
-### 2. Ollama Nvidia CUDA
+#### 2. Ollama Nvidia CUDA
 
 **Description:**
 This profile leverages GPU acceleration with CUDA support, suitable for computationally intensive tasks that benefit from GPU resources.
 
 **Requirements:**
-- Ensure that your system has compatible GPU hardware and the necessary NVIDIA drivers installed. The installation process is detailed [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
+Ensure that your system has compatible GPU hardware and the necessary NVIDIA drivers installed. The installation process is detailed [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
 
 **Run:**
-To start the services with CUDA support, use:
+To start the services with CUDA support using pre-built images, run:
 ```sh
 docker-compose --profile ollama-cuda up
 ```
 
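+Optionally, to confirm that the container can actually see the GPU once the services are up, one quick check (a suggested extra step: it assumes the NVIDIA Container Toolkit is installed and uses the `ollama-cuda` service name from `docker-compose.yaml`) is:
+```sh
+docker-compose --profile ollama-cuda exec ollama-cuda nvidia-smi
+```
+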
-### 3. Ollama Host
+#### 3. Ollama Host
 
 **Description:**
 This profile is designed for running PrivateGPT using Ollama installed on the host machine. This setup is particularly useful for macOS users, as Docker does not yet support Metal GPUs.
 
 **Requirements:**
-- Install Ollama on your machine by following the instructions at [ollama.ai](https://ollama.ai/).
-- Start the Ollama service with the command:
+Install Ollama on your machine by following the instructions at [ollama.ai](https://ollama.ai/).
+
+**Run:**
+To start the Ollama service, use:
 ```sh
 OLLAMA_HOST=0.0.0.0 ollama serve
 ```
-
-**Run:**
-To start the services with the host configuration, use:
+To start the services with the host configuration using pre-built images, run:
 ```sh
 docker-compose --profile ollama-host up
 ```
 
----
-
-## Fully Local Setups
+### Fully Local Setups
 
-### LlamaCPP + HuggingFace Embeddings
+#### LlamaCPP + HuggingFace Embeddings
 
 **Description:**
 This profile runs the Private-GPT services locally using `llama-cpp` and Hugging Face models.
 
 **Requirements:**
-- **Hugging Face Token (HF_TOKEN):** Required for accessing Hugging Face models. Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).
+A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face models. Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).
 
 **Run:**
-Start the services with your Hugging Face token:
+Start the services with your Hugging Face token using pre-built images:
 ```sh
 HF_TOKEN=<your_hf_token> docker-compose up --profile local
 ```
-Replace `<your_hf_token>` with your actual Hugging Face token.
\ No newline at end of file
+Replace `<your_hf_token>` with your actual Hugging Face token.
+
+## Building Locally
+
+If you prefer to build Docker images locally, which is useful when making changes to the codebase or the Dockerfiles, follow these steps:
+
+### Building the Images
+To build the Docker images locally, navigate to the cloned repository directory and run:
+```sh
+docker-compose build
+```
+This command builds the necessary Docker images based on the current codebase and Dockerfile configurations.
+
+### Forcing a Rebuild with --build
+If you have made changes and need to ensure these changes are reflected in the Docker images, you can force a rebuild before starting the services:
+```sh
+docker-compose up --build
+```
+or with a specific profile:
+```sh
+docker-compose --profile <profile> up --build
+```
+Replace `<profile>` with the desired profile.
\ No newline at end of file

From 0f67e569065d36c84ec7af2358f30355f32c8765 Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 16:34:08 +0200
Subject: [PATCH 06/11] chore: update docker tag

---
 docker-compose.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docker-compose.yaml b/docker-compose.yaml
index a3d516726..cd623f49a 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -7,7 +7,7 @@ services:
   # Private-GPT service for the Ollama CPU and GPU modes
   # This service builds from an external Dockerfile and runs the Ollama mode.
   private-gpt-ollama:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.0}-external
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-external
     build:
       context: .
       dockerfile: Dockerfile.external
@@ -30,7 +30,7 @@ services:
   # Private-GPT service for the local mode
   # This service builds from a local Dockerfile and runs the application in local mode.
   private-gpt-local:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.0}-local
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-local
     build:
       context: .
       dockerfile: Dockerfile.local

From 5a5b2123ff053a57a1566c889cd8e551ebc9b777 Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 16:52:53 +0200
Subject: [PATCH 07/11] chore: refactor dockerfile names

---
 .github/workflows/generate-release.yml   | 4 ++--
 Dockerfile.local => Dockerfile.llamacpp  | 0
 Dockerfile.external => Dockerfile.ollama | 0
 docker-compose.yaml                      | 4 ++--
 4 files changed, 4 insertions(+), 4 deletions(-)
 rename Dockerfile.local => Dockerfile.llamacpp (100%)
 rename Dockerfile.external => Dockerfile.ollama (100%)

diff --git a/.github/workflows/generate-release.yml b/.github/workflows/generate-release.yml
index b84c3fa4d..ebcd0c988 100644
--- a/.github/workflows/generate-release.yml
+++ b/.github/workflows/generate-release.yml
@@ -9,7 +9,7 @@ env:
   REGISTRY: docker.io
   IMAGE_NAME: ${{ github.repository }}
   platforms: linux/amd64,linux/arm64
-  DEFAULT_TYPE: "external"
+  DEFAULT_TYPE: "ollama"
 
 jobs:
   build-and-push-image:
@@ -17,7 +17,7 @@ jobs:
 
     strategy:
       matrix:
-        type: [ local, external ]
+        type: [ llamacpp, ollama ]
 
     permissions:
       contents: read
diff --git a/Dockerfile.local b/Dockerfile.llamacpp
similarity index 100%
rename from Dockerfile.local
rename to Dockerfile.llamacpp
diff --git a/Dockerfile.external b/Dockerfile.ollama
similarity index 100%
rename from Dockerfile.external
rename to Dockerfile.ollama
diff --git a/docker-compose.yaml b/docker-compose.yaml
index cd623f49a..21b220956 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -10,7 +10,7 @@ services:
     image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-external
     build:
       context: .
-      dockerfile: Dockerfile.external
+      dockerfile: Dockerfile.ollama
     volumes:
       - ./local_data/:/home/worker/app/local_data
     ports:
@@ -33,7 +33,7 @@ services:
     image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-local
     build:
       context: .
-      dockerfile: Dockerfile.local
+      dockerfile: Dockerfile.llamacpp
     volumes:
       - ./local_data/:/home/worker/app/local_data
       - ./models/:/home/worker/app/models

From e54fac0a9cc30e6c179490a12e278f93e7527b9d Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 16:53:16 +0200
Subject: [PATCH 08/11] chore: update docker-compose names

---
 docker-compose.yaml                       | 11 ++++++-----
 fern/docs/pages/quickstart/quickstart.mdx |  8 ++++----
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/docker-compose.yaml b/docker-compose.yaml
index 21b220956..8c6943813 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -21,11 +21,12 @@ services:
       PGPT_MODE: ollama
       PGPT_EMBED_MODE: ollama
       PGPT_OLLAMA_API_BASE: http://ollama:11434
+      HF_TOKEN: ${HF_TOKEN:-}
     profiles:
       - ""
-      - ollama
+      - ollama-cpu
       - ollama-cuda
-      - ollama-host
+      - ollama-api
 
   # Private-GPT service for the local mode
   # This service builds from a local Dockerfile and runs the application in local mode.
@@ -45,7 +46,7 @@ services:
       PGPT_PROFILES: local
       HF_TOKEN: ${HF_TOKEN}
     profiles:
-      - local
+      - llamacpp
 
   #-----------------------------------
   #---- Ollama services --------------
@@ -72,9 +73,9 @@ services:
     extra_hosts:
       - "host.docker.internal:host-gateway"
     profiles:
      - ""
-      - ollama
+      - ollama-cpu
       - ollama-cuda
-      - ollama-host
+      - ollama-api
 
   # Ollama service for the CPU mode
   ollama-cpu:
diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx
index d308cc92a..e5f6c46a7 100644
--- a/fern/docs/pages/quickstart/quickstart.mdx
+++ b/fern/docs/pages/quickstart/quickstart.mdx
@@ -30,7 +30,7 @@ docker-compose up
 ```
 or with a specific profile:
 ```sh
-docker-compose --profile ollama up
+docker-compose --profile ollama-cpu up
 ```
 
 #### 2. Ollama Nvidia CUDA
@@ -47,7 +47,7 @@ To start the services with CUDA support using pre-built images, run:
 docker-compose --profile ollama-cuda up
 ```
 
-#### 3. Ollama Host
+#### 3. Ollama External API
 
 **Description:**
 This profile is designed for running PrivateGPT using Ollama installed on the host machine. This setup is particularly useful for macOS users, as Docker does not yet support Metal GPUs.
@@ -62,7 +62,7 @@ OLLAMA_HOST=0.0.0.0 ollama serve
 ```
 To start the services with the host configuration using pre-built images, run:
 ```sh
-docker-compose --profile ollama-host up
+docker-compose --profile ollama-api up
 ```
 
 ### Fully Local Setups
@@ -78,7 +78,7 @@ A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face model
 **Run:**
 Start the services with your Hugging Face token using pre-built images:
 ```sh
-HF_TOKEN=<your_hf_token> docker-compose up --profile local
+HF_TOKEN=<your_hf_token> docker-compose up --profile llamacpp
 ```
 Replace `<your_hf_token>` with your actual Hugging Face token.

From e5ccf1fd348a3731caa0935c19456ce9cafbdab7 Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 17:02:37 +0200
Subject: [PATCH 09/11] docs: update llamacpp naming

---
 fern/docs/pages/quickstart/quickstart.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx
index e5f6c46a7..70423e453 100644
--- a/fern/docs/pages/quickstart/quickstart.mdx
+++ b/fern/docs/pages/quickstart/quickstart.mdx
@@ -67,7 +67,7 @@ docker-compose --profile ollama-api up
 
 ### Fully Local Setups
 
-#### LlamaCPP + HuggingFace Embeddings
+#### 1. LlamaCPP CPU
 
 **Description:**
 This profile runs the Private-GPT services locally using `llama-cpp` and Hugging Face models.
From 93b5023537148813c87d2cd1758ddfa1c146c0d1 Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 17:08:42 +0200
Subject: [PATCH 10/11] fix: naming

---
 .github/workflows/generate-release.yml         |  2 +-
 Dockerfile.llamacpp => Dockerfile.llamacpp-cpu |  0
 docker-compose.yaml                            | 10 +++++-----
 fern/docs/pages/quickstart/quickstart.mdx      |  2 +-
 4 files changed, 7 insertions(+), 7 deletions(-)
 rename Dockerfile.llamacpp => Dockerfile.llamacpp-cpu (100%)

diff --git a/.github/workflows/generate-release.yml b/.github/workflows/generate-release.yml
index ebcd0c988..82d08d0b4 100644
--- a/.github/workflows/generate-release.yml
+++ b/.github/workflows/generate-release.yml
@@ -17,7 +17,7 @@ jobs:
 
     strategy:
       matrix:
-        type: [ llamacpp, ollama ]
+        type: [ llamacpp-cpu, ollama ]
 
     permissions:
       contents: read
diff --git a/Dockerfile.llamacpp b/Dockerfile.llamacpp-cpu
similarity index 100%
rename from Dockerfile.llamacpp
rename to Dockerfile.llamacpp-cpu
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 8c6943813..a5df4647e 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -7,7 +7,7 @@ services:
   # Private-GPT service for the Ollama CPU and GPU modes
   # This service builds from an external Dockerfile and runs the Ollama mode.
   private-gpt-ollama:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-external
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-ollama
     build:
       context: .
       dockerfile: Dockerfile.ollama
@@ -30,11 +30,11 @@ services:
 
   # Private-GPT service for the local mode
   # This service builds from a local Dockerfile and runs the application in local mode.
-  private-gpt-local:
-    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-local
+  private-gpt-llamacpp-cpu:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-llamacpp-cpu
     build:
       context: .
-      dockerfile: Dockerfile.llamacpp
+      dockerfile: Dockerfile.llamacpp-cpu
     volumes:
       - ./local_data/:/home/worker/app/local_data
       - ./models/:/home/worker/app/models
diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx
index 70423e453..9bcb8804b 100644
--- a/fern/docs/pages/quickstart/quickstart.mdx
+++ b/fern/docs/pages/quickstart/quickstart.mdx
@@ -78,7 +78,7 @@ A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face model
 **Run:**
 Start the services with your Hugging Face token using pre-built images:
 ```sh
-HF_TOKEN=<your_hf_token> docker-compose up --profile llamacpp
+HF_TOKEN=<your_hf_token> docker-compose up --profile llamacpp-cpu
 ```
 Replace `<your_hf_token>` with your actual Hugging Face token.
From d07c4955f8d7c036d10f6e1f2c7e2865096661e4 Mon Sep 17 00:00:00 2001
From: Javier Martinez
Date: Mon, 5 Aug 2024 17:11:04 +0200
Subject: [PATCH 11/11] docs: fix llamacpp command

---
 fern/docs/pages/quickstart/quickstart.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx
index 9bcb8804b..09877ce25 100644
--- a/fern/docs/pages/quickstart/quickstart.mdx
+++ b/fern/docs/pages/quickstart/quickstart.mdx
@@ -78,7 +78,7 @@ A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face model
 **Run:**
 Start the services with your Hugging Face token using pre-built images:
 ```sh
-HF_TOKEN=<your_hf_token> docker-compose up --profile llamacpp-cpu
+HF_TOKEN=<your_hf_token> docker-compose --profile llamacpp-cpu up
 ```
 Replace `<your_hf_token>` with your actual Hugging Face token.
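The release workflow and compose file above together define the image naming scheme `zylonai/private-gpt:<version>-<type>`. A minimal sketch of working with the published images directly, assuming a v0.6.1 release built by `generate-release.yml` (the exact tags on Docker Hub are an assumption based on the workflow's `docker/metadata-action` rules):

```sh
# Pull the Ollama-mode application image produced for the release
docker pull zylonai/private-gpt:0.6.1-ollama

# Or point the compose services at a specific image/tag without editing
# docker-compose.yaml, via the PGPT_IMAGE / PGPT_TAG variables it references
PGPT_IMAGE=zylonai/private-gpt PGPT_TAG=0.6.1 docker-compose --profile ollama-cpu up
```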