diff --git a/.github/workflows/generate-release.yml b/.github/workflows/generate-release.yml
index b84c3fa4d..82d08d0b4 100644
--- a/.github/workflows/generate-release.yml
+++ b/.github/workflows/generate-release.yml
@@ -9,7 +9,7 @@ env:
   REGISTRY: docker.io
   IMAGE_NAME: ${{ github.repository }}
   platforms: linux/amd64,linux/arm64
-  DEFAULT_TYPE: "external"
+  DEFAULT_TYPE: "ollama"
 
 jobs:
   build-and-push-image:
@@ -17,7 +17,7 @@ jobs:
 
     strategy:
       matrix:
-        type: [ local, external ]
+        type: [ llamacpp-cpu, ollama ]
 
     permissions:
       contents: read
diff --git a/Dockerfile.local b/Dockerfile.llamacpp-cpu
similarity index 100%
rename from Dockerfile.local
rename to Dockerfile.llamacpp-cpu
diff --git a/Dockerfile.external b/Dockerfile.ollama
similarity index 100%
rename from Dockerfile.external
rename to Dockerfile.ollama
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 63913678f..a5df4647e 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -7,9 +7,10 @@ services:
   # Private-GPT service for the Ollama CPU and GPU modes
   # This service builds from an external Dockerfile and runs the Ollama mode.
   private-gpt-ollama:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-ollama
     build:
       context: .
-      dockerfile: Dockerfile.external
+      dockerfile: Dockerfile.ollama
     volumes:
       - ./local_data/:/home/worker/app/local_data
     ports:
@@ -20,18 +21,20 @@ services:
       PGPT_MODE: ollama
       PGPT_EMBED_MODE: ollama
       PGPT_OLLAMA_API_BASE: http://ollama:11434
+      HF_TOKEN: ${HF_TOKEN:-}
     profiles:
       - ""
-      - ollama
+      - ollama-cpu
       - ollama-cuda
-      - ollama-host
+      - ollama-api
 
   # Private-GPT service for the local mode
   # This service builds from a local Dockerfile and runs the application in local mode.
-  private-gpt-local:
+  private-gpt-llamacpp-cpu:
+    image: ${PGPT_IMAGE:-zylonai/private-gpt}:${PGPT_TAG:-0.6.1}-llamacpp-cpu
     build:
       context: .
-      dockerfile: Dockerfile.local
+      dockerfile: Dockerfile.llamacpp-cpu
     volumes:
       - ./local_data/:/home/worker/app/local_data
       - ./models/:/home/worker/app/models
@@ -43,7 +46,7 @@ services:
       PGPT_PROFILES: local
       HF_TOKEN: ${HF_TOKEN}
     profiles:
-      - local
+      - llamacpp-cpu
 
   #-----------------------------------
   #---- Ollama services --------------
@@ -70,9 +73,9 @@ services:
       - "host.docker.internal:host-gateway"
     profiles:
       - ""
-      - ollama
+      - ollama-cpu
       - ollama-cuda
-      - ollama-host
+      - ollama-api
 
   # Ollama service for the CPU mode
   ollama-cpu:
diff --git a/fern/docs/pages/quickstart/quickstart.mdx b/fern/docs/pages/quickstart/quickstart.mdx
index 702d8ed75..09877ce25 100644
--- a/fern/docs/pages/quickstart/quickstart.mdx
+++ b/fern/docs/pages/quickstart/quickstart.mdx
@@ -1,85 +1,105 @@
 This guide provides a quick start for running different profiles of PrivateGPT using Docker Compose.
-The profiles cater to various environments, including Ollama setups (CPU, CUDA, MacOS) and fully Local setup.
+The profiles cater to various environments, including Ollama setups (CPU, CUDA, macOS), and a fully local setup.
+
+By default, Docker Compose will download pre-built images from a remote registry when starting the services. However, you have the option to build the images locally if needed. Details on building the Docker images locally are provided at the end of this guide.
 
 If you want to run PrivateGPT locally without Docker, refer to the [Local Installation Guide](/installation).
 
-#### Prerequisites
+## Prerequisites
 - **Docker and Docker Compose:** Ensure both are installed on your system.
-[Installation Guide for Docker](https://docs.docker.com/get-docker/), [Installation Guide for Docker Compose](https://docs.docker.com/compose/install/).
+  [Installation Guide for Docker](https://docs.docker.com/get-docker/), [Installation Guide for Docker Compose](https://docs.docker.com/compose/install/).
 - **Clone PrivateGPT Repository:** Clone the PrivateGPT repository to your machine and navigate to the directory:
   ```sh
   git clone https://github.com/zylon-ai/private-gpt.git
   cd private-gpt
   ```
 
----
-
-## Ollama Setups (Recommended)
+## Setups
 
-Ollama setups are recommended for their ease of use and optimized configurations. Ollama offers different profiles depending on your hardware capabilities and operating system.
+### Ollama Setups (Recommended)
 
-### 1. Default/Ollama CPU
+#### 1. Default/Ollama CPU
 
 **Description:**
 This profile runs the Ollama service using CPU resources. It is the standard configuration for running Ollama-based Private-GPT services without GPU acceleration.
 
 **Run:**
-To start the services, use either of the following commands:
+To start the services using pre-built images, run:
 ```sh
 docker-compose up
 ```
-or
+or with a specific profile:
 ```sh
-docker-compose --profile ollama up
+docker-compose --profile ollama-cpu up
 ```
 
-### 2. Ollama Nvidia CUDA
+#### 2. Ollama Nvidia CUDA
 
 **Description:**
 This profile leverages GPU acceleration with CUDA support, suitable for computationally intensive tasks that benefit from GPU resources.
 
 **Requirements:**
-- Ensure that your system has compatible GPU hardware and the necessary NVIDIA drivers installed. The installation process is detailed [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
+Ensure that your system has compatible GPU hardware and the necessary NVIDIA drivers installed. The installation process is detailed [here](https://docs.nvidia.com/cuda/cuda-installation-guide-microsoft-windows/index.html).
 
 **Run:**
-To start the services with CUDA support, use:
+To start the services with CUDA support using pre-built images, run:
 ```sh
 docker-compose --profile ollama-cuda up
 ```
 
-### 3. Ollama Host
+#### 3. Ollama External API
 
 **Description:**
 This profile is designed for running PrivateGPT using Ollama installed on the host machine. This setup is particularly useful for macOS users, as Docker does not yet support Metal GPUs.
 
 **Requirements:**
-- Install Ollama on your machine by following the instructions at [ollama.ai](https://ollama.ai/).
-- Start the Ollama service with the command:
+Install Ollama on your machine by following the instructions at [ollama.ai](https://ollama.ai/).
+
+**Run:**
+To start the Ollama service, use:
 ```sh
 OLLAMA_HOST=0.0.0.0 ollama serve
 ```
-
-**Run:**
-To start the services with the host configuration, use:
+To start the services with the host configuration using pre-built images, run:
 ```sh
-docker-compose --profile ollama-host up
+docker-compose --profile ollama-api up
 ```
 
----
-
-## Fully Local Setups
+### Fully Local Setups
 
-### LlamaCPP + HuggingFace Embeddings
+#### 1. LlamaCPP CPU
 
 **Description:**
 This profile runs the Private-GPT services locally using `llama-cpp` and Hugging Face models.
 
 **Requirements:**
-- **Hugging Face Token (HF_TOKEN):** Required for accessing Hugging Face models. Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).
+A **Hugging Face Token (HF_TOKEN)** is required for accessing Hugging Face models.
+Obtain your token following [this guide](/installation/getting-started/troubleshooting#downloading-gated-and-private-models).
 
 **Run:**
-Start the services with your Hugging Face token:
+Start the services with your Hugging Face token using pre-built images:
+```sh
+HF_TOKEN=<your_hf_token> docker-compose --profile llamacpp-cpu up
+```
+Replace `<your_hf_token>` with your actual Hugging Face token.
+
+## Building Locally
+
+If you prefer to build the Docker images locally, which is useful when making changes to the codebase or the Dockerfiles, follow these steps:
+
+### Building the Images
+To build the Docker images locally, navigate to the cloned repository directory and run:
+```sh
+docker-compose build
+```
+This command compiles the necessary Docker images based on the current codebase and Dockerfile configurations.
+
+### Forcing a Rebuild with --build
+If you have made changes and need to ensure these changes are reflected in the Docker images, you can force a rebuild before starting the services:
+```sh
+docker-compose up --build
+```
+or with a specific profile:
 ```sh
-HF_TOKEN=<your_hf_token> docker-compose up --profile local
+docker-compose --profile <profile_name> up --build
 ```
-Replace `<your_hf_token>` with your actual Hugging Face token.
\ No newline at end of file
+Replace `<profile_name>` with the desired profile.
\ No newline at end of file
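
As a quick sanity check of the renamed profiles and the new `image:` keys introduced above, here is a minimal usage sketch; the `<your_hf_token>` placeholder is illustrative, and the `PGPT_IMAGE`/`PGPT_TAG` values shown are simply the compose defaults, not part of the patch:

```sh
# Default Ollama CPU profile, using the pre-built image
docker-compose --profile ollama-cpu up

# Fully local llama-cpp profile; a Hugging Face token is required for gated models
HF_TOKEN=<your_hf_token> docker-compose --profile llamacpp-cpu up

# Override the image reference resolved by the new `image:` keys
# (PGPT_IMAGE defaults to zylonai/private-gpt, PGPT_TAG to 0.6.1)
PGPT_IMAGE=zylonai/private-gpt PGPT_TAG=0.6.1 docker-compose --profile ollama-cpu up
```

As the updated guide states, plain `docker-compose up` uses the pre-built images by default, while `docker-compose up --build` rebuilds them locally from the renamed Dockerfiles.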