diff --git a/container-images/ramalama/39-nvidia/Containerfile b/container-images/ramalama/39-nvidia/Containerfile
index 0cab11a..a864395 100644
--- a/container-images/ramalama/39-nvidia/Containerfile
+++ b/container-images/ramalama/39-nvidia/Containerfile
@@ -16,7 +16,7 @@
 ENV LLAMA_CURL=1
 ENV LLAMA_VULKAN=1
 ENV GGML_CUDA=1
-RUN git clone -b podman-llm https://github.com/ericcurtin/llama.cpp.git && \
+RUN git clone -b ramlama https://github.com/ericcurtin/llama.cpp.git && \
     cd llama.cpp && \
     cmake -B build -DLLAMA_CCACHE=0 -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 \
       -DGGML_CUDA=1 && \
diff --git a/container-images/ramalama/39/Containerfile b/container-images/ramalama/39/Containerfile
index fd8425b..63bc165 100644
--- a/container-images/ramalama/39/Containerfile
+++ b/container-images/ramalama/39/Containerfile
@@ -21,7 +21,7 @@
 ENV LLAMA_CCACHE=0
 ENV LLAMA_CURL=1
 ENV LLAMA_VULKAN=1
-RUN git clone -b podman-llm https://github.com/ericcurtin/llama.cpp.git && \
+RUN git clone -b ramlama https://github.com/ericcurtin/llama.cpp.git && \
     cd llama.cpp && \
     cmake -B build -DLLAMA_CCACHE=0 -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
     cmake --build build --config Release -j $(nproc) && \
diff --git a/container-images/ramalama/41-amd/Containerfile b/container-images/ramalama/41-amd/Containerfile
index 434a0c6..c3c0352 100644
--- a/container-images/ramalama/41-amd/Containerfile
+++ b/container-images/ramalama/41-amd/Containerfile
@@ -23,7 +23,7 @@
 ENV LLAMA_CURL=1
 ENV LLAMA_VULKAN=1
 ENV GGML_HIPBLAS=1
-RUN git clone -b podman-llm https://github.com/ericcurtin/llama.cpp.git && \
+RUN git clone -b ramlama https://github.com/ericcurtin/llama.cpp.git && \
     cd llama.cpp && \
     cmake -B build -DLLAMA_CCACHE=0 -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 \
       -DGGML_HIPBLAS=1 && \
diff --git a/container-images/ramalama/latest-amd/Containerfile b/container-images/ramalama/latest-amd/Containerfile
index 434a0c6..c3c0352 100644
--- a/container-images/ramalama/latest-amd/Containerfile
+++ b/container-images/ramalama/latest-amd/Containerfile
@@ -23,7 +23,7 @@
 ENV LLAMA_CURL=1
 ENV LLAMA_VULKAN=1
 ENV GGML_HIPBLAS=1
-RUN git clone -b podman-llm https://github.com/ericcurtin/llama.cpp.git && \
+RUN git clone -b ramlama https://github.com/ericcurtin/llama.cpp.git && \
     cd llama.cpp && \
     cmake -B build -DLLAMA_CCACHE=0 -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 \
       -DGGML_HIPBLAS=1 && \
diff --git a/container-images/ramalama/latest-nvidia/Containerfile b/container-images/ramalama/latest-nvidia/Containerfile
index 0cab11a..a864395 100644
--- a/container-images/ramalama/latest-nvidia/Containerfile
+++ b/container-images/ramalama/latest-nvidia/Containerfile
@@ -16,7 +16,7 @@
 ENV LLAMA_CURL=1
 ENV LLAMA_VULKAN=1
 ENV GGML_CUDA=1
-RUN git clone -b podman-llm https://github.com/ericcurtin/llama.cpp.git && \
+RUN git clone -b ramlama https://github.com/ericcurtin/llama.cpp.git && \
     cd llama.cpp && \
     cmake -B build -DLLAMA_CCACHE=0 -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 \
       -DGGML_CUDA=1 && \
diff --git a/container-images/ramalama/latest/Containerfile b/container-images/ramalama/latest/Containerfile
index fd8425b..63bc165 100644
--- a/container-images/ramalama/latest/Containerfile
+++ b/container-images/ramalama/latest/Containerfile
@@ -21,7 +21,7 @@
 ENV LLAMA_CCACHE=0
 ENV LLAMA_CURL=1
 ENV LLAMA_VULKAN=1
-RUN git clone -b podman-llm https://github.com/ericcurtin/llama.cpp.git && \
+RUN git clone -b ramlama https://github.com/ericcurtin/llama.cpp.git && \
     cd llama.cpp && \
     cmake -B build -DLLAMA_CCACHE=0 -DLLAMA_VULKAN=1 -DLLAMA_CURL=1 && \
     cmake --build build --config Release -j $(nproc) && \