diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index e824136a56e16..93669d5312214 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -688,8 +688,6 @@ jobs:
env:
OPENBLAS_VERSION: 0.3.23
- OPENCL_VERSION: 2023.04.17
- CLBLAST_VERSION: 1.6.0
SDE_VERSION: 9.33.0-2024-01-07
VULKAN_VERSION: 1.3.261.1
@@ -706,8 +704,6 @@ jobs:
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF -DBUILD_SHARED_LIBS=ON'
- build: 'avx512-x64'
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON'
- - build: 'clblast-x64'
- defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CLBLAST=ON -DBUILD_SHARED_LIBS=ON -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/clblast"'
- build: 'openblas-x64'
defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
- build: 'kompute-x64'
@@ -732,27 +728,6 @@ jobs:
run: |
git submodule update --init kompute
- - name: Download OpenCL SDK
- id: get_opencl
- if: ${{ matrix.build == 'clblast-x64' }}
- run: |
- curl.exe -o $env:RUNNER_TEMP/opencl.zip -L "https://github.com/KhronosGroup/OpenCL-SDK/releases/download/v${env:OPENCL_VERSION}/OpenCL-SDK-v${env:OPENCL_VERSION}-Win-x64.zip"
- mkdir $env:RUNNER_TEMP/opencl
- tar.exe -xvf $env:RUNNER_TEMP/opencl.zip --strip-components=1 -C $env:RUNNER_TEMP/opencl
-
- - name: Download CLBlast
- id: get_clblast
- if: ${{ matrix.build == 'clblast-x64' }}
- run: |
- curl.exe -o $env:RUNNER_TEMP/clblast.7z -L "https://github.com/CNugteren/CLBlast/releases/download/${env:CLBLAST_VERSION}/CLBlast-${env:CLBLAST_VERSION}-windows-x64.7z"
- curl.exe -o $env:RUNNER_TEMP/CLBlast.LICENSE.txt -L "https://github.com/CNugteren/CLBlast/raw/${env:CLBLAST_VERSION}/LICENSE"
- 7z x "-o${env:RUNNER_TEMP}" $env:RUNNER_TEMP/clblast.7z
- rename-item $env:RUNNER_TEMP/CLBlast-${env:CLBLAST_VERSION}-windows-x64 clblast
- foreach ($f in (gci -Recurse -Path "$env:RUNNER_TEMP/clblast" -Filter '*.cmake')) {
- $txt = Get-Content -Path $f -Raw
- $txt.Replace('C:/vcpkg/packages/opencl_x64-windows/', "$($env:RUNNER_TEMP.Replace('\','/'))/opencl/") | Set-Content -Path $f -Encoding UTF8
- }
-
- name: Download OpenBLAS
id: get_openblas
if: ${{ matrix.build == 'openblas-x64' }}
@@ -786,13 +761,6 @@ jobs:
cmake -S . -B build ${{ matrix.defines }}
cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS}
- - name: Add clblast.dll
- id: add_clblast_dll
- if: ${{ matrix.build == 'clblast-x64' }}
- run: |
- cp $env:RUNNER_TEMP/clblast/lib/clblast.dll ./build/bin/Release
- cp $env:RUNNER_TEMP/CLBlast.LICENSE.txt ./build/bin/Release/CLBlast-${env:CLBLAST_VERSION}.txt
-
- name: Add libopenblas.dll
id: add_libopenblas_dll
if: ${{ matrix.build == 'openblas-x64' }}
@@ -816,7 +784,7 @@ jobs:
- name: Test
id: cmake_test
# not all machines have native AVX-512
- if: ${{ matrix.build != 'msvc-arm64' && matrix.build != 'llvm-arm64' && matrix.build != 'clblast-x64' && matrix.build != 'kompute-x64' && matrix.build != 'vulkan-x64' && (matrix.build != 'avx512-x64' || env.HAS_AVX512F == '1') }}
+ if: ${{ matrix.build != 'msvc-arm64' && matrix.build != 'llvm-arm64' && matrix.build != 'kompute-x64' && matrix.build != 'vulkan-x64' && (matrix.build != 'avx512-x64' || env.HAS_AVX512F == '1') }}
run: |
cd build
ctest -L main -C Release --verbose --timeout 900
@@ -1071,7 +1039,7 @@ jobs:
# hypervisor: 'qemu'
# run: |
# sudo pkg update
-# sudo pkg install -y gmake automake autoconf pkgconf llvm15 clinfo clover opencl clblast openblas
+# sudo pkg install -y gmake automake autoconf pkgconf llvm15 openblas
# gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j `sysctl -n hw.ncpu`
release:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 76ea27412caf3..cf37d5bb242ac 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -111,7 +111,6 @@ option(LLAMA_CUDA_FA_ALL_QUANTS "llama: compile all quants for Flas
option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF)
-option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
option(LLAMA_VULKAN "llama: use Vulkan" OFF)
option(LLAMA_VULKAN_CHECK_RESULTS "llama: run Vulkan op checks" OFF)
option(LLAMA_VULKAN_DEBUG "llama: enable Vulkan debug output" OFF)
@@ -502,22 +501,6 @@ if (LLAMA_RPC)
set(GGML_SOURCES_RPC ggml-rpc.cpp)
endif()
-if (LLAMA_CLBLAST)
- find_package(CLBlast)
- if (CLBlast_FOUND)
- message(STATUS "CLBlast found")
-
- set(GGML_HEADERS_OPENCL ggml-opencl.h)
- set(GGML_SOURCES_OPENCL ggml-opencl.cpp)
-
- add_compile_definitions(GGML_USE_CLBLAST)
-
- set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} clblast)
- else()
- message(WARNING "CLBlast not found")
- endif()
-endif()
-
if (LLAMA_VULKAN)
find_package(Vulkan)
if (Vulkan_FOUND)
@@ -1265,7 +1248,6 @@ add_library(ggml OBJECT
ggml-quants.c
ggml-quants.h
${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA}
- ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL}
${GGML_SOURCES_RPC} ${GGML_HEADERS_RPC}
${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA}
@@ -1353,8 +1335,9 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/LlamaConfig.cmake
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/Llama)
set(GGML_PUBLIC_HEADERS "ggml.h" "ggml-alloc.h" "ggml-backend.h"
- "${GGML_HEADERS_CUDA}" "${GGML_HEADERS_OPENCL}"
- "${GGML_HEADERS_METAL}" "${GGML_HEADERS_EXTRA}")
+ "${GGML_HEADERS_CUDA}"
+ "${GGML_HEADERS_METAL}"
+ "${GGML_HEADERS_EXTRA}")
set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
install(TARGETS ggml PUBLIC_HEADER)
diff --git a/Makefile b/Makefile
index b527f6f35b062..802ee6a47654c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# Define the default target now so that it is always the first target
BUILD_TARGETS = \
main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
- simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli baby-llama beam-search \
+ simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli baby-llama \
retrieval speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm tests/test-c.o
# Binaries only useful for tests
@@ -547,23 +547,6 @@ ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h
$(NVCC_COMPILE)
endif # LLAMA_CUDA
-ifdef LLAMA_CLBLAST
- MK_CPPFLAGS += -DGGML_USE_CLBLAST $(shell pkg-config --cflags-only-I clblast OpenCL)
- MK_CFLAGS += $(shell pkg-config --cflags-only-other clblast OpenCL)
- MK_CXXFLAGS += $(shell pkg-config --cflags-only-other clblast OpenCL)
-
- # Mac provides OpenCL as a framework
- ifeq ($(UNAME_S),Darwin)
- MK_LDFLAGS += -lclblast -framework OpenCL
- else
- MK_LDFLAGS += $(shell pkg-config --libs clblast OpenCL)
- endif
- OBJS += ggml-opencl.o
-
-ggml-opencl.o: ggml-opencl.cpp ggml-opencl.h
- $(CXX) $(CXXFLAGS) -c $< -o $@
-endif # LLAMA_CLBLAST
-
ifdef LLAMA_VULKAN
MK_CPPFLAGS += -DGGML_USE_VULKAN
MK_LDFLAGS += -lvulkan
@@ -914,10 +897,6 @@ baby-llama: examples/baby-llama/baby-llama.cpp ggml.o llama.o $(COMMON_DEPS) tra
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-beam-search: examples/beam-search/beam-search.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
- $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
- $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-
finetune: examples/finetune/finetune.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
diff --git a/README-sycl.md b/README-sycl.md
index 37f0306dc4724..62b38135c01c0 100644
--- a/README-sycl.md
+++ b/README-sycl.md
@@ -29,7 +29,7 @@ The llama.cpp SYCL backend is designed to support **Intel GPU** firstly. Based o
When targeting **Intel CPU**, it is recommended to use llama.cpp for [Intel oneMKL](README.md#intel-onemkl) backend.
-It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS, cuBLAS, CLBlast etc..*. In beginning work, the oneAPI's [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) open-source migration tool (Commercial release [IntelĀ® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) was used for this purpose.
+It has a similar design to other llama.cpp BLAS-based paths such as *OpenBLAS, cuBLAS, etc.*. In the initial work, oneAPI's [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) open-source migration tool (Commercial release [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) was used for this purpose.
## News
diff --git a/README.md b/README.md
index 8680460aab700..9d2a59d89d6f8 100644
--- a/README.md
+++ b/README.md
@@ -77,7 +77,7 @@ variety of hardware - locally and in the cloud.
- AVX, AVX2 and AVX512 support for x86 architectures
- 1.5-bit, 2-bit, 3-bit, 4-bit, 5-bit, 6-bit, and 8-bit integer quantization for faster inference and reduced memory use
- Custom CUDA kernels for running LLMs on NVIDIA GPUs (support for AMD GPUs via HIP)
-- Vulkan, SYCL, and (partial) OpenCL backend support
+- Vulkan and SYCL backend support
- CPU+GPU hybrid inference to partially accelerate models larger than the total VRAM capacity
Since its [inception](https://github.com/ggerganov/llama.cpp/issues/33#issuecomment-1465108022), the project has
@@ -364,17 +364,6 @@ In order to build llama.cpp you have four different options.
cmake --build build --config Debug
```
-- Using `Zig` (version 0.11 or later):
-
- Building for optimization levels and CPU features can be accomplished using standard build arguments, for example AVX2, FMA, F16C,
- it's also possible to cross compile for other operating systems and architectures:
-
- ```bash
- zig build -Doptimize=ReleaseFast -Dtarget=x86_64-windows-gnu -Dcpu=x86_64+avx2+fma+f16c
- ```
-
- The `zig targets` command will give you valid options to use.
-
- Using `gmake` (FreeBSD):
1. Install and activate [DRM in FreeBSD](https://wiki.freebsd.org/Graphics)
@@ -382,16 +371,11 @@ In order to build llama.cpp you have four different options.
3. Install compilation dependencies.
```bash
- sudo pkg install gmake automake autoconf pkgconf llvm15 clinfo clover \
- opencl clblast openblas
+ sudo pkg install gmake automake autoconf pkgconf llvm15 openblas
gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4
```
- **Notes:** With this packages you can build llama.cpp with OPENBLAS and
- CLBLAST support for use OpenCL GPU acceleration in FreeBSD. Please read
- the instructions for use and activate this options in this document below.
-
### Homebrew
On Mac and Linux, the homebrew package manager can be used via
@@ -410,7 +394,7 @@ argument.
### BLAS Build
-Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Support with CPU-only BLAS implementations doesn't affect the normal generation performance. We may see generation performance improvements with GPU-involved BLAS implementations, e.g. cuBLAS, hipBLAS and CLBlast. There are currently several different BLAS implementations available for build and use:
+Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Support with CPU-only BLAS implementations doesn't affect the normal generation performance. We may see generation performance improvements with GPU-involved BLAS implementations, e.g. cuBLAS and hipBLAS. There are currently several different BLAS implementations available for build and use:
- #### Accelerate Framework:
@@ -564,111 +548,6 @@ Building the program with BLAS support may lead to some performance improvements
| LLAMA_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the HIP mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. Does not affect k-quants. |
| LLAMA_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per HIP thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
-- #### CLBlast
-
- OpenCL acceleration is provided by the matrix multiplication kernels from the [CLBlast](https://github.com/CNugteren/CLBlast) project and custom kernels for ggml that can generate tokens on the GPU.
-
- You will need the [OpenCL SDK](https://github.com/KhronosGroup/OpenCL-SDK).
- - For Ubuntu, Debian, and Fedora the packages `opencl-headers`, `ocl-icd` may be needed.
-
- - For Windows, a pre-built SDK is available on the [OpenCL Releases](https://github.com/KhronosGroup/OpenCL-SDK/releases) page.
-
- -
- Installing the OpenCL SDK from source
-
- ```sh
- git clone --recurse-submodules https://github.com/KhronosGroup/OpenCL-SDK.git
- cd OpenCL-SDK
- cmake -B build -DBUILD_DOCS=OFF \
- -DBUILD_EXAMPLES=OFF \
- -DBUILD_TESTING=OFF \
- -DOPENCL_SDK_BUILD_SAMPLES=OFF \
- -DOPENCL_SDK_TEST_SAMPLES=OFF
- cmake --build build
- cmake --install build --prefix /some/path
- ```
-
-
- ##### Installing CLBlast
-
- Pre-built CLBlast binaries may be found on the [CLBlast Releases](https://github.com/CNugteren/CLBlast/releases) page. For Unix variants, it may also be found in your operating system's packages.
-
- Linux packaging:
- Fedora Linux:
- ```bash
- sudo dnf install clblast
- ```
-
- Alternatively, they may be built from source.
-
- -
- Windows:
-
- ```cmd
- set OPENCL_SDK_ROOT="C:/OpenCL-SDK-v2023.04.17-Win-x64"
- git clone https://github.com/CNugteren/CLBlast.git
- cd CLBlast
- cmake -B build -DBUILD_SHARED_LIBS=OFF -DOVERRIDE_MSVC_FLAGS_TO_MT=OFF -DTUNERS=OFF -DOPENCL_ROOT=%OPENCL_SDK_ROOT% -G "Visual Studio 17 2022" -A x64
- cmake --build build --config Release
- cmake --install build --prefix C:/CLBlast
- ```
-
- (note: `--config Release` at build time is the default and only relevant for Visual Studio builds - or multi-config Ninja builds)
-
- -
- Unix:
-
- ```sh
- git clone https://github.com/CNugteren/CLBlast.git
- cd CLBlast
- cmake -B build -DBUILD_SHARED_LIBS=OFF -DTUNERS=OFF
- cmake --build build --config Release
- cmake --install build --prefix /some/path
- ```
-
- Where `/some/path` is where the built library will be installed (default is `/usr/local`).
-
-
- ##### Building Llama with CLBlast
-
- - Build with make:
- ```sh
- make LLAMA_CLBLAST=1
- ```
- - CMake (Unix):
- ```sh
- cmake -B build -DLLAMA_CLBLAST=ON -DCLBlast_DIR=/some/path
- cmake --build build --config Release
- ```
- - CMake (Windows):
- ```cmd
- set CL_BLAST_CMAKE_PKG="C:/CLBlast/lib/cmake/CLBlast"
- git clone https://github.com/ggerganov/llama.cpp
- cd llama.cpp
- cmake -B build -DBUILD_SHARED_LIBS=OFF -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH=%CL_BLAST_CMAKE_PKG% -G "Visual Studio 17 2022" -A x64
- cmake --build build --config Release
- cmake --install build --prefix C:/LlamaCPP
- ```
-
- ##### Running Llama with CLBlast
-
- The CLBlast build supports `--gpu-layers|-ngl` like the CUDA version does.
-
- To select the correct platform (driver) and device (GPU), you can use the environment variables `GGML_OPENCL_PLATFORM` and `GGML_OPENCL_DEVICE`.
- The selection can be a number (starting from 0) or a text string to search:
-
- ```sh
- GGML_OPENCL_PLATFORM=1 ./main ...
- GGML_OPENCL_DEVICE=2 ./main ...
- GGML_OPENCL_PLATFORM=Intel ./main ...
- GGML_OPENCL_PLATFORM=AMD GGML_OPENCL_DEVICE=1 ./main ...
- ```
-
- The default behavior is to find the first GPU device, but when it is an integrated GPU on a laptop, for instance, the selectors are useful.
- Using the variables it is possible to select a CPU-based driver as well, if so desired.
-
- You can get a list of platforms and devices from the `clinfo -l` command, etc.
-
- #### Vulkan
**With docker**:
diff --git a/common/common.cpp b/common/common.cpp
index 022bfe28731ba..c8df9a4ce8ef5 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -7,20 +7,21 @@
#include
#include
+#include
#include
+#include
+#include
#include
#include
#include
-#include
#include
+#include
#include
#include
#include
#include
#include
#include
-#include
-#include
#if defined(__APPLE__) && defined(__MACH__)
#include
@@ -237,10 +238,7 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
}
}
- if (params.prompt_cache_all &&
- (params.interactive || params.interactive_first ||
- params.instruct)) {
-
+ if (params.prompt_cache_all && (params.interactive || params.interactive_first)) {
throw std::invalid_argument("error: --prompt-cache-all not supported in interactive mode yet\n");
}
@@ -265,22 +263,25 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
}
bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
- bool result = true;
+ const auto params_org = params; // the example can modify the default params
+
try {
- if (!gpt_params_parse_ex(argc, argv, params)) {
- gpt_params_print_usage(argc, argv, gpt_params());
- exit(0);
+ if (!gpt_params_parse_ex(argc, argv, params) || params.usage) {
+ params = params_org;
+ params.usage = true;
+ return false;
}
- }
- catch (const std::invalid_argument & ex) {
+ } catch (const std::invalid_argument & ex) {
fprintf(stderr, "%s\n", ex.what());
- gpt_params_print_usage(argc, argv, gpt_params());
- exit(1);
+ return false;
}
- return result;
+
+ return true;
}
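This refactor changes the parsing contract: `gpt_params_parse` no longer calls `exit()` on a bad argument or a help request. Instead it restores `params` to the values it had on entry (examples may pre-set their own defaults, per the comment above), records a help request in `params.usage`, and returns `false`, leaving the caller to decide how to react. A minimal sketch of a caller under the new contract (the `main()` below is illustrative only, not taken from this patch):

```cpp
// Illustrative caller only: shows the new gpt_params_parse() contract, where a
// parse error or -h/--help/--usage returns false instead of terminating.
#include "common.h"

int main(int argc, char ** argv) {
    gpt_params params;            // an example may tweak its defaults here

    if (!gpt_params_parse(argc, argv, params)) {
        // on a help request, params was reset and params.usage is true
        gpt_params_print_usage(argc, argv, params);
        return 1;
    }

    // ... run the example with the validated params ...
    return 0;
}
```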
bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param) {
+ const char split_delim = ',';
+
llama_sampling_params & sparams = params.sparams;
if (arg == "-s" || arg == "--seed") {
@@ -288,7 +289,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
invalid_param = true;
return true;
}
- // This is temporary, in the future the samplign state will be moved fully to llama_sampling_context.
+ // TODO: this is temporary, in the future the sampling state will be moved fully to llama_sampling_context.
params.seed = std::stoul(argv[i]);
sparams.seed = std::stoul(argv[i]);
return true;
@@ -349,6 +350,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.escape = true;
return true;
}
+ if (arg == "--no-escape") {
+ params.escape = false;
+ return true;
+ }
if (arg == "--prompt-cache") {
if (++i >= argc) {
invalid_param = true;
@@ -403,7 +408,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
}
return true;
}
- if (arg == "-n" || arg == "--n-predict") {
+ if (arg == "-n" || arg == "--predict" || arg == "--n-predict") {
if (++i >= argc) {
invalid_param = true;
return true;
@@ -900,34 +905,22 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.interactive = true;
return true;
}
- if (arg == "--interactive-specials") {
- params.interactive_specials = true;
- return true;
- }
- if (arg == "--special") {
+ if (arg == "-sp" || arg == "--special") {
params.special = true;
return true;
}
- if (arg == "--embedding") {
+ if (arg == "--embedding" || arg == "--embeddings") {
params.embedding = true;
return true;
}
- if (arg == "--interactive-first") {
+ if (arg == "-if" || arg == "--interactive-first") {
params.interactive_first = true;
return true;
}
- if (arg == "-ins" || arg == "--instruct") {
- params.instruct = true;
- return true;
- }
if (arg == "-cnv" || arg == "--conversation") {
params.conversation = true;
return true;
}
- if (arg == "-cml" || arg == "--chatml") {
- params.chatml = true;
- return true;
- }
if (arg == "--infill") {
params.infill = true;
return true;
@@ -964,7 +957,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.flash_attn = true;
return true;
}
- if (arg == "--color") {
+ if (arg == "-co" || arg == "--color") {
params.use_color = true;
return true;
}
@@ -972,26 +965,26 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.use_mlock = true;
return true;
}
- if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
+ if (arg == "-ngl" || arg == "--gpu-layers" || arg == "--n-gpu-layers") {
if (++i >= argc) {
invalid_param = true;
return true;
}
params.n_gpu_layers = std::stoi(argv[i]);
if (!llama_supports_gpu_offload()) {
- fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
+ fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers option will be ignored\n");
fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
}
return true;
}
- if (arg == "--gpu-layers-draft" || arg == "-ngld" || arg == "--n-gpu-layers-draft") {
+ if (arg == "-ngld" || arg == "--gpu-layers-draft" || arg == "--gpu-layers-draft") {
if (++i >= argc) {
invalid_param = true;
return true;
}
params.n_gpu_layers_draft = std::stoi(argv[i]);
if (!llama_supports_gpu_offload()) {
- fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers-draft option will be ignored\n");
+ fprintf(stderr, "warning: not compiled with GPU offload support, --gpu-layers-draft option will be ignored\n");
fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
}
return true;
@@ -1087,6 +1080,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
else { invalid_param = true; }
return true;
}
+ if (arg == "-v" || arg == "--verbose") {
+ params.verbose = true;
+ return true;
+ }
if (arg == "--verbose-prompt") {
params.verbose_prompt = true;
return true;
@@ -1151,24 +1148,24 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.ppl_stride = std::stoi(argv[i]);
return true;
}
- if (arg == "-ptc" || arg == "--print-token-count") {
+ if (arg == "--ppl-output-type") {
if (++i >= argc) {
invalid_param = true;
return true;
}
- params.n_print = std::stoi(argv[i]);
- return true;
- }
- if (arg == "--check-tensors") {
- params.check_tensors = true;
+ params.ppl_output_type = std::stoi(argv[i]);
return true;
}
- if (arg == "--ppl-output-type") {
+ if (arg == "-ptc" || arg == "--print-token-count") {
if (++i >= argc) {
invalid_param = true;
return true;
}
- params.ppl_output_type = std::stoi(argv[i]);
+ params.n_print = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--check-tensors") {
+ params.check_tensors = true;
return true;
}
if (arg == "--hellaswag") {
@@ -1242,19 +1239,15 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
}
return true;
}
- if (arg == "-h" || arg == "--help") {
- gpt_params_print_usage(argc, argv, gpt_params());
- exit(0);
+ if (arg == "-h" || arg == "--help" || arg == "--usage" ) {
+ params.usage = true;
+ return true;
}
if (arg == "--version") {
fprintf(stderr, "version: %d (%s)\n", LLAMA_BUILD_NUMBER, LLAMA_COMMIT);
fprintf(stderr, "built with %s for %s\n", LLAMA_COMPILER, LLAMA_BUILD_TARGET);
exit(0);
}
- if (arg == "--random-prompt") {
- params.random_prompt = true;
- return true;
- }
if (arg == "--in-prefix-bos") {
params.input_prefix_bos = true;
return true;
@@ -1321,6 +1314,229 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
}
return true;
}
+ if (arg == "--host") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.hostname = argv[i];
+ return true;
+ }
+ if (arg == "--port") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.port = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--path") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.public_path = argv[i];
+ return true;
+ }
+ if (arg == "--api-key") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.api_keys.push_back(argv[i]);
+ return true;
+ }
+ if (arg == "--api-key-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ std::ifstream key_file(argv[i]);
+ if (!key_file) {
+ fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
+ invalid_param = true;
+ return true;
+ }
+ std::string key;
+ while (std::getline(key_file, key)) {
+ if (!key.empty()) {
+ params.api_keys.push_back(key);
+ }
+ }
+ key_file.close();
+ return true;
+ }
+ if (arg == "--ssl-key-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.ssl_file_key = argv[i];
+ return true;
+ }
+ if (arg == "--ssl-cert-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.ssl_file_cert = argv[i];
+ return true;
+ }
+ if (arg == "--timeout" || arg == "-to") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.timeout_read = std::stoi(argv[i]);
+ params.timeout_write = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "-spf" || arg == "--system-prompt-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ std::ifstream file(argv[i]);
+ if (!file) {
+ fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
+ invalid_param = true;
+ return true;
+ }
+ std::string system_prompt;
+ std::copy(
+ std::istreambuf_iterator<char>(file),
+ std::istreambuf_iterator<char>(),
+ std::back_inserter(system_prompt)
+ );
+ params.system_prompt = system_prompt;
+ return true;
+ }
+ if (arg == "--log-format") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ if (std::strcmp(argv[i], "json") == 0) {
+ params.log_json = true;
+ } else if (std::strcmp(argv[i], "text") == 0) {
+ params.log_json = false;
+ } else {
+ invalid_param = true;
+ return true;
+ }
+ return true;
+ }
+ if (arg == "--no-slots") {
+ params.endpoint_slots = false;
+ return true;
+ }
+ if (arg == "--metrics") {
+ params.endpoint_metrics = true;
+ return true;
+ }
+ if (arg == "--slot-save-path") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.slot_save_path = argv[i];
+ // if doesn't end with DIRECTORY_SEPARATOR, add it
+ if (!params.slot_save_path.empty() && params.slot_save_path[params.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
+ params.slot_save_path += DIRECTORY_SEPARATOR;
+ }
+ return true;
+ }
+ if (arg == "--chat-template") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ if (!llama_chat_verify_template(argv[i])) {
+ fprintf(stderr, "error: the supplied chat template is not supported: %s\n", argv[i]);
+ fprintf(stderr, "note: llama.cpp does not use jinja parser, we only support commonly used templates\n");
+ invalid_param = true;
+ return true;
+ }
+ params.chat_template = argv[i];
+ return true;
+ }
+ if (arg == "-pps") {
+ params.is_pp_shared = true;
+ return true;
+ }
+ if (arg == "-npp") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ auto p = string_split(argv[i], split_delim);
+ params.n_pp.insert(params.n_pp.end(), p.begin(), p.end());
+ return true;
+ }
+ if (arg == "-ntg") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ auto p = string_split(argv[i], split_delim);
+ params.n_tg.insert(params.n_tg.end(), p.begin(), p.end());
+ return true;
+ }
+ if (arg == "-npl") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ auto p = string_split(argv[i], split_delim);
+ params.n_pl.insert(params.n_pl.end(), p.begin(), p.end());
+ return true;
+ }
+ if (arg == "--context-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ std::ifstream file(argv[i], std::ios::binary);
+ if (!file) {
+ fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
+ invalid_param = true;
+ return true;
+ }
+ params.context_files.push_back(argv[i]);
+ return true;
+ }
+ if (arg == "--chunk-size") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.chunk_size = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--chunk-separator") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.chunk_separator = argv[i];
+ return true;
+ }
+ if (arg == "--junk") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.n_junk = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--pos") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.i_pos = std::stoi(argv[i]);
+ return true;
+ }
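Several of the flags added above (`-npp`, `-ntg`, `-npl`) take comma-separated lists: each argument is split on `split_delim` with `string_split` and the resulting values are appended to the corresponding vector. A self-contained sketch of that splitting step, using a hypothetical `split_csv` helper in place of the project's `string_split`:

```cpp
// Standalone illustration of the comma-separated list handling; split_csv()
// is a hypothetical stand-in for common.h's string_split().
#include <cstdio>
#include <sstream>
#include <string>
#include <vector>

static std::vector<int> split_csv(const std::string & s, char delim) {
    std::vector<int> out;
    std::stringstream ss(s);
    std::string tok;
    while (std::getline(ss, tok, delim)) {
        out.push_back(std::stoi(tok));   // one value per delimited token
    }
    return out;
}

int main() {
    std::vector<int> n_pp;                        // like params.n_pp
    const auto p = split_csv("128,256,512", ','); // e.g. "-npp 128,256,512"
    n_pp.insert(n_pp.end(), p.begin(), p.end());
    for (int v : n_pp) printf("%d\n", v);         // prints 128, 256, 512
}
```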
#ifndef LOG_DISABLE_LOGS
// Parse args for logging parameters
if (log_param_single_parse(argv[i])) {
@@ -1348,6 +1564,16 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
return false;
}
+#ifdef __GNUC__
+#ifdef __MINGW32__
+#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+#else
+#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+#endif
+#else
+#define LLAMA_COMMON_ATTRIBUTE_FORMAT(...)
+#endif
+
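The macro above attaches GCC's `format` attribute (using `gnu_printf` on MinGW, whose native `printf` semantics differ) so the compiler can check the `desc` format string of the variadic `option_info` constructor against its arguments; the indices `(4, 5)` account for the implicit `this` pointer counting as argument 1. A minimal standalone illustration of what the attribute catches, not part of the patch:

```cpp
// Standalone sketch (GCC/Clang): format(printf, 1, 2) makes the compiler match
// the variadic arguments against the format string and warn under -Wformat.
#include <cstdarg>
#include <cstdio>

__attribute__((format(printf, 1, 2)))
static void log_line(const char * fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main() {
    log_line("threads: %d\n", 4);       // ok
    // log_line("threads: %d\n", "4");  // warning: %d expects int, got const char *
}
```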
void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
const llama_sampling_params & sparams = params.sparams;
@@ -1359,198 +1585,290 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
}
sampler_type_names.pop_back();
- printf("\n");
- printf("usage: %s [options]\n", argv[0]);
- printf("\n");
- printf("options:\n");
- printf(" -h, --help show this help message and exit\n");
- printf(" --version show version and build info\n");
- printf(" -i, --interactive run in interactive mode\n");
- printf(" --special special tokens output enabled\n");
- printf(" --interactive-specials allow special tokens in user text, in interactive mode\n");
- printf(" --interactive-first run in interactive mode and wait for input right away\n");
- printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n");
- printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n");
- printf(" -cml, --chatml run in chatml mode (use with ChatML-compatible models)\n");
- printf(" --multiline-input allows you to write or paste multiple lines without ending each in '\\'\n");
- printf(" -r PROMPT, --reverse-prompt PROMPT\n");
- printf(" halt generation at PROMPT, return control in interactive mode\n");
- printf(" (can be specified more than once for multiple prompts).\n");
- printf(" --color colorise output to distinguish prompt and user input from generations\n");
- printf(" -s SEED, --seed SEED RNG seed (default: -1, use random seed for < 0)\n");
- printf(" -t N, --threads N number of threads to use during generation (default: %d)\n", params.n_threads);
- printf(" -tb N, --threads-batch N\n");
- printf(" number of threads to use during batch and prompt processing (default: same as --threads)\n");
- printf(" -td N, --threads-draft N");
- printf(" number of threads to use during generation (default: same as --threads)\n");
- printf(" -tbd N, --threads-batch-draft N\n");
- printf(" number of threads to use during batch and prompt processing (default: same as --threads-draft)\n");
- printf(" -p PROMPT, --prompt PROMPT\n");
- printf(" prompt to start generation with (default: empty)\n");
- printf(" -e, --escape process prompt escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\)\n");
- printf(" --prompt-cache FNAME file to cache prompt state for faster startup (default: none)\n");
- printf(" --prompt-cache-all if specified, saves user input and generations to cache as well.\n");
- printf(" not supported with --interactive or other interactive options\n");
- printf(" --prompt-cache-ro if specified, uses the prompt cache but does not update it.\n");
- printf(" --random-prompt start with a randomized prompt.\n");
- printf(" --in-prefix-bos prefix BOS to user inputs, preceding the `--in-prefix` string\n");
- printf(" --in-prefix STRING string to prefix user inputs with (default: empty)\n");
- printf(" --in-suffix STRING string to suffix after user inputs with (default: empty)\n");
- printf(" -f FNAME, --file FNAME\n");
- printf(" prompt file to start generation.\n");
- printf(" -bf FNAME, --binary-file FNAME\n");
- printf(" binary file containing multiple choice tasks.\n");
- printf(" -n N, --n-predict N number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict);
- printf(" -c N, --ctx-size N size of the prompt context (default: %d, 0 = loaded from model)\n", params.n_ctx);
- printf(" -b N, --batch-size N logical maximum batch size (default: %d)\n", params.n_batch);
- printf(" -ub N, --ubatch-size N\n");
- printf(" physical maximum batch size (default: %d)\n", params.n_ubatch);
- printf(" --samplers samplers that will be used for generation in the order, separated by \';\'\n");
- printf(" (default: %s)\n", sampler_type_names.c_str());
- printf(" --sampling-seq simplified sequence for samplers that will be used (default: %s)\n", sampler_type_chars.c_str());
- printf(" --top-k N top-k sampling (default: %d, 0 = disabled)\n", sparams.top_k);
- printf(" --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)sparams.top_p);
- printf(" --min-p N min-p sampling (default: %.1f, 0.0 = disabled)\n", (double)sparams.min_p);
- printf(" --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)sparams.tfs_z);
- printf(" --typical N locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)sparams.typical_p);
- printf(" --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", sparams.penalty_last_n);
- printf(" --repeat-penalty N penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)sparams.penalty_repeat);
- printf(" --presence-penalty N repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_present);
- printf(" --frequency-penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)sparams.penalty_freq);
- printf(" --dynatemp-range N dynamic temperature range (default: %.1f, 0.0 = disabled)\n", (double)sparams.dynatemp_range);
- printf(" --dynatemp-exp N dynamic temperature exponent (default: %.1f)\n", (double)sparams.dynatemp_exponent);
- printf(" --mirostat N use Mirostat sampling.\n");
- printf(" Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n");
- printf(" (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", sparams.mirostat);
- printf(" --mirostat-lr N Mirostat learning rate, parameter eta (default: %.1f)\n", (double)sparams.mirostat_eta);
- printf(" --mirostat-ent N Mirostat target entropy, parameter tau (default: %.1f)\n", (double)sparams.mirostat_tau);
- printf(" -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n");
- printf(" modifies the likelihood of token appearing in the completion,\n");
- printf(" i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n");
- printf(" or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n");
- printf(" --grammar GRAMMAR BNF-like grammar to constrain generations (see samples in grammars/ dir)\n");
- printf(" --grammar-file FNAME file to read grammar from\n");
- printf(" -j SCHEMA, --json-schema SCHEMA\n");
- printf(" JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object.\n");
- printf(" For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead\n");
- printf(" --cfg-negative-prompt PROMPT\n");
- printf(" negative prompt to use for guidance. (default: empty)\n");
- printf(" --cfg-negative-prompt-file FNAME\n");
- printf(" negative prompt file to use for guidance. (default: empty)\n");
- printf(" --cfg-scale N strength of guidance (default: %f, 1.0 = disable)\n", sparams.cfg_scale);
- printf(" --rope-scaling {none,linear,yarn}\n");
- printf(" RoPE frequency scaling method, defaults to linear unless specified by the model\n");
- printf(" --rope-scale N RoPE context scaling factor, expands context by a factor of N\n");
- printf(" --rope-freq-base N RoPE base frequency, used by NTK-aware scaling (default: loaded from model)\n");
- printf(" --rope-freq-scale N RoPE frequency scaling factor, expands context by a factor of 1/N\n");
- printf(" --yarn-orig-ctx N YaRN: original context size of model (default: 0 = model training context size)\n");
- printf(" --yarn-ext-factor N YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n");
- printf(" --yarn-attn-factor N YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n");
- printf(" --yarn-beta-slow N YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow);
- printf(" --yarn-beta-fast N YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast);
- printf(" --pooling {none,mean,cls}\n");
- printf(" pooling type for embeddings, use model default if unspecified\n");
- printf(" -dt N, --defrag-thold N\n");
- printf(" KV cache defragmentation threshold (default: %.1f, < 0 - disabled)\n", params.defrag_thold);
- printf(" --ignore-eos ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
- printf(" --penalize-nl penalize newline tokens\n");
- printf(" --temp N temperature (default: %.1f)\n", (double)sparams.temp);
- printf(" --all-logits return logits for all tokens in the batch (default: disabled)\n");
- printf(" --hellaswag compute HellaSwag score over random tasks from datafile supplied with -f\n");
- printf(" --hellaswag-tasks N number of tasks to use when computing the HellaSwag score (default: %zu)\n", params.hellaswag_tasks);
- printf(" --winogrande compute Winogrande score over random tasks from datafile supplied with -f\n");
- printf(" --winogrande-tasks N number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks);
- printf(" --multiple-choice compute multiple choice score over random tasks from datafile supplied with -f\n");
- printf(" --multiple-choice-tasks N number of tasks to use when computing the multiple choice score (default: %zu)\n", params.winogrande_tasks);
- printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base\n");
- printf(" --keep N number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
- printf(" --draft N number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft);
- printf(" --chunks N max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
- printf(" -np N, --parallel N number of parallel sequences to decode (default: %d)\n", params.n_parallel);
- printf(" -ns N, --sequences N number of sequences to decode (default: %d)\n", params.n_sequences);
- printf(" -ps N, --p-split N speculative decoding split probability (default: %.1f)\n", (double)params.p_split);
- printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: disabled)\n");
- printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
- printf(" --mmproj MMPROJ_FILE path to a multimodal projector file for LLaVA. see examples/llava/README.md\n");
- printf(" --image IMAGE_FILE path to an image file. use with multimodal models. Specify multiple times for batching\n");
+ struct option_info {
+ LLAMA_COMMON_ATTRIBUTE_FORMAT(4, 5)
+ option_info(const std::string & tags, const char * args, const char * desc, ...) : tags(tags), args(args), desc(desc) {
+ va_list args_list;
+ va_start(args_list, desc);
+ char buffer[1024];
+ vsnprintf(buffer, sizeof(buffer), desc, args_list);
+ va_end(args_list);
+ this->desc = buffer;
+ }
+
+ option_info(const std::string & grp) : grp(grp) {}
+
+ std::string tags;
+ std::string args;
+ std::string desc;
+ std::string grp;
+ };
+
+ std::vector<option_info> options;
+
+ // TODO: filter by tags
+
+ options.push_back({ "general" });
+ options.push_back({ "*", "-h, --help, --usage", "print usage and exit" });
+ options.push_back({ "*", " --version", "show version and build info" });
+ options.push_back({ "*", "-v, --verbose", "print verbose information" });
+ options.push_back({ "*", " --verbose-prompt", "print a verbose prompt before generation (default: %s)", params.verbose_prompt ? "true" : "false" });
+ options.push_back({ "*", " --no-display-prompt", "don't print prompt at generation (default: %s)", !params.display_prompt ? "true" : "false" });
+ options.push_back({ "*", "-co, --color", "colorise output to distinguish prompt and user input from generations (default: %s)", params.use_color ? "true" : "false" });
+ options.push_back({ "*", "-s, --seed SEED", "RNG seed (default: %d, use random seed for < 0)", params.seed });
+ options.push_back({ "*", "-t, --threads N", "number of threads to use during generation (default: %d)", params.n_threads });
+ options.push_back({ "*", "-tb, --threads-batch N", "number of threads to use during batch and prompt processing (default: same as --threads)" });
+ options.push_back({ "speculative", "-td, --threads-draft N", "number of threads to use during generation (default: same as --threads)" });
+ options.push_back({ "speculative", "-tbd, --threads-batch-draft N",
+ "number of threads to use during batch and prompt processing (default: same as --threads-draft)" });
+ options.push_back({ "speculative", " --draft N", "number of tokens to draft for speculative decoding (default: %d)", params.n_draft });
+ options.push_back({ "speculative", "-ps, --p-split N", "speculative decoding split probability (default: %.1f)", (double)params.p_split });
+ options.push_back({ "*", "-lcs, --lookup-cache-static FNAME",
+ "path to static lookup cache to use for lookup decoding (not updated by generation)" });
+ options.push_back({ "*", "-lcd, --lookup-cache-dynamic FNAME",
+ "path to dynamic lookup cache to use for lookup decoding (updated by generation)" });
+
+ options.push_back({ "*", "-c, --ctx-size N", "size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx });
+ options.push_back({ "*", "-n, --predict N", "number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)", params.n_predict });
+ options.push_back({ "*", "-b, --batch-size N", "logical maximum batch size (default: %d)", params.n_batch });
+ options.push_back({ "*", "-ub, --ubatch-size N", "physical maximum batch size (default: %d)", params.n_ubatch });
+ options.push_back({ "*", " --keep N", "number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep });
+ options.push_back({ "*", " --chunks N", "max number of chunks to process (default: %d, -1 = all)", params.n_chunks });
+ options.push_back({ "*", "-fa, --flash-attn", "enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled" });
+ options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with (default: '%s')", params.prompt.c_str() });
+ options.push_back({ "*", "-f, --file FNAME", "a file containing the prompt (default: none)" });
+ options.push_back({ "*", "-bf, --binary-file FNAME", "binary file containing the prompt (default: none)" });
+ options.push_back({ "*", "-e, --escape", "process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false" });
+ options.push_back({ "*", " --no-escape", "do not process escape sequences" });
+ options.push_back({ "main", "-ptc, --print-token-count N", "print token count every N tokens (default: %d)", params.n_print });
+ options.push_back({ "main", " --prompt-cache FNAME", "file to cache prompt state for faster startup (default: none)" });
+ options.push_back({ "main", " --prompt-cache-all", "if specified, saves user input and generations to cache as well\n"
+ "not supported with --interactive or other interactive options" });
+ options.push_back({ "main", " --prompt-cache-ro", "if specified, uses the prompt cache but does not update it" });
+ options.push_back({ "main", "-r, --reverse-prompt PROMPT",
+ "halt generation at PROMPT, return control in interactive mode\n"
+ "can be specified more than once for multiple prompts" });
+ options.push_back({ "main", "-sp, --special", "special tokens output enabled (default: %s)", params.special ? "true" : "false" });
+ options.push_back({ "main", "-cnv, --conversation", "run in conversation mode (does not print special tokens and suffix/prefix) (default: %s)", params.conversation ? "true" : "false" });
+ options.push_back({ "main infill", "-i, --interactive", "run in interactive mode (default: %s)", params.interactive ? "true" : "false" });
+ options.push_back({ "main infill", "-if, --interactive-first", "run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false" });
+ options.push_back({ "main infill", "-mli, --multiline-input", "allows you to write or paste multiple lines without ending each in '\\'" });
+ options.push_back({ "main infill", " --in-prefix-bos", "prefix BOS to user inputs, preceding the `--in-prefix` string" });
+ options.push_back({ "main infill", " --in-prefix STRING", "string to prefix user inputs with (default: empty)" });
+ options.push_back({ "main infill", " --in-suffix STRING", "string to suffix after user inputs with (default: empty)" });
+
+ options.push_back({ "sampling" });
+ options.push_back({ "*", " --samplers SAMPLERS", "samplers that will be used for generation in the order, separated by \';\'\n"
+ "(default: %s)", sampler_type_names.c_str() });
+ options.push_back({ "*", " --sampling-seq SEQUENCE",
+ "simplified sequence for samplers that will be used (default: %s)", sampler_type_chars.c_str() });
+ options.push_back({ "*", " --ignore-eos", "ignore end of stream token and continue generating (implies --logit-bias EOS-inf)" });
+ options.push_back({ "*", " --penalize-nl", "penalize newline tokens (default: %s)", sparams.penalize_nl ? "true" : "false" });
+ options.push_back({ "*", " --temp N", "temperature (default: %.1f)", (double)sparams.temp });
+ options.push_back({ "*", " --top-k N", "top-k sampling (default: %d, 0 = disabled)", sparams.top_k });
+ options.push_back({ "*", " --top-p N", "top-p sampling (default: %.1f, 1.0 = disabled)", (double)sparams.top_p });
+ options.push_back({ "*", " --min-p N", "min-p sampling (default: %.1f, 0.0 = disabled)", (double)sparams.min_p });
+ options.push_back({ "*", " --tfs N", "tail free sampling, parameter z (default: %.1f, 1.0 = disabled)", (double)sparams.tfs_z });
+ options.push_back({ "*", " --typical N", "locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)", (double)sparams.typical_p });
+ options.push_back({ "*", " --repeat-last-n N", "last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)", sparams.penalty_last_n });
+ options.push_back({ "*", " --repeat-penalty N", "penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)", (double)sparams.penalty_repeat });
+ options.push_back({ "*", " --presence-penalty N", "repeat alpha presence penalty (default: %.1f, 0.0 = disabled)", (double)sparams.penalty_present });
+ options.push_back({ "*", " --frequency-penalty N", "repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)", (double)sparams.penalty_freq });
+ options.push_back({ "*", " --dynatemp-range N", "dynamic temperature range (default: %.1f, 0.0 = disabled)", (double)sparams.dynatemp_range });
+ options.push_back({ "*", " --dynatemp-exp N", "dynamic temperature exponent (default: %.1f)", (double)sparams.dynatemp_exponent });
+ options.push_back({ "*", " --mirostat N", "use Mirostat sampling.\n"
+ "Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n"
+ "(default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)", sparams.mirostat });
+ options.push_back({ "*", " --mirostat-lr N", "Mirostat learning rate, parameter eta (default: %.1f)", (double)sparams.mirostat_eta });
+ options.push_back({ "*", " --mirostat-ent N", "Mirostat target entropy, parameter tau (default: %.1f)", (double)sparams.mirostat_tau });
+ options.push_back({ "*", " -l TOKEN_ID(+/-)BIAS", "modifies the likelihood of token appearing in the completion,\n"
+ "i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n"
+ "or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'" });
+ options.push_back({ "main", " --cfg-negative-prompt PROMPT",
+ "negative prompt to use for guidance (default: '%s')", sparams.cfg_negative_prompt.c_str() });
+ options.push_back({ "main", " --cfg-negative-prompt-file FNAME",
+ "negative prompt file to use for guidance" });
+ options.push_back({ "main", " --cfg-scale N", "strength of guidance (default: %.1f, 1.0 = disable)", (double)sparams.cfg_scale });
+
+ options.push_back({ "grammar" });
+ options.push_back({ "*", " --grammar GRAMMAR", "BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '%s')", sparams.grammar.c_str() });
+ options.push_back({ "*", " --grammar-file FNAME", "file to read grammar from" });
+ options.push_back({ "*", "-j, --json-schema SCHEMA",
+ "JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object\n"
+ "For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead" });
+
+ options.push_back({ "embedding" });
+ options.push_back({ "embedding", " --pooling {none,mean,cls}",
+ "pooling type for embeddings, use model default if unspecified" });
+
+ options.push_back({ "context hacking" });
+ options.push_back({ "*", " --rope-scaling {none,linear,yarn}",
+ "RoPE frequency scaling method, defaults to linear unless specified by the model" });
+ options.push_back({ "*", " --rope-scale N", "RoPE context scaling factor, expands context by a factor of N" });
+ options.push_back({ "*", " --rope-freq-base N", "RoPE base frequency, used by NTK-aware scaling (default: loaded from model)" });
+ options.push_back({ "*", " --rope-freq-scale N", "RoPE frequency scaling factor, expands context by a factor of 1/N" });
+ options.push_back({ "*", " --yarn-orig-ctx N", "YaRN: original context size of model (default: %d = model training context size)", params.yarn_orig_ctx });
+ options.push_back({ "*", " --yarn-ext-factor N", "YaRN: extrapolation mix factor (default: %.1f, 0.0 = full interpolation)", (double)params.yarn_ext_factor });
+ options.push_back({ "*", " --yarn-attn-factor N", "YaRN: scale sqrt(t) or attention magnitude (default: %.1f)", (double)params.yarn_attn_factor });
+ options.push_back({ "*", " --yarn-beta-slow N", "YaRN: high correction dim or alpha (default: %.1f)", (double)params.yarn_beta_slow });
+ options.push_back({ "*", " --yarn-beta-fast N", "YaRN: low correction dim or beta (default: %.1f)", (double)params.yarn_beta_fast });
+ options.push_back({ "*", "-gan, --grp-attn-n N", "group-attention factor (default: %d)", params.grp_attn_n });
+ options.push_back({ "*", "-gaw, --grp-attn-w N", "group-attention width (default: %.1f)", (double)params.grp_attn_w });
+ options.push_back({ "*", "-dkvc, --dump-kv-cache", "verbose print of the KV cache" });
+ options.push_back({ "*", "-nkvo, --no-kv-offload", "disable KV offload" });
+ options.push_back({ "*", "-ctk, --cache-type-k TYPE", "KV cache data type for K (default: %s)", params.cache_type_k.c_str() });
+ options.push_back({ "*", "-ctv, --cache-type-v TYPE", "KV cache data type for V (default: %s)", params.cache_type_v.c_str() });
+
+ options.push_back({ "perplexity" });
+ options.push_back({ "perplexity", " --all-logits", "return logits for all tokens in the batch (default: %s)", params.logits_all ? "true" : "false" });
+ options.push_back({ "perplexity", " --hellaswag", "compute HellaSwag score over random tasks from datafile supplied with -f" });
+ options.push_back({ "perplexity", " --hellaswag-tasks N", "number of tasks to use when computing the HellaSwag score (default: %zu)", params.hellaswag_tasks });
+ options.push_back({ "perplexity", " --winogrande", "compute Winogrande score over random tasks from datafile supplied with -f" });
+ options.push_back({ "perplexity", " --winogrande-tasks N", "number of tasks to use when computing the Winogrande score (default: %zu)", params.winogrande_tasks });
+ options.push_back({ "perplexity", " --multiple-choice", "compute multiple choice score over random tasks from datafile supplied with -f" });
+ options.push_back({ "perplexity", " --multiple-choice-tasks N",
+ "number of tasks to use when computing the multiple choice score (default: %zu)", params.multiple_choice_tasks });
+ options.push_back({ "perplexity", " --kl-divergence", "computes KL-divergence to logits provided via --kl-divergence-base" });
+ options.push_back({ "perplexity", " --ppl-stride N", "stride for perplexity calculation (default: %d)", params.ppl_stride });
+ options.push_back({ "perplexity", " --ppl-output-type {0,1}",
+ "output type for perplexity calculation (default: %d)", params.ppl_output_type });
+
+ options.push_back({ "parallel" });
+ options.push_back({ "*", "-dt, --defrag-thold N", "KV cache defragmentation threshold (default: %.1f, < 0 - disabled)", (double)params.defrag_thold });
+ options.push_back({ "*", "-np, --parallel N", "number of parallel sequences to decode (default: %d)", params.n_parallel });
+ options.push_back({ "*", "-ns, --sequences N", "number of sequences to decode (default: %d)", params.n_sequences });
+ options.push_back({ "*", "-cb, --cont-batching", "enable continuous batching (a.k.a dynamic batching) (default: %s)", params.cont_batching ? "enabled" : "disabled" });
+
+ options.push_back({ "multi-modality" });
+ options.push_back({ "*", " --mmproj FILE", "path to a multimodal projector file for LLaVA. see examples/llava/README.md" });
+ options.push_back({ "*", " --image FILE", "path to an image file. use with multimodal models. Specify multiple times for batching" });
+
+ options.push_back({ "backend" });
+ options.push_back({ "*", " --rpc SERVERS", "comma separated list of RPC servers" });
if (llama_supports_mlock()) {
- printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n");
+ options.push_back({ "*", " --mlock", "force system to keep model in RAM rather than swapping or compressing" });
}
if (llama_supports_mmap()) {
- printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
- }
- printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n");
- printf(" - distribute: spread execution evenly over all nodes\n");
- printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n");
- printf(" - numactl: use the CPU map provided by numactl\n");
- printf(" if run without this previously, it is recommended to drop the system page cache before using this\n");
- printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n");
+ options.push_back({ "*", " --no-mmap", "do not memory-map model (slower load but may reduce pageouts if not using mlock)" });
+ }
+ options.push_back({ "*", " --numa TYPE", "attempt optimizations that help on some NUMA systems\n"
+ " - distribute: spread execution evenly over all nodes\n"
+ " - isolate: only spawn threads on CPUs on the node that execution started on\n"
+ " - numactl: use the CPU map provided by numactl\n"
+ "if run without this previously, it is recommended to drop the system page cache before using this\n"
+ "see https://github.com/ggerganov/llama.cpp/issues/1437" });
+
if (llama_supports_gpu_offload()) {
- printf(" -ngl N, --n-gpu-layers N\n");
- printf(" number of layers to store in VRAM\n");
- printf(" -ngld N, --n-gpu-layers-draft N\n");
- printf(" number of layers to store in VRAM for the draft model\n");
- printf(" -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
- printf(" how to split the model across multiple GPUs, one of:\n");
- printf(" - none: use one GPU only\n");
- printf(" - layer (default): split layers and KV across GPUs\n");
- printf(" - row: split rows across GPUs\n");
- printf(" -ts SPLIT, --tensor-split SPLIT\n");
- printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
- printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n");
- printf(" or for intermediate results and KV (with split-mode = row) (default: %d)\n", params.main_gpu);
- }
- printf(" --rpc SERVERS comma separated list of RPC servers\n");
- printf(" --verbose-prompt print a verbose prompt before generation (default: %s)\n", params.verbose_prompt ? "true" : "false");
- printf(" --no-display-prompt don't print prompt at generation (default: %s)\n", !params.display_prompt ? "true" : "false");
- printf(" -gan N, --grp-attn-n N\n");
- printf(" group-attention factor (default: %d)\n", params.grp_attn_n);
- printf(" -gaw N, --grp-attn-w N\n");
- printf(" group-attention width (default: %.1f)\n", (double)params.grp_attn_w);
- printf(" -dkvc, --dump-kv-cache\n");
- printf(" verbose print of the KV cache\n");
- printf(" -nkvo, --no-kv-offload\n");
- printf(" disable KV offload\n");
- printf(" -ctk TYPE, --cache-type-k TYPE\n");
- printf(" KV cache data type for K (default: %s)\n", params.cache_type_k.c_str());
- printf(" -ctv TYPE, --cache-type-v TYPE\n");
- printf(" KV cache data type for V (default: %s)\n", params.cache_type_v.c_str());
- printf(" --simple-io use basic IO for better compatibility in subprocesses and limited consoles\n");
- printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
- printf(" --lora-scaled FNAME S apply LoRA adapter with user defined scaling S (implies --no-mmap)\n");
- printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
- printf(" --control-vector FNAME\n");
- printf(" add a control vector\n");
- printf(" --control-vector-scaled FNAME S\n");
- printf(" add a control vector with user defined scaling S\n");
- printf(" --control-vector-layer-range START END\n");
- printf(" layer range to apply the control vector(s) to, start and end inclusive\n");
- printf(" -m FNAME, --model FNAME\n");
- printf(" model path (default: models/$filename with filename from --hf-file or --model-url if set, otherwise %s)\n", DEFAULT_MODEL_PATH);
- printf(" -md FNAME, --model-draft FNAME\n");
- printf(" draft model for speculative decoding (default: unused)\n");
- printf(" -mu MODEL_URL, --model-url MODEL_URL\n");
- printf(" model download url (default: unused)\n");
- printf(" -hfr REPO, --hf-repo REPO\n");
- printf(" Hugging Face model repository (default: unused)\n");
- printf(" -hff FILE, --hf-file FILE\n");
- printf(" Hugging Face model file (default: unused)\n");
- printf(" -ld LOGDIR, --logdir LOGDIR\n");
- printf(" path under which to save YAML logs (no logging if unset)\n");
- printf(" -lcs FNAME, --lookup-cache-static FNAME\n");
- printf(" path to static lookup cache to use for lookup decoding (not updated by generation)\n");
- printf(" -lcd FNAME, --lookup-cache-dynamic FNAME\n");
- printf(" path to dynamic lookup cache to use for lookup decoding (updated by generation)\n");
- printf(" --override-kv KEY=TYPE:VALUE\n");
- printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
- printf(" types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
- printf(" -ptc N, --print-token-count N\n");
- printf(" print token count every N tokens (default: %d)\n", params.n_print);
- printf(" --check-tensors check model tensor data for invalid values\n");
- printf("\n");
+ options.push_back({ "*", "-ngl, --gpu-layers N",
+ "number of layers to store in VRAM" });
+ options.push_back({ "*", "-ngld, --gpu-layers-draft N",
+ "number of layers to store in VRAM for the draft model" });
+ options.push_back({ "*", "-sm, --split-mode SPLIT_MODE",
+ "how to split the model across multiple GPUs, one of:\n"
+ " - none: use one GPU only\n"
+ " - layer (default): split layers and KV across GPUs\n"
+ " - row: split rows across GPUs" });
+ options.push_back({ "*", "-ts, --tensor-split SPLIT",
+ "fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1" });
+ options.push_back({ "*", "-mg, --main-gpu i", "the GPU to use for the model (with split-mode = none),\n"
+ "or for intermediate results and KV (with split-mode = row) (default: %d)", params.main_gpu });
+ }
+
+ options.push_back({ "model" });
+ options.push_back({ "*", " --check-tensors", "check model tensor data for invalid values (default: %s)", params.check_tensors ? "true" : "false" });
+ options.push_back({ "*", " --override-kv KEY=TYPE:VALUE",
+ "advanced option to override model metadata by key. may be specified multiple times.\n"
+ "types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false" });
+ options.push_back({ "*", " --lora FNAME", "apply LoRA adapter (implies --no-mmap)" });
+ options.push_back({ "*", " --lora-scaled FNAME S", "apply LoRA adapter with user defined scaling S (implies --no-mmap)" });
+ options.push_back({ "*", " --lora-base FNAME", "optional model to use as a base for the layers modified by the LoRA adapter" });
+ options.push_back({ "*", " --control-vector FNAME", "add a control vector" });
+ options.push_back({ "*", " --control-vector-scaled FNAME SCALE",
+ "add a control vector with user defined scaling SCALE" });
+ options.push_back({ "*", " --control-vector-layer-range START END",
+ "layer range to apply the control vector(s) to, start and end inclusive" });
+ options.push_back({ "*", "-m, --model FNAME", "model path (default: models/$filename with filename from --hf-file\n"
+ "or --model-url if set, otherwise %s)", DEFAULT_MODEL_PATH });
+ options.push_back({ "*", "-md, --model-draft FNAME", "draft model for speculative decoding (default: unused)" });
+ options.push_back({ "*", "-mu, --model-url MODEL_URL", "model download url (default: unused)" });
+ options.push_back({ "*", "-hfr, --hf-repo REPO", "Hugging Face model repository (default: unused)" });
+ options.push_back({ "*", "-hff, --hf-file FILE", "Hugging Face model file (default: unused)" });
+
+ options.push_back({ "retrieval" });
+ options.push_back({ "retrieval", " --context-file FNAME", "file to load context from (repeat to specify multiple files)" });
+ options.push_back({ "retrieval", " --chunk-size N", "minimum length of embedded text chunks (default: %d)", params.chunk_size });
+ options.push_back({ "retrieval", " --chunk-separator STRING",
+ "separator between chunks (default: '%s')", params.chunk_separator.c_str() });
+
+ options.push_back({ "passkey" });
+ options.push_back({ "passkey", " --junk N", "number of times to repeat the junk text (default: %d)", params.n_junk });
+ options.push_back({ "passkey", " --pos N", "position of the passkey in the junk text (default: %d)", params.i_pos });
+
+ options.push_back({ "bench" });
+ options.push_back({ "bench", "-pps", "is the prompt shared across parallel sequences (default: %s)", params.is_pp_shared ? "true" : "false" });
+ options.push_back({ "bench", "-npp n0,n1,...", "number of prompt tokens" });
+ options.push_back({ "bench", "-ntg n0,n1,...", "number of text generation tokens" });
+ options.push_back({ "bench", "-npl n0,n1,...", "number of parallel prompts" });
+
+ options.push_back({ "server" });
+ options.push_back({ "server", " --host HOST", "ip address to listen (default: %s)", params.hostname.c_str() });
+ options.push_back({ "server", " --port PORT", "port to listen (default: %d)", params.port });
+ options.push_back({ "server", " --path PATH", "path to serve static files from (default: %s)", params.public_path.c_str() });
+ options.push_back({ "server", " --embedding(s)", "enable embedding endpoint (default: %s)", params.embedding ? "enabled" : "disabled" });
+ options.push_back({ "server", " --api-key KEY", "API key to use for authentication (default: none)" });
+ options.push_back({ "server", " --api-key-file FNAME", "path to file containing API keys (default: none)" });
+ options.push_back({ "server", " --ssl-key-file FNAME", "path to file a PEM-encoded SSL private key" });
+ options.push_back({ "server", " --ssl-cert-file FNAME", "path to file a PEM-encoded SSL certificate" });
+ options.push_back({ "server", " --timeout N", "server read/write timeout in seconds (default: %d)", params.timeout_read });
+ options.push_back({ "server", " --system-prompt-file FNAME",
+ "set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications" });
+ options.push_back({ "server", " --log-format {text,json}",
+ "log output format: json or text (default: json)" });
+ options.push_back({ "server", " --metrics", "enable prometheus compatible metrics endpoint (default: %s)", params.endpoint_metrics ? "enabled" : "disabled" });
+ options.push_back({ "server", " --no-slots", "disables slots monitoring endpoint (default: %s)", params.endpoint_slots ? "enabled" : "disabled" });
+ options.push_back({ "server", " --slot-save-path PATH", "path to save slot kv cache (default: disabled)" });
+ options.push_back({ "server", " --chat-template JINJA_TEMPLATE",
+ "set custom jinja chat template (default: template taken from model's metadata)\n"
+ "only commonly used templates are accepted:\n"
+ "https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template" });
+
#ifndef LOG_DISABLE_LOGS
- log_print_usage();
+ options.push_back({ "logging" });
+ options.push_back({ "*", " --simple-io", "use basic IO for better compatibility in subprocesses and limited consoles" });
+ options.push_back({ "*", "-ld, --logdir LOGDIR", "path under which to save YAML logs (no logging if unset)" });
+ options.push_back({ "logging", " --log-test", "Run simple logging test" });
+ options.push_back({ "logging", " --log-disable", "Disable trace logs" });
+ options.push_back({ "logging", " --log-enable", "Enable trace logs" });
+ options.push_back({ "logging", " --log-file FNAME", "Specify a log filename (without extension)" });
+ options.push_back({ "logging", " --log-new", "Create a separate new log file on start. "
+ "Each log file will have unique name: \"..log\"" });
+ options.push_back({ "logging", " --log-append", "Don't truncate the old log file." });
#endif // LOG_DISABLE_LOGS
+
+ printf("usage: %s [options]\n", argv[0]);
+
+ for (const auto & o : options) {
+ if (!o.grp.empty()) {
+ printf("\n%s:\n\n", o.grp.c_str());
+ continue;
+ }
+ printf(" %-32s", o.args.c_str());
+ if (o.args.length() > 30) {
+ printf("\n%34s", "");
+ }
+
+ const auto desc = o.desc;
+ size_t start = 0;
+ size_t end = desc.find('\n');
+ while (end != std::string::npos) {
+ printf("%s\n%34s", desc.substr(start, end - start).c_str(), "");
+ start = end + 1;
+ end = desc.find('\n', start);
+ }
+
+ printf("%s\n", desc.substr(start).c_str());
+ }
+ printf("\n");
}
std::string gpt_params_get_system_info(const gpt_params & params) {
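Note for readers of the hunk above: the printing loop only relies on three fields per entry (`grp`, `args`, `desc`); the entry type itself is presumably defined earlier in this patch and is not shown here. A minimal sketch of the kind of struct the `options.push_back({ ... })` calls and the loop both assume follows — the example-tag field name and the printf-style formatting constructor are guesses, not the actual definition:

```cpp
#include <cstdarg>
#include <cstdio>
#include <string>

// Hypothetical sketch only: the real entry type lives elsewhere in this patch.
// Field names grp, args and desc are taken from the printing loop above.
struct cli_option {
    std::string tags; // which examples the option applies to ("*" = all) -- assumed name
    std::string args; // argument spelling, e.g. "-m,    --model FNAME"
    std::string desc; // help text; '\n' marks wrapped continuation lines
    std::string grp;  // non-empty only for group-header entries such as { "server" }

    // group-header entry
    cli_option(const std::string & grp) : grp(grp) {}

    // regular entry with printf-style description formatting (assumed convenience)
    cli_option(const std::string & tags, const std::string & args, const char * fmt, ...) : tags(tags), args(args) {
        va_list ap;
        va_start(ap, fmt);
        char buf[1024];
        vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        desc = buf;
    }
};

// e.g. options.push_back(cli_option("*", "-m,    --model FNAME", "model path (default: %s)", "models/7B/model.gguf"));
```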
@@ -1610,24 +1928,6 @@ std::string string_get_sortable_timestamp() {
return std::string(timestamp_no_ns) + "." + std::string(timestamp_ns);
}
-std::string string_random_prompt(std::mt19937 & rng) {
- const int r = rng() % 10;
- switch (r) {
- case 0: return "So";
- case 1: return "Once upon a time";
- case 2: return "When";
- case 3: return "The";
- case 4: return "After";
- case 5: return "If";
- case 6: return "import";
- case 7: return "He";
- case 8: return "She";
- case 9: return "They";
- }
-
- GGML_UNREACHABLE();
-}
-
void string_process_escapes(std::string & input) {
std::size_t input_len = input.length();
std::size_t output_idx = 0;
@@ -2503,6 +2803,12 @@ bool llama_should_add_bos_token(const llama_model * model) {
return add_bos != -1 ? bool(add_bos) : (llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
}
+bool llama_chat_verify_template(const std::string & tmpl) {
+ llama_chat_message chat[] = {{"user", "test"}};
+ int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
+ return res >= 0;
+}
+
//
// KV cache utils
//
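As a hedged illustration of how the new `llama_chat_verify_template` helper added just above is meant to be used (this call site is not part of the patch), a caller could gate on it right after argument parsing:

```cpp
#include <cstdio>
#include <string>

#include "common.h" // declares llama_chat_verify_template() after this patch

// Illustrative only: reject an unsupported --chat-template value up front.
static bool validate_chat_template(const gpt_params & params) {
    if (params.chat_template.empty()) {
        return true; // empty means: use the template from the model's metadata
    }
    if (!llama_chat_verify_template(params.chat_template)) {
        fprintf(stderr, "error: unsupported chat template: %s\n", params.chat_template.c_str());
        return false;
    }
    return true;
}
```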
@@ -2844,7 +3150,6 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
fprintf(stream, "cpu_has_avx512_vnni: %s\n", ggml_cpu_has_avx512_vnni() ? "true" : "false");
fprintf(stream, "cpu_has_cuda: %s\n", ggml_cpu_has_cuda() ? "true" : "false");
fprintf(stream, "cpu_has_vulkan: %s\n", ggml_cpu_has_vulkan() ? "true" : "false");
- fprintf(stream, "cpu_has_clblast: %s\n", ggml_cpu_has_clblast() ? "true" : "false");
fprintf(stream, "cpu_has_kompute: %s\n", ggml_cpu_has_kompute() ? "true" : "false");
fprintf(stream, "cpu_has_fma: %s\n", ggml_cpu_has_fma() ? "true" : "false");
fprintf(stream, "cpu_has_gpublas: %s\n", ggml_cpu_has_gpublas() ? "true" : "false");
@@ -2903,9 +3208,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
yaml_dump_string_multiline(stream, "in_prefix", params.input_prefix.c_str());
fprintf(stream, "in_prefix_bos: %s # default: false\n", params.input_prefix_bos ? "true" : "false");
yaml_dump_string_multiline(stream, "in_suffix", params.input_prefix.c_str());
- fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
- fprintf(stream, "interactive_specials: %s # default: false\n", params.interactive_specials ? "true" : "false");
fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());
@@ -2955,7 +3258,6 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
fprintf(stream, "prompt_cache_all: %s # default: false\n", params.prompt_cache_all ? "true" : "false");
fprintf(stream, "prompt_cache_ro: %s # default: false\n", params.prompt_cache_ro ? "true" : "false");
yaml_dump_vector_int(stream, "prompt_tokens", prompt_tokens);
- fprintf(stream, "random_prompt: %s # default: false\n", params.random_prompt ? "true" : "false");
fprintf(stream, "repeat_penalty: %f # default: 1.1\n", sparams.penalty_repeat);
fprintf(stream, "reverse_prompt:\n");
diff --git a/common/common.h b/common/common.h
index 264504830a7f0..e0a08a61b7424 100644
--- a/common/common.h
+++ b/common/common.h
@@ -60,7 +60,7 @@ struct gpt_params {
int32_t n_threads_batch = -1; // number of threads to use for batch processing (-1 = use n_threads)
int32_t n_threads_batch_draft = -1;
int32_t n_predict = -1; // new tokens to predict
- int32_t n_ctx = 512; // context size
+ int32_t n_ctx = 0; // context size
int32_t n_batch = 2048; // logical batch size for prompt processing (must be >=32 to use BLAS)
int32_t n_ubatch = 512; // physical batch size for prompt processing (must be >=32 to use BLAS)
int32_t n_keep = 0; // number of tokens to keep from initial prompt
@@ -99,23 +99,23 @@ struct gpt_params {
// // sampling parameters
struct llama_sampling_params sparams;
- std::string model = ""; // model path
- std::string model_draft = ""; // draft model for speculative decoding
+ std::string model = ""; // model path
+ std::string model_draft = ""; // draft model for speculative decoding
std::string model_alias = "unknown"; // model alias
- std::string model_url = ""; // model url to download
- std::string hf_repo = ""; // HF repo
- std::string hf_file = ""; // HF file
+ std::string model_url = ""; // model url to download
+ std::string hf_repo = ""; // HF repo
+ std::string hf_file = ""; // HF file
std::string prompt = "";
- std::string prompt_file = ""; // store the external prompt file name
- std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
- std::string input_prefix = ""; // string to prefix user inputs with
- std::string input_suffix = ""; // string to suffix user inputs with
- std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
- std::string logdir = ""; // directory in which to save YAML log files
+ std::string prompt_file = ""; // store the external prompt file name
+ std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
+ std::string input_prefix = ""; // string to prefix user inputs with
+ std::string input_suffix = ""; // string to suffix user inputs with
+ std::string logdir = ""; // directory in which to save YAML log files
std::string lookup_cache_static = ""; // path of static ngram cache file for lookup decoding
std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding
- std::string logits_file = ""; // file for saving *all* logits
+ std::string logits_file = ""; // file for saving *all* logits
+ std::vector<std::string> antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts)
std::vector<llama_model_kv_override> kv_overrides;
// TODO: avoid tuple, use struct
@@ -127,8 +127,8 @@ struct gpt_params {
int32_t control_vector_layer_start = -1; // layer range for control vector
int32_t control_vector_layer_end = -1; // layer range for control vector
- int ppl_stride = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
- int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
+ int32_t ppl_stride = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
+ int32_t ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
// (which is more convenient to use for plotting)
//
bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
@@ -142,19 +142,17 @@ struct gpt_params {
bool kl_divergence = false; // compute KL divergence
- bool random_prompt = false; // do not randomize prompt if none provided
+ bool usage = false; // print usage
bool use_color = false; // use color to distinguish generations and inputs
- bool interactive = false; // interactive mode
- bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
bool special = false; // enable special token output
+ bool interactive = false; // interactive mode
+ bool interactive_first = false; // wait for user input immediately
bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
- bool chatml = false; // chatml mode (used for models trained on chatml syntax)
bool prompt_cache_all = false; // save user input and generations to prompt cache
bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
bool embedding = false; // get only sentence embedding
- bool escape = false; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
- bool interactive_first = false; // wait for user input immediately
+ bool escape = true; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
bool multiline_input = false; // reverse the usage of `\`
bool simple_io = false; // improves compatibility with subprocesses and limited consoles
bool cont_batching = true; // insert new sequences for decoding on-the-fly
@@ -162,10 +160,10 @@ struct gpt_params {
bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
bool ignore_eos = false; // ignore generated EOS tokens
- bool instruct = false; // instruction mode (used for Alpaca models)
bool logits_all = false; // return logits for all tokens in the batch
bool use_mmap = true; // use mmap for faster loads
bool use_mlock = false; // use mlock to keep model in memory
+ bool verbose = false;
bool verbose_prompt = false; // print prompt tokens before generation
bool display_prompt = true; // print prompt before generation
bool infill = false; // use infill mode
@@ -180,6 +178,47 @@ struct gpt_params {
// multimodal models (see examples/llava)
std::string mmproj = ""; // path to multimodal projector
std::vector<std::string> image; // path to image file(s)
+
+ // server params
+ int32_t port = 8080;
+ int32_t timeout_read = 600;
+ int32_t timeout_write = timeout_read;
+ int32_t n_threads_http = -1;
+
+ std::string hostname = "127.0.0.1";
+ std::string public_path = "";
+ std::string chat_template = "";
+ std::string system_prompt = "";
+
+ std::vector<std::string> api_keys;
+
+ std::string ssl_file_key = "";
+ std::string ssl_file_cert = "";
+
+ bool endpoint_slots = true;
+ bool endpoint_metrics = false;
+
+ bool log_json = false;
+
+ std::string slot_save_path;
+
+ // batched-bench params
+ bool is_pp_shared = false;
+
+ std::vector<int32_t> n_pp;
+ std::vector<int32_t> n_tg;
+ std::vector<int32_t> n_pl;
+
+ // retrieval params
+ std::vector<std::string> context_files; // context files to embed
+
+ int32_t chunk_size = 64; // chunk size for context embedding
+
+ std::string chunk_separator = "\n"; // chunk separator for context embedding
+
+ // passkey params
+ int32_t n_junk = 250; // number of times to repeat the junk text
+ int32_t i_pos = -1; // position of the passkey in the junk text
};
void gpt_params_handle_model_default(gpt_params & params);
@@ -199,7 +238,20 @@ std::vector<std::string> string_split(std::string input, char separator);
std::string string_strip(const std::string & str);
std::string string_get_sortable_timestamp();
-std::string string_random_prompt(std::mt19937 & rng);
+
+template<typename T>
+static std::vector<T> string_split(const std::string & str, char delim) {
+ std::vector<T> values;
+ std::istringstream str_stream(str);
+ std::string token;
+ while (std::getline(str_stream, token, delim)) {
+ T value;
+ std::istringstream token_stream(token);
+ token_stream >> value;
+ values.push_back(value);
+ }
+ return values;
+}
bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
void string_process_escapes(std::string & input);
@@ -282,6 +334,13 @@ std::string llama_detokenize_bpe(
// defaults to true when model type is SPM, otherwise false.
bool llama_should_add_bos_token(const llama_model * model);
+//
+// Chat template utils
+//
+
+// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
+bool llama_chat_verify_template(const std::string & tmpl);
+
//
// KV cache utils
//
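The `string_split<T>` template moved into `common.h` above (and removed from llama-bench further down) is what lets the examples accept comma-separated lists through ordinary flags such as the new `-npp`/`-ntg`/`-npl` options. A small, self-contained sketch of the intended use, assuming only what this hunk adds:

```cpp
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

#include "common.h" // provides string_split<T> after this patch

int main() {
    // e.g. the value supplied to the new `-npp 128,256,512` flag
    const std::string arg = "128,256,512";

    const std::vector<int32_t> n_pp = string_split<int32_t>(arg, ',');

    for (const int32_t v : n_pp) {
        printf("%d\n", v); // prints 128, 256 and 512 on separate lines
    }

    return 0;
}
```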
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index b40ee4ccb2ec1..53002f8e1ce76 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -15,7 +15,6 @@ else()
add_subdirectory(baby-llama)
add_subdirectory(batched)
add_subdirectory(batched-bench)
- add_subdirectory(beam-search)
add_subdirectory(benchmark)
add_subdirectory(convert-llama2c-to-ggml)
add_subdirectory(embedding)
diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp
index bf0125e753746..4f6c3746a106c 100644
--- a/examples/baby-llama/baby-llama.cpp
+++ b/examples/baby-llama/baby-llama.cpp
@@ -522,8 +522,8 @@ static struct ggml_tensor * forward(
// wk shape [n_embd, n_embd, 1, 1]
// Qcur shape [n_embd/n_head, n_head, N, 1]
// Kcur shape [n_embd/n_head, n_head, N, 1]
- struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N), KQ_pos, n_rot, 0, 0);
- struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N), KQ_pos, n_rot, 0, 0);
+ struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N), KQ_pos, n_rot, 0);
+ struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N), KQ_pos, n_rot, 0);
// store key and value to memory
{
@@ -759,8 +759,8 @@ static struct ggml_tensor * forward_batch(
// wk shape [n_embd, n_embd, 1, 1]
// Qcur shape [n_embd/n_head, n_head, N, n_batch]
// Kcur shape [n_embd/n_head, n_head, N, n_batch]
- struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N, n_batch), KQ_pos, n_rot, 0, 0);
- struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N, n_batch), KQ_pos, n_rot, 0, 0);
+ struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wq, cur), n_embd/n_head, n_head, N, n_batch), KQ_pos, n_rot, 0);
+ struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_4d(ctx0, ggml_mul_mat(ctx0, model->layers[il].wk, cur), n_embd/n_head, n_head, N, n_batch), KQ_pos, n_rot, 0);
assert_shape_4d(Qcur, n_embd/n_head, n_head, N, n_batch);
assert_shape_4d(Kcur, n_embd/n_head, n_head, N, n_batch);
@@ -1056,7 +1056,7 @@ static struct ggml_tensor * forward_lora(
model->layers[il].wqb,
cur)),
n_embd/n_head, n_head, N),
- KQ_pos, n_rot, 0, 0);
+ KQ_pos, n_rot, 0);
struct ggml_tensor * Kcur = ggml_rope(ctx0,
ggml_reshape_3d(ctx0,
ggml_mul_mat(ctx0,
@@ -1065,7 +1065,7 @@ static struct ggml_tensor * forward_lora(
model->layers[il].wkb,
cur)),
n_embd/n_head, n_head, N),
- KQ_pos, n_rot, 0, 0);
+ KQ_pos, n_rot, 0);
// store key and value to memory
{
diff --git a/examples/batched-bench/README.md b/examples/batched-bench/README.md
index bf951baf7f096..fa4baf6403e9e 100644
--- a/examples/batched-bench/README.md
+++ b/examples/batched-bench/README.md
@@ -10,16 +10,16 @@ There are 2 modes of operation:
- `prompt is shared` - there is a common prompt of size `PP` used by all batches (i.e. `N_KV = PP + B*TG`)
```bash
-./batched-bench MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [IS_PP_SHARED] [NGL] [MMQ] <PP> <TG> <PL>
+./batched-bench -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]
# LLaMA 7B, F16, N_KV_MAX = 16384 (8GB), prompt not shared
-./batched-bench ./models/llama-7b/ggml-model-f16.gguf 16384 2048 512 0 99
+./batched-bench -m ./models/llama-7b/ggml-model-f16.gguf -c 16384 -b 2048 -ub 512 -ngl 99
# LLaMA 7B, Q8_0, N_KV_MAX = 16384 (8GB), prompt is shared
-./batched-bench ./models/llama-7b/ggml-model-q8_0.gguf 16384 2048 512 1 99
+./batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 16384 -b 2048 -ub 512 -ngl 99 -pps
# custom set of batches
-./batched-bench ./models/llama-7b/ggml-model-q8_0.gguf 2048 512 512 0 999 0 128,256,512 128,256 1,2,4,8,16,32
+./batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 2048 -b 512 -ub 512 -ngl 999 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32
```
## Sample results
diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp
index 2924d8116f44f..718f0a61a1878 100644
--- a/examples/batched-bench/batched-bench.cpp
+++ b/examples/batched-bench/batched-bench.cpp
@@ -28,67 +28,27 @@ static std::vector<int> parse_list(char * p) {
return ret;
}
-int main(int argc, char ** argv) {
- gpt_params params;
-
- if (argc == 1 || argv[1][0] == '-') {
- printf("usage: %s MODEL_PATH [N_KV_MAX] [N_BATCH] [N_UBATCH] [FATTN] [IS_PP_SHARED] [NGL] \n" , argv[0]);
- printf(" , and PL are comma-separated lists of numbers without spaces\n\n");
- printf(" example: %s ggml-model-f16.gguf 2048 2048 512 0 999 128,256,512 128,256 1,2,4,8,16,32\n\n", argv[0]);
- return 1 ;
- }
-
- int n_kv_max = 2048;
- int n_batch = 2048;
- int n_ubatch = 512;
- bool flash_attn = false;
- int is_pp_shared = 0;
- int n_gpu_layers = 0;
-
- std::vector<int> n_pp = { 128, 256, 512, 1024, 2048, 3584, 7680, };
- std::vector<int> n_tg = { 128, 256, };
- std::vector<int> n_pl = { 1, 2, 4, 8, 16, 32, };
- //std::vector<int> n_pl = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 32, };
-
- if (argc >= 2) {
- params.model = argv[1];
- }
-
- if (argc >= 3) {
- n_kv_max = std::atoi(argv[2]);
- }
-
- if (argc >= 4) {
- n_batch = std::atoi(argv[3]);
- }
-
- if (argc >= 5) {
- n_ubatch = std::atoi(argv[4]);
- }
-
- if (argc >= 6) {
- flash_attn = std::atoi(argv[5]);
- }
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
- if (argc >= 7) {
- is_pp_shared = std::atoi(argv[6]);
- }
+ LOG_TEE("\nexample usage:\n");
+ LOG_TEE("\n %s -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]\n", argv[0]);
+ LOG_TEE("\n");
+}
- if (argc >= 8) {
- n_gpu_layers = std::atoi(argv[7]);
- }
+int main(int argc, char ** argv) {
+ gpt_params params;
- if (argc >= 9) {
- n_pp = parse_list(argv[8]);
+ if (!gpt_params_parse(argc, argv, params)) {
+ print_usage(argc, argv, params);
+ return 1;
}
- if (argc >= 10) {
- n_tg = parse_list(argv[9]);
- }
+ int is_pp_shared = params.is_pp_shared;
- if (argc >= 11) {
- n_pl = parse_list(argv[10]);
- }
+ std::vector<int> n_pp = params.n_pp;
+ std::vector<int> n_tg = params.n_tg;
+ std::vector<int> n_pl = params.n_pl;
// init LLM
@@ -97,12 +57,7 @@ int main(int argc, char ** argv) {
// initialize the model
- llama_model_params model_params = llama_model_default_params();
-
- const std::vector<float> t_split(llama_max_devices(), 0.0f);
-
- model_params.n_gpu_layers = n_gpu_layers;
- model_params.tensor_split = t_split.data();
+ llama_model_params model_params = llama_model_params_from_gpt_params(params);
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
@@ -111,16 +66,7 @@ int main(int argc, char ** argv) {
return 1;
}
- llama_context_params ctx_params = llama_context_default_params();
-
- ctx_params.seed = 1234;
- ctx_params.n_ctx = n_kv_max;
- ctx_params.n_batch = n_batch;
- ctx_params.n_ubatch = n_ubatch;
- ctx_params.flash_attn = flash_attn;
-
- ctx_params.n_threads = params.n_threads;
- ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+ llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
// ensure enough sequences are available
ctx_params.n_seq_max = *std::max_element(n_pl.begin(), n_pl.end());
@@ -132,6 +78,8 @@ int main(int argc, char ** argv) {
return 1;
}
+ const int32_t n_kv_max = llama_n_ctx(ctx);
+
llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
// decode in batches of ctx_params.n_batch tokens
@@ -175,7 +123,7 @@ int main(int argc, char ** argv) {
}
LOG_TEE("\n");
- LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, is_pp_shared = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, n_batch, n_ubatch, flash_attn, is_pp_shared, n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
+ LOG_TEE("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, is_pp_shared = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, params.flash_attn, params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
LOG_TEE("\n");
LOG_TEE("|%6s | %6s | %4s | %6s | %8s | %8s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "B", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s", "T s", "S t/s");
diff --git a/examples/batched/README.md b/examples/batched/README.md
index 5d730331769fb..ed204c3088882 100644
--- a/examples/batched/README.md
+++ b/examples/batched/README.md
@@ -3,7 +3,7 @@
The example demonstrates batched generation from a given prompt
```bash
-./batched ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is" 4
+./batched -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is" -np 4
...
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 591bc6e57645c..62d9b144d3340 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -7,48 +7,31 @@
#include
#include
-int main(int argc, char ** argv) {
- gpt_params params;
-
- if (argc == 1 || argv[1][0] == '-') {
- printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL] [LEN] [NGL]\n" , argv[0]);
- return 1 ;
- }
-
- // number of parallel batches
- int n_parallel = 1;
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
- // total length of the sequences including the prompt
- int n_len = 32;
-
- // number of layers to offload to the GPU
- int n_gpu_layers = 0;
-
- if (argc >= 2) {
- params.model = argv[1];
- }
+ LOG_TEE("\nexample usage:\n");
+ LOG_TEE("\n %s -m model.gguf -p \"Hello my name is\" -n 32 -np 4\n", argv[0]);
+ LOG_TEE("\n");
+}
- if (argc >= 3) {
- params.prompt = argv[2];
- }
+int main(int argc, char ** argv) {
+ gpt_params params;
- if (argc >= 4) {
- n_parallel = std::atoi(argv[3]);
- }
+ params.prompt = "Hello my name is";
+ params.n_predict = 32;
- if (argc >= 5) {
- n_len = std::atoi(argv[4]);
+ if (!gpt_params_parse(argc, argv, params)) {
+ print_usage(argc, argv, params);
+ return 1;
}
- if (argc >= 6) {
- n_gpu_layers = std::atoi(argv[5]);
- }
- if (params.prompt.empty()) {
- params.prompt = "Hello my name is";
- }
+ // number of parallel batches
+ int n_parallel = params.n_parallel;
- string_process_escapes(params.prompt);
+ // total length of the sequences including the prompt
+ int n_predict = 32;
// init LLM
@@ -57,9 +40,7 @@ int main(int argc, char ** argv) {
// initialize the model
- llama_model_params model_params = llama_model_default_params();
-
- model_params.n_gpu_layers = n_gpu_layers;
+ llama_model_params model_params = llama_model_params_from_gpt_params(params);
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
@@ -73,18 +54,14 @@ int main(int argc, char ** argv) {
std::vector<llama_token> tokens_list;
tokens_list = ::llama_tokenize(model, params.prompt, true);
- const int n_kv_req = tokens_list.size() + (n_len - tokens_list.size())*n_parallel;
+ const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel;
// initialize the context
- llama_context_params ctx_params = llama_context_default_params();
+ llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
- ctx_params.seed = 1234;
ctx_params.n_ctx = n_kv_req;
- ctx_params.n_batch = std::max(n_len, n_parallel);
- ctx_params.n_seq_max = n_parallel;
- ctx_params.n_threads = params.n_threads;
- ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+ ctx_params.n_batch = std::max(n_predict, n_parallel);
llama_context * ctx = llama_new_context_with_model(model, ctx_params);
@@ -93,9 +70,9 @@ int main(int argc, char ** argv) {
return 1;
}
- const int n_ctx = llama_n_ctx(ctx);
+ const int n_ctx = llama_n_ctx(ctx);
- LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_len, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);
+ LOG_TEE("\n%s: n_predict = %d, n_ctx = %d, n_batch = %u, n_parallel = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, ctx_params.n_batch, n_parallel, n_kv_req);
// make sure the KV cache is big enough to hold all the prompt and generated tokens
if (n_kv_req > n_ctx) {
@@ -156,7 +133,7 @@ int main(int argc, char ** argv) {
const auto t_main_start = ggml_time_us();
- while (n_cur <= n_len) {
+ while (n_cur <= n_predict) {
// prepare the next batch
llama_batch_clear(batch);
@@ -192,7 +169,7 @@ int main(int argc, char ** argv) {
//const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
// is it an end of generation? -> mark the stream as finished
- if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
+ if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
i_batch[i] = -1;
LOG_TEE("\n");
if (n_parallel > 1) {
diff --git a/examples/beam-search/CMakeLists.txt b/examples/beam-search/CMakeLists.txt
deleted file mode 100644
index f0e37468b1030..0000000000000
--- a/examples/beam-search/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(TARGET beam-search)
-add_executable(${TARGET} beam-search.cpp)
-install(TARGETS ${TARGET} RUNTIME)
-target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/beam-search/beam-search.cpp b/examples/beam-search/beam-search.cpp
deleted file mode 100644
index 3d34378a506eb..0000000000000
--- a/examples/beam-search/beam-search.cpp
+++ /dev/null
@@ -1,188 +0,0 @@
-#include "common.h"
-#include "llama.h"
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-#include
-#include
-#elif defined (_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#ifndef NOMINMAX
-# define NOMINMAX
-#endif
-#include
-#include
-#endif
-
-// Used for debugging to print out beam tokens.
-struct ostream_beam_view {
- llama_context * ctx;
- llama_beam_view beam_view;
-};
-
-static std::ostream & operator<<(std::ostream & os, const ostream_beam_view & obv) {
- os << "p(" << obv.beam_view.p << ") eob(" << std::boolalpha << obv.beam_view.eob << ") tokens(";
- for (size_t i = 0 ; i < obv.beam_view.n_tokens ; ++i) {
- os << llama_token_to_piece(obv.ctx, obv.beam_view.tokens[i]);
- }
- return os << ')';
-}
-
-// Put here anything you want back in beam_search_callback().
-struct beam_search_callback_data {
- llama_context * ctx;
- std::vector<llama_token> response;
-};
-
-// In this case, end-of-beam (eob) is equivalent to end-of-sentence (eos) but this need not always be the same.
-// For example, eob can be flagged due to maximum token length, stop words, etc.
-static bool is_at_eob(const beam_search_callback_data & callback_data, const llama_token * tokens, size_t n_tokens) {
- return n_tokens && llama_token_is_eog(llama_get_model(callback_data.ctx), tokens[n_tokens-1]);
-}
-
-// Function matching type llama_beam_search_callback_fn_t.
-// Custom callback example is called each time the beams lengths increase:
-// * Show progress by printing ',' following by number of convergent beam tokens if any.
-// * When all beams converge to a common prefix, they are made available in beams_state.beams[0].
-// This is also called when the stop condition is met.
-// Collect tokens into std::vector response which is pointed to by callback_data.
-static void beam_search_callback(void * callback_data_ptr, llama_beams_state beams_state) {
- auto& callback_data = *static_cast(callback_data_ptr);
- // Mark beams as EOS as needed.
- for (size_t i = 0 ; i < beams_state.n_beams ; ++i) {
- llama_beam_view& beam_view = beams_state.beam_views[i];
- if (!beam_view.eob && is_at_eob(callback_data, beam_view.tokens, beam_view.n_tokens)) {
- beam_view.eob = true;
- }
- }
- printf(","); // Show progress
- if (const size_t n = beams_state.common_prefix_length) {
- callback_data.response.resize(callback_data.response.size() + n);
- assert(0u < beams_state.n_beams);
- const llama_token * tokens = beams_state.beam_views[0].tokens;
- std::copy(tokens, tokens + n, callback_data.response.end() - n);
- printf("%zu", n);
- }
- fflush(stdout);
-#if 1 // DEBUG: print current beams for this iteration
- std::cout << "\n\nCurrent beams (last_call=" << beams_state.last_call << "):\n";
- for (size_t i = 0 ; i < beams_state.n_beams ; ++i) {
- std::cout << "beams["< 3 )
- {
- params.prompt = argv[3];
- }
-
- if ( params.prompt.empty() )
- {
- params.prompt = "### Request:\nHow many countries are there?\n\n### Response:\n";
- }
-
- //---------------------------------
- // Init LLM :
- //---------------------------------
-
- llama_backend_init();
- llama_numa_init(params.numa);
-
- llama_model * model;
- llama_context * ctx;
-
- std::tie(model, ctx) = llama_init_from_gpt_params( params );
-
- if ( model == NULL )
- {
- fprintf( stderr , "%s: error: unable to load model\n" , __func__ );
- return 1;
- }
-
- //---------------------------------
- // Tokenize the prompt :
- //---------------------------------
-
- std::vector<llama_token> tokens_list = llama_tokenize(ctx, params.prompt, true);
-
- const size_t max_context_size = llama_n_ctx( ctx );
- const size_t max_tokens_list_size = max_context_size - 4 ;
-
- if (tokens_list.size() > max_tokens_list_size)
- {
- fprintf( stderr , "%s: error: prompt too long (%zu tokens, max %zu)\n" ,
- __func__ , tokens_list.size() , max_tokens_list_size );
- return 1;
- }
-
- fprintf( stderr, "\n\n" );
-
- // Print the tokens from the prompt :
-
- for( auto id : tokens_list )
- {
- std::cout << llama_token_to_piece(ctx, id);
- }
- std::cout << std::flush;
-
- int n_past = 0;
-
- if (llama_decode(ctx, llama_batch_get_one(tokens_list.data(), tokens_list.size(), n_past, 0)))
- {
- fprintf(stderr, "%s : failed to eval prompt.\n" , __func__ );
- return 1;
- }
- n_past += tokens_list.size();
-
- beam_search_callback_data callback_data{ctx, {}};
- size_t const beam_width = static_cast<size_t>(params.n_beams);
- int const n_predict = 256;
- llama_beam_search(ctx, beam_search_callback, &callback_data, beam_width, n_past, n_predict);
-
- std::cout << "\n\n";
- for (llama_token const token_id : callback_data.response) {
- std::cout << llama_token_to_piece(ctx,token_id);
- }
- std::cout << std::endl;
-
- llama_free( ctx );
- llama_free_model( model );
-
- llama_backend_free();
-
- return 0;
-}
diff --git a/examples/convert-legacy-llama.py b/examples/convert-legacy-llama.py
index fd840101569a9..721a57c00299b 100755
--- a/examples/convert-legacy-llama.py
+++ b/examples/convert-legacy-llama.py
@@ -176,7 +176,7 @@ class Params:
rope_scaling_type: gguf.RopeScalingType | None = None
f_rope_freq_base: float | None = None
f_rope_scale: float | None = None
- n_orig_ctx: int | None = None
+ n_ctx_orig: int | None = None
rope_finetuned: bool | None = None
ftype: GGMLFileType | None = None
@@ -226,7 +226,7 @@ def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params:
with open(config_path) as f:
config = json.load(f)
- rope_scaling_type = f_rope_scale = n_orig_ctx = rope_finetuned = None
+ rope_scaling_type = f_rope_scale = n_ctx_orig = rope_finetuned = None
rope_scaling = config.get("rope_scaling")
if rope_scaling is not None and (typ := rope_scaling.get("type")):
@@ -236,7 +236,7 @@ def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params:
rope_scaling_type = gguf.RopeScalingType.LINEAR
elif typ == "yarn":
rope_scaling_type = gguf.RopeScalingType.YARN
- n_orig_ctx = rope_scaling['original_max_position_embeddings']
+ n_ctx_orig = rope_scaling['original_max_position_embeddings']
rope_finetuned = rope_scaling['finetuned']
else:
raise NotImplementedError(f'Unknown rope scaling type: {typ}')
@@ -272,7 +272,7 @@ def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params:
f_rope_freq_base = config.get("rope_theta"),
rope_scaling_type = rope_scaling_type,
f_rope_scale = f_rope_scale,
- n_orig_ctx = n_orig_ctx,
+ n_ctx_orig = n_ctx_orig,
rope_finetuned = rope_finetuned,
)
@@ -864,8 +864,8 @@ def add_meta_arch(self, params: Params) -> None:
self.gguf.add_rope_scaling_type(params.rope_scaling_type)
self.gguf.add_rope_scaling_factor(params.f_rope_scale)
- if params.n_orig_ctx is not None:
- self.gguf.add_rope_scaling_orig_ctx_len(params.n_orig_ctx)
+ if params.n_ctx_orig is not None:
+ self.gguf.add_rope_scaling_orig_ctx_len(params.n_ctx_orig)
if params.rope_finetuned is not None:
self.gguf.add_rope_scaling_finetuned(params.rope_finetuned)
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 004399b5f7eb8..244751e003d9e 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -63,6 +63,7 @@ int main(int argc, char ** argv) {
gpt_params params;
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
@@ -79,9 +80,6 @@ int main(int argc, char ** argv) {
fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);
std::mt19937 rng(params.seed);
- if (params.random_prompt) {
- params.prompt = string_random_prompt(rng);
- }
llama_backend_init();
llama_numa_init(params.numa);
diff --git a/examples/eval-callback/eval-callback.cpp b/examples/eval-callback/eval-callback.cpp
index 51d67d6d97ae6..64cd338c26351 100644
--- a/examples/eval-callback/eval-callback.cpp
+++ b/examples/eval-callback/eval-callback.cpp
@@ -140,20 +140,18 @@ static bool run(llama_context * ctx, const gpt_params & params) {
}
int main(int argc, char ** argv) {
-
callback_data cb_data;
gpt_params params;
+
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
print_build_info();
std::mt19937 rng(params.seed);
- if (params.random_prompt) {
- params.prompt = string_random_prompt(rng);
- }
llama_backend_init();
llama_numa_init(params.numa);
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 22425730f20eb..71a4333ee7908 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -564,7 +564,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
const int rope_mode = 0;
return ggml_rope_ext(ctx,
- t, KQ_pos, nullptr, n_rot, rope_mode, n_ctx, 0,
+ t, KQ_pos, nullptr, n_rot, rope_mode, n_ctx,
rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
);
};
diff --git a/examples/gguf-split/tests.sh b/examples/gguf-split/tests.sh
index 7ca6fa7f20de8..3bc0fa47110e3 100755
--- a/examples/gguf-split/tests.sh
+++ b/examples/gguf-split/tests.sh
@@ -41,7 +41,7 @@ echo PASS
echo
# 2b. Test the sharded model is loading properly
-$MAIN --model $WORK_PATH/ggml-model-split-00001-of-00006.gguf --random-prompt --n-predict 32
+$MAIN --model $WORK_PATH/ggml-model-split-00001-of-00006.gguf --n-predict 32
echo PASS
echo
@@ -51,7 +51,7 @@ echo PASS
echo
# 3b. Test the merged model is loading properly
-$MAIN --model $WORK_PATH/ggml-model-merge.gguf --random-prompt --n-predict 32
+$MAIN --model $WORK_PATH/ggml-model-merge.gguf --n-predict 32
echo PASS
echo
@@ -61,7 +61,7 @@ echo PASS
echo
# 4b. Test the sharded model is loading properly
-$MAIN --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00007.gguf --random-prompt --n-predict 32
+$MAIN --model $WORK_PATH/ggml-model-split-32-tensors-00001-of-00007.gguf --n-predict 32
echo PASS
echo
@@ -71,7 +71,7 @@ echo
#echo
# 5b. Test the merged model is loading properly
-#$MAIN --model $WORK_PATH/ggml-model-merge-2.gguf --random-prompt --n-predict 32
+#$MAIN --model $WORK_PATH/ggml-model-merge-2.gguf --n-predict 32
#echo PASS
#echo
@@ -81,7 +81,7 @@ echo PASS
echo
# 6b. Test the sharded model is loading properly
-$MAIN --model $WORK_PATH/ggml-model-split-2G-00001-of-00002.gguf --random-prompt --n-predict 32
+$MAIN --model $WORK_PATH/ggml-model-split-2G-00001-of-00002.gguf --n-predict 32
echo PASS
echo
diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp
index 52fd719b38ee5..2135157916c97 100644
--- a/examples/gritlm/gritlm.cpp
+++ b/examples/gritlm/gritlm.cpp
@@ -153,7 +153,9 @@ static std::string gritlm_instruction(const std::string & instruction) {
int main(int argc, char * argv[]) {
gpt_params params;
+
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp
index 25a2351cc64d3..e050c09d2f38b 100644
--- a/examples/imatrix/imatrix.cpp
+++ b/examples/imatrix/imatrix.cpp
@@ -533,7 +533,6 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params, bool
}
int main(int argc, char ** argv) {
-
StatParams sparams;
std::string prev_result_file;
std::string combine_files;
@@ -581,7 +580,9 @@ int main(int argc, char ** argv) {
gpt_params params;
params.n_batch = 512;
- if (!gpt_params_parse(args.size(), args.data(), params)) {
+
+ if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
@@ -597,9 +598,6 @@ int main(int argc, char ** argv) {
fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);
std::mt19937 rng(params.seed);
- if (params.random_prompt) {
- params.prompt = string_random_prompt(rng);
- }
sparams.dataset = params.prompt_file;
g_collector.set_parameters(std::move(sparams));
diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp
index 539f781847893..0e4ec79c693fa 100644
--- a/examples/infill/infill.cpp
+++ b/examples/infill/infill.cpp
@@ -107,6 +107,7 @@ int main(int argc, char ** argv) {
g_params = ¶ms;
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
@@ -139,27 +140,6 @@ int main(int argc, char ** argv) {
LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
params.n_ctx = 8;
}
- if (params.instruct) {
- printf("\n************\n");
- printf("%s: please use the 'main' tool for instruct mode\n", __func__);
- printf("************\n\n");
-
- return 0;
- }
- if (params.chatml) {
- printf("\n************\n");
- printf("%s: please use the 'main' tool for chatml mode\n", __func__);
- printf("************\n\n");
-
- return 0;
- }
- if (!params.antiprompt.empty()) {
- printf("\n************\n");
- printf("%s: please use the 'main' tool for antiprompt mode\n", __func__);
- printf("************\n\n");
-
- return 0;
- }
if (!params.interactive_first && (params.input_prefix.empty() && params.input_suffix.empty())) {
printf("\n************\n");
printf("%s: please use '--interactive_first' or specify '--in_prefix' and/or '--in_suffix'\n", __func__);
@@ -167,20 +147,6 @@ int main(int argc, char ** argv) {
return 0;
}
- if (params.random_prompt) {
- printf("\n************\n");
- printf("%s: please use the 'main' tool for random prompt mode\n", __func__);
- printf("************\n\n");
-
- return 0;
- }
- if (!params.path_prompt_cache.empty()) {
- printf("\n************\n");
- printf("%s: infill does not support prompt caching\n", __func__);
- printf("************\n\n");
-
- return 0;
- }
if (params.rope_freq_base != 0.0) {
LOG_TEE("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
@@ -207,17 +173,13 @@ int main(int argc, char ** argv) {
llama_model * model;
llama_context * ctx;
- llama_context * ctx_guidance = NULL;
+
g_model = &model;
g_ctx = &ctx;
// load the model and apply lora adapter, if any
LOG("%s: load the model and apply lora adapter, if any\n", __func__);
std::tie(model, ctx) = llama_init_from_gpt_params(params);
- if (sparams.cfg_scale > 1.f) {
- struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
- ctx_guidance = llama_new_context_with_model(model, lparams);
- }
if (model == NULL) {
LOG_TEE("%s: error: unable to load model\n", __func__);
@@ -273,25 +235,6 @@ int main(int argc, char ** argv) {
LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
}
- // Tokenize negative prompt
- std::vector<llama_token> guidance_inp;
- int guidance_offset = 0;
- int original_prompt_len = 0;
- if (ctx_guidance) {
- LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));
-
- guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, true);
- LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());
-
- std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true);
- LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());
-
- original_prompt_len = original_inp.size();
- guidance_offset = (int)guidance_inp.size() - original_prompt_len;
- LOG("original_prompt_len: %s", log_tostr(original_prompt_len));
- LOG("guidance_offset: %s", log_tostr(guidance_offset));
- }
-
if ((int) embd_inp.size() > n_ctx - 4) {
LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
return 1;
@@ -319,15 +262,6 @@ int main(int argc, char ** argv) {
LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
}
- if (ctx_guidance) {
- LOG_TEE("\n");
- LOG_TEE("%s: negative prompt: '%s'\n", __func__, sparams.cfg_negative_prompt.c_str());
- LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
- for (int i = 0; i < (int) guidance_inp.size(); i++) {
- LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
- }
- }
-
if (params.n_keep > 0) {
LOG_TEE("%s: static prompt based on n_keep: '", __func__);
for (int i = 0; i < params.n_keep; i++) {
@@ -395,12 +329,11 @@ int main(int argc, char ** argv) {
is_interacting = params.interactive_first;
}
- bool input_echo = true;
+ bool input_echo = true;
- int n_past = 0;
- int n_remain = params.n_predict;
- int n_consumed = 0;
- int n_past_guidance = 0;
+ int n_past = 0;
+ int n_remain = params.n_predict;
+ int n_consumed = 0;
std::vector<llama_token> input_tokens; g_input_tokens = &input_tokens;
std::vector<llama_token> output_tokens; g_output_tokens = &output_tokens;
@@ -410,7 +343,6 @@ int main(int argc, char ** argv) {
console::set_display(console::prompt);
std::vector<llama_token> embd;
- std::vector<llama_token> embd_guidance;
struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
@@ -436,7 +368,7 @@ int main(int argc, char ** argv) {
// if we run out of context:
// - take the n_keep first tokens from the original prompt (via n_past)
// - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
- if (n_past + (int) embd.size() + std::max(0, guidance_offset) > n_ctx) {
+ if (n_past + (int) embd.size() > n_ctx) {
if (params.n_predict == -2) {
LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
break;
@@ -453,11 +385,7 @@ int main(int argc, char ** argv) {
n_past -= n_discard;
- if (ctx_guidance) {
- n_past_guidance -= n_discard;
- }
-
- LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);
+ LOG("after swap: n_past = %d\n", n_past);
LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd).c_str());
@@ -465,45 +393,6 @@ int main(int argc, char ** argv) {
// evaluate tokens in batches
// embd is typically prepared beforehand to fit within a batch, but not always
-
- if (ctx_guidance) {
- int input_size = 0;
- llama_token * input_buf = NULL;
-
- if (n_past_guidance < (int) guidance_inp.size()) {
- // Guidance context should have the same data with these modifications:
- //
- // * Replace the initial prompt
- // * Shift everything by guidance_offset
- embd_guidance = guidance_inp;
- if (embd.begin() + original_prompt_len < embd.end()) {
- embd_guidance.insert(
- embd_guidance.end(),
- embd.begin() + original_prompt_len,
- embd.end()
- );
- }
-
- input_buf = embd_guidance.data();
- input_size = embd_guidance.size();
-
- LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance).c_str());
- } else {
- input_buf = embd.data();
- input_size = embd.size();
- }
-
- for (int i = 0; i < input_size; i += params.n_batch) {
- int n_eval = std::min(input_size - i, params.n_batch);
- if (llama_decode(ctx_guidance, llama_batch_get_one(input_buf + i, n_eval, n_past_guidance, 0))) {
- LOG_TEE("%s : failed to eval\n", __func__);
- return 1;
- }
-
- n_past_guidance += n_eval;
- }
- }
-
for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
int n_eval = (int) embd.size() - i;
if (n_eval > params.n_batch) {
@@ -525,11 +414,9 @@ int main(int argc, char ** argv) {
}
embd.clear();
- embd_guidance.clear();
if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
-
- const llama_token id = llama_sampling_sample(ctx_sampling, ctx, ctx_guidance);
+ const llama_token id = llama_sampling_sample(ctx_sampling, ctx, nullptr);
llama_sampling_accept(ctx_sampling, ctx, id, true);
@@ -583,7 +470,6 @@ int main(int argc, char ** argv) {
// if not currently processing queued inputs;
if ((int) embd_inp.size() <= n_consumed) {
-
// deal with eot token in infill mode
if ((llama_sampling_last(ctx_sampling) == llama_token_eot(model) || is_interacting) && params.interactive){
if (is_interacting && !params.interactive_first) {
@@ -644,7 +530,6 @@ int main(int argc, char ** argv) {
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
embd_inp.push_back(llama_token_middle(model));
embd.clear();
- embd_guidance.clear();
n_remain = params.n_predict;
n_past = 0;
n_consumed = 0;
@@ -751,7 +636,6 @@ int main(int argc, char ** argv) {
llama_print_timings(ctx);
write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);
- if (ctx_guidance) { llama_free(ctx_guidance); }
llama_free(ctx);
llama_free_model(model);
diff --git a/examples/llama-bench/README.md b/examples/llama-bench/README.md
index 8578405646af7..fd95b35f46595 100644
--- a/examples/llama-bench/README.md
+++ b/examples/llama-bench/README.md
@@ -162,7 +162,7 @@ $ ./llama-bench -o csv
```
```csv
-build_commit,build_number,cuda,opencl,metal,gpu_blas,blas,cpu_info,gpu_info,model_filename,model_type,model_size,model_n_params,n_batch,n_threads,f16_kv,n_gpu_layers,main_gpu,mul_mat_q,tensor_split,n_prompt,n_gen,test_time,avg_ns,stddev_ns,avg_ts,stddev_ts
+build_commit,build_number,cuda,metal,gpu_blas,blas,cpu_info,gpu_info,model_filename,model_type,model_size,model_n_params,n_batch,n_threads,f16_kv,n_gpu_layers,main_gpu,mul_mat_q,tensor_split,n_prompt,n_gen,test_time,avg_ns,stddev_ns,avg_ts,stddev_ts
"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","llama 7B mostly Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","512","0","2023-09-23T12:09:01Z","212155977","732372","2413.341687","8.305961"
"3469684","1275","1","0","0","1","1","13th Gen Intel(R) Core(TM) i9-13900K","NVIDIA GeForce RTX 3090 Ti","models/7B/ggml-model-q4_0.gguf","llama 7B mostly Q4_0","3825065984","6738415616","512","16","1","99","0","1","0.00","0","128","2023-09-23T12:09:02Z","969320879","2728399","132.052051","0.371342"
```
@@ -179,7 +179,6 @@ $ ./llama-bench -o json
"build_commit": "3469684",
"build_number": 1275,
"cuda": true,
- "opencl": false,
"metal": false,
"gpu_blas": true,
"blas": true,
@@ -210,7 +209,6 @@ $ ./llama-bench -o json
"build_commit": "3469684",
"build_number": 1275,
"cuda": true,
- "opencl": false,
"metal": false,
"gpu_blas": true,
"blas": true,
@@ -253,7 +251,6 @@ CREATE TABLE IF NOT EXISTS test (
build_commit TEXT,
build_number INTEGER,
cuda INTEGER,
- opencl INTEGER,
metal INTEGER,
gpu_blas INTEGER,
blas INTEGER,
@@ -279,6 +276,6 @@ CREATE TABLE IF NOT EXISTS test (
stddev_ts REAL
);
-INSERT INTO test (build_commit, build_number, cuda, opencl, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '512', '0', '2023-09-23T12:10:30Z', '212693772', '743623', '2407.240204', '8.409634');
-INSERT INTO test (build_commit, build_number, cuda, opencl, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '0', '128', '2023-09-23T12:10:31Z', '977925003', '4037361', '130.891159', '0.537692');
+INSERT INTO test (build_commit, build_number, cuda, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '512', '0', '2023-09-23T12:10:30Z', '212693772', '743623', '2407.240204', '8.409634');
+INSERT INTO test (build_commit, build_number, cuda, metal, gpu_blas, blas, cpu_info, gpu_info, model_filename, model_type, model_size, model_n_params, n_batch, n_threads, f16_kv, n_gpu_layers, main_gpu, mul_mat_q, tensor_split, n_prompt, n_gen, test_time, avg_ns, stddev_ns, avg_ts, stddev_ts) VALUES ('3469684', '1275', '1', '0', '0', '1', '1', '13th Gen Intel(R) Core(TM) i9-13900K', 'NVIDIA GeForce RTX 3090 Ti', 'models/7B/ggml-model-q4_0.gguf', 'llama 7B mostly Q4_0', '3825065984', '6738415616', '512', '16', '1', '99', '0', '1', '0.00', '0', '128', '2023-09-23T12:10:31Z', '977925003', '4037361', '130.891159', '0.537692');
```
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 5d3cbd842dc69..5c31548a6c25c 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -41,20 +41,6 @@ static std::string join(const std::vector<T> & values, const std::string & delim
return str.str();
}
-template<typename T>
-static std::vector<T> split(const std::string & str, char delim) {
- std::vector<T> values;
- std::istringstream str_stream(str);
- std::string token;
- while (std::getline(str_stream, token, delim)) {
- T value;
- std::istringstream token_stream(token);
- token_stream >> value;
- values.push_back(value);
- }
- return values;
-}
-
template<typename T, typename F>
static std::vector<std::string> transform_to_str(const std::vector<T> & values, F f) {
std::vector<std::string> str_values;
@@ -322,28 +308,28 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.model.insert(params.model.end(), p.begin(), p.end());
} else if (arg == "-p" || arg == "--n-prompt") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.n_prompt.insert(params.n_prompt.end(), p.begin(), p.end());
} else if (arg == "-n" || arg == "--n-gen") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
} else if (arg == "-pg") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], ',');
+ auto p = string_split(argv[i], ',');
if (p.size() != 2) {
invalid_param = true;
break;
@@ -354,21 +340,21 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.n_batch.insert(params.n_batch.end(), p.begin(), p.end());
} else if (arg == "-ub" || arg == "--ubatch-size") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.n_ubatch.insert(params.n_ubatch.end(), p.begin(), p.end());
} else if (arg == "-ctk" || arg == "--cache-type-k") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
std::vector<ggml_type> types;
for (const auto & t : p) {
ggml_type gt = ggml_type_from_name(t);
@@ -384,7 +370,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
std::vector<ggml_type> types;
for (const auto & t : p) {
ggml_type gt = ggml_type_from_name(t);
@@ -400,14 +386,14 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.n_threads.insert(params.n_threads.end(), p.begin(), p.end());
} else if (arg == "-ngl" || arg == "--n-gpu-layers") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
} else if (arg == "-rpc" || arg == "--rpc") {
if (++i >= argc) {
@@ -420,7 +406,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
std::vector<llama_split_mode> modes;
for (const auto & m : p) {
llama_split_mode mode;
@@ -442,13 +428,13 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
invalid_param = true;
break;
}
- params.main_gpu = split(argv[i], split_delim);
+ params.main_gpu = string_split(argv[i], split_delim);
} else if (arg == "-nkvo" || arg == "--no-kv-offload") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.no_kv_offload.insert(params.no_kv_offload.end(), p.begin(), p.end());
} else if (arg == "--numa") {
if (++i >= argc) {
@@ -466,28 +452,28 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.flash_attn.insert(params.flash_attn.end(), p.begin(), p.end());
} else if (arg == "-mmp" || arg == "--mmap") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.use_mmap.insert(params.use_mmap.end(), p.begin(), p.end());
} else if (arg == "-embd" || arg == "--embeddings") {
if (++i >= argc) {
invalid_param = true;
break;
}
- auto p = split(argv[i], split_delim);
+ auto p = string_split(argv[i], split_delim);
params.embeddings.insert(params.embeddings.end(), p.begin(), p.end());
} else if (arg == "-ts" || arg == "--tensor-split") {
if (++i >= argc) {
invalid_param = true;
break;
}
- for (auto ts : split(argv[i], split_delim)) {
+ for (auto ts : string_split(argv[i], split_delim)) {
// split string by ; and /
const std::regex regex{R"([;/]+)"};
std::sregex_token_iterator it{ts.begin(), ts.end(), regex, -1};
@@ -723,7 +709,6 @@ struct test {
static const std::string build_commit;
static const int build_number;
static const bool cuda;
- static const bool opencl;
static const bool vulkan;
static const bool kompute;
static const bool metal;
@@ -812,9 +797,6 @@ struct test {
if (cuda) {
return GGML_CUDA_NAME;
}
- if (opencl) {
- return "OpenCL";
- }
if (vulkan) {
return "Vulkan";
}
@@ -843,7 +825,7 @@ struct test {
static const std::vector<std::string> & get_fields() {
static const std::vector<std::string> fields = {
"build_commit", "build_number",
- "cuda", "opencl", "vulkan", "kompute", "metal", "sycl", "rpc", "gpu_blas", "blas",
+ "cuda", "vulkan", "kompute", "metal", "sycl", "rpc", "gpu_blas", "blas",
"cpu_info", "gpu_info",
"model_filename", "model_type", "model_size", "model_n_params",
"n_batch", "n_ubatch",
@@ -869,7 +851,7 @@ struct test {
field == "avg_ns" || field == "stddev_ns") {
return INT;
}
- if (field == "cuda" || field == "opencl" || field == "vulkan" || field == "kompute" || field == "metal" ||
+ if (field == "cuda" || field == "vulkan" || field == "kompute" || field == "metal" ||
field == "gpu_blas" || field == "blas" || field == "sycl" ||field == "f16_kv" || field == "no_kv_offload" ||
field == "flash_attn" || field == "use_mmap" || field == "embeddings") {
return BOOL;
@@ -898,7 +880,7 @@ struct test {
}
std::vector<std::string> values = {
build_commit, std::to_string(build_number),
- std::to_string(cuda), std::to_string(opencl), std::to_string(vulkan), std::to_string(vulkan),
+ std::to_string(cuda), std::to_string(vulkan), std::to_string(vulkan),
std::to_string(metal), std::to_string(sycl), std::to_string(rpc), std::to_string(gpu_blas), std::to_string(blas),
cpu_info, gpu_info,
model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
@@ -927,7 +909,6 @@ struct test {
const std::string test::build_commit = LLAMA_COMMIT;
const int test::build_number = LLAMA_BUILD_NUMBER;
const bool test::cuda = !!ggml_cpu_has_cuda();
-const bool test::opencl = !!ggml_cpu_has_clblast();
const bool test::vulkan = !!ggml_cpu_has_vulkan();
const bool test::kompute = !!ggml_cpu_has_kompute();
const bool test::metal = !!ggml_cpu_has_metal();
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index c974900f21e20..8c7dd2ae3d0dc 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -112,9 +112,12 @@ struct llava_context {
struct llama_model * model = NULL;
};
-static void show_additional_info(int /*argc*/, char ** argv) {
- LOG_TEE("\n example usage: %s -m --mmproj --image --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
- LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
+
+ LOG_TEE("\n example usage:\n");
+ LOG_TEE("\n %s -m --mmproj --image --image [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
+ LOG_TEE("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
}
static struct llava_image_embed * load_image(llava_context * ctx_llava, gpt_params * params, const std::string & fname) {
@@ -278,7 +281,7 @@ int main(int argc, char ** argv) {
gpt_params params;
if (!gpt_params_parse(argc, argv, params)) {
- show_additional_info(argc, argv);
+ print_usage(argc, argv, params);
return 1;
}
@@ -290,8 +293,7 @@ int main(int argc, char ** argv) {
#endif // LOG_DISABLE_LOGS
if (params.mmproj.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
- gpt_params_print_usage(argc, argv, params);
- show_additional_info(argc, argv);
+ print_usage(argc, argv, {});
return 1;
}
auto model = llava_init(&params);
diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp
index 54f060a85b263..fb20ad93f9c1d 100644
--- a/examples/lookahead/lookahead.cpp
+++ b/examples/lookahead/lookahead.cpp
@@ -37,7 +37,8 @@ struct ngram_container {
int main(int argc, char ** argv) {
gpt_params params;
- if (gpt_params_parse(argc, argv, params) == false) {
+ if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
diff --git a/examples/lookup/lookup-create.cpp b/examples/lookup/lookup-create.cpp
index 1c230c9667c71..d713f6f2194a8 100644
--- a/examples/lookup/lookup-create.cpp
+++ b/examples/lookup/lookup-create.cpp
@@ -14,8 +14,10 @@ int main(int argc, char ** argv){
gpt_params params;
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
+
// init llama.cpp
llama_backend_init();
llama_numa_init(params.numa);
diff --git a/examples/lookup/lookup-stats.cpp b/examples/lookup/lookup-stats.cpp
index 87ecc0a4f1394..0b171c87273d1 100644
--- a/examples/lookup/lookup-stats.cpp
+++ b/examples/lookup/lookup-stats.cpp
@@ -16,6 +16,7 @@ int main(int argc, char ** argv){
gpt_params params;
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp
index 83dbee91a8362..80ecd925d5962 100644
--- a/examples/lookup/lookup.cpp
+++ b/examples/lookup/lookup.cpp
@@ -15,6 +15,7 @@ int main(int argc, char ** argv){
gpt_params params;
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
diff --git a/examples/main-cmake-pkg/README.md b/examples/main-cmake-pkg/README.md
index edf20d8db6616..a88e92f23981f 100644
--- a/examples/main-cmake-pkg/README.md
+++ b/examples/main-cmake-pkg/README.md
@@ -8,16 +8,14 @@ Because this example is "outside of the source tree", it is important to first b
### Considerations
-When hardware acceleration libraries are used (e.g. CUDA, Metal, CLBlast, etc.), CMake must be able to locate the associated CMake package. In the example below, when building _main-cmake-pkg_ notice the `CMAKE_PREFIX_PATH` includes the Llama CMake package location _in addition to_ the CLBlast package, which was used when compiling _llama.cpp_.
+When hardware acceleration libraries are used (e.g. CUDA, Metal, etc.), CMake must be able to locate the associated CMake package.
### Build llama.cpp and install to C:\LlamaCPP directory
-In this case, CLBlast was already installed so the CMake package is referenced in `CMAKE_PREFIX_PATH`.
-
```cmd
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
-cmake -B build -DBUILD_SHARED_LIBS=OFF -DLLAMA_CLBLAST=ON -DCMAKE_PREFIX_PATH=C:/CLBlast/lib/cmake/CLBlast -G "Visual Studio 17 2022" -A x64
+cmake -B build -DBUILD_SHARED_LIBS=OFF -G "Visual Studio 17 2022" -A x64
cmake --build build --config Release
cmake --install build --prefix C:/LlamaCPP
```
@@ -27,7 +25,7 @@ cmake --install build --prefix C:/LlamaCPP
```cmd
cd ..\examples\main-cmake-pkg
-cmake -B build -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/CLBlast/lib/cmake/CLBlast;C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64
+cmake -B build -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64
cmake --build build --config Release
cmake --install build --prefix C:/MyLlamaApp
```
diff --git a/examples/main/README.md b/examples/main/README.md
index ee930f4e79a0d..cdc002f151d4a 100644
--- a/examples/main/README.md
+++ b/examples/main/README.md
@@ -53,13 +53,13 @@ The following command generates "infinite" text from a starting prompt (you can
#### Unix-based systems (Linux, macOS, etc.):
```bash
-./main -m models/7B/ggml-model.bin --ignore-eos -n -1 --random-prompt
+./main -m models/7B/ggml-model.bin --ignore-eos -n -1
```
#### Windows:
```powershell
-main.exe -m models\7B\ggml-model.bin --ignore-eos -n -1 --random-prompt
+main.exe -m models\7B\ggml-model.bin --ignore-eos -n -1
```
## Common Options
@@ -69,7 +69,6 @@ In this section, we cover the most commonly used options for running the `main`
- `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`; inferred from `--model-url` if set).
- `-mu MODEL_URL --model-url MODEL_URL`: Specify a remote http url to download the file (e.g https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf).
- `-i, --interactive`: Run the program in interactive mode, allowing you to provide input directly and receive real-time responses.
-- `-ins, --instruct`: Run the program in instruction mode, which is particularly useful when working with Alpaca models.
- `-n N, --n-predict N`: Set the number of tokens to predict when generating text. Adjusting this value can influence the length of the generated text.
- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
@@ -80,11 +79,10 @@ The `main` program provides several ways to interact with the LLaMA models using
- `--prompt PROMPT`: Provide a prompt directly as a command-line option.
- `--file FNAME`: Provide a file containing a prompt or multiple prompts.
- `--interactive-first`: Run the program in interactive mode and wait for input right away. (More on this below.)
-- `--random-prompt`: Start with a randomized prompt.
## Interaction
-The `main` program offers a seamless way to interact with LLaMA models, allowing users to engage in real-time conversations or provide instructions for specific tasks. The interactive mode can be triggered using various options, including `--interactive`, `--interactive-first`, and `--instruct`.
+The `main` program offers a seamless way to interact with LLaMA models, allowing users to engage in real-time conversations or provide instructions for specific tasks. The interactive mode can be triggered using various options, including `--interactive` and `--interactive-first`.
In interactive mode, users can participate in text generation by injecting their input during the process. Users can press `Ctrl+C` at any time to interject and type their input, followed by pressing `Return` to submit it to the LLaMA model. To submit additional lines without finalizing input, users can end the current line with a backslash (`\`) and continue typing.
@@ -92,7 +90,6 @@ In interactive mode, users can participate in text generation by injecting their
- `-i, --interactive`: Run the program in interactive mode, allowing users to engage in real-time conversations or provide specific instructions to the model.
- `--interactive-first`: Run the program in interactive mode and immediately wait for user input before starting the text generation.
-- `-ins, --instruct`: Run the program in instruction mode, which is specifically designed to work with Alpaca models that excel in completing tasks based on user instructions.
- `--color`: Enable colorized output to differentiate visually distinguishing between prompts, user input, and generated text.
By understanding and utilizing these interaction options, you can create engaging and dynamic experiences with the LLaMA models, tailoring the text generation process to your specific needs.
@@ -121,16 +118,6 @@ The `--in-suffix` flag is used to add a suffix after your input. This is useful
./main -r "User:" --in-prefix " " --in-suffix "Assistant:"
```
-### Instruction Mode
-
-Instruction mode is particularly useful when working with Alpaca models, which are designed to follow user instructions for specific tasks:
-
-- `-ins, --instruct`: Enable instruction mode to leverage the capabilities of Alpaca models in completing tasks based on user-provided instructions.
-
-Technical detail: the user's input is internally prefixed with the reverse prompt (or `### Instruction:` as the default), and followed by `### Response:` (except if you just press Return without any input, to keep generating a longer response).
-
-By understanding and utilizing these interaction options, you can create engaging and dynamic experiences with the LLaMA models, tailoring the text generation process to your specific needs.
-
## Context Management
During text generation, LLaMA models have a limited context size, which means they can only consider a certain number of tokens from the input and generated text. When the context fills up, the model resets internally, potentially losing some information from the beginning of the conversation or instructions. Context management options help maintain continuity and coherence in these situations.
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 44949ba869e70..b97b7b7937f02 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -122,8 +122,10 @@ int main(int argc, char ** argv) {
g_params = &params;
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
+
llama_sampling_params & sparams = params.sparams;
#ifndef LOG_DISABLE_LOGS
@@ -180,9 +182,6 @@ int main(int argc, char ** argv) {
LOG_TEE("%s: seed = %u\n", __func__, params.seed);
std::mt19937 rng(params.seed);
- if (params.random_prompt) {
- params.prompt = string_random_prompt(rng);
- }
LOG("%s: llama backend init\n", __func__);
llama_backend_init();
@@ -250,11 +249,8 @@ int main(int argc, char ** argv) {
std::vector<llama_token> embd_inp;
- if (params.interactive_first || params.instruct || params.chatml || !params.prompt.empty() || session_tokens.empty()) {
+ if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
LOG("tokenize the prompt\n");
- if (params.chatml) {
- params.prompt = "<|im_start|>system\n" + params.prompt + "<|im_end|>";
- }
embd_inp = ::llama_tokenize(ctx, params.prompt, true, true);
} else {
LOG("use session tokens\n");
@@ -332,37 +328,13 @@ int main(int argc, char ** argv) {
}
// number of tokens to keep when resetting context
- if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size() || params.instruct || params.chatml) {
+ if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
params.n_keep = (int)embd_inp.size();
} else {
params.n_keep += add_bos; // always keep the BOS token
}
- // prefix & suffix for instruct mode
- const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true, true);
- const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false, true);
-
- LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
- LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());
-
- // chatml prefix & suffix
- const auto cml_pfx = ::llama_tokenize(ctx, "\n<|im_start|>user\n", true, true);
- const auto cml_sfx = ::llama_tokenize(ctx, "<|im_end|>\n<|im_start|>assistant\n", false, true);
-
- LOG("cml_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_pfx).c_str());
- LOG("cml_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_sfx).c_str());
-
- // in instruct mode, we inject a prefix and a suffix to each input by the user
- if (params.instruct) {
- params.interactive_first = true;
- params.antiprompt.emplace_back("### Instruction:\n\n");
- }
- // similar for chatml mode
- else if (params.chatml) {
- params.interactive_first = true;
- params.antiprompt.emplace_back("<|im_start|>user\n");
- }
- else if (params.conversation) {
+ if (params.conversation) {
params.interactive_first = true;
}
@@ -823,15 +795,13 @@ int main(int argc, char ** argv) {
is_interacting = true;
printf("\n");
- } else if (params.instruct || params.chatml) {
- is_interacting = true;
}
}
if (n_past > 0 && is_interacting) {
LOG("waiting for user input\n");
- if (params.conversation || params.instruct || params.chatml) {
+ if (params.conversation) {
printf("\n> ");
}
@@ -874,24 +844,12 @@ int main(int argc, char ** argv) {
const size_t original_size = embd_inp.size();
- // instruct mode: insert instruction prefix
- if (params.instruct && !is_antiprompt) {
- LOG("inserting instruction prefix\n");
- n_consumed = embd_inp.size();
- embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
- }
- // chatml mode: insert user chat prefix
- if (params.chatml && !is_antiprompt) {
- LOG("inserting chatml prefix\n");
- n_consumed = embd_inp.size();
- embd_inp.insert(embd_inp.end(), cml_pfx.begin(), cml_pfx.end());
- }
if (params.escape) {
string_process_escapes(buffer);
}
const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
- const auto line_inp = ::llama_tokenize(ctx, buffer, false, params.interactive_specials);
+ const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
@@ -900,17 +858,6 @@ int main(int argc, char ** argv) {
embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());
- // instruct mode: insert response suffix
- if (params.instruct) {
- LOG("inserting instruction suffix\n");
- embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
- }
- // chatml mode: insert assistant chat suffix
- if (params.chatml) {
- LOG("inserting chatml suffix\n");
- embd_inp.insert(embd_inp.end(), cml_sfx.begin(), cml_sfx.end());
- }
-
for (size_t i = original_size; i < embd_inp.size(); ++i) {
const llama_token token = embd_inp[i];
output_tokens.push_back(token);
@@ -935,7 +882,7 @@ int main(int argc, char ** argv) {
}
// end of generation
- if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.instruct || params.interactive || params.chatml)) {
+ if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.interactive)) {
LOG_TEE(" [end of text]\n");
break;
}
diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp
index c731abb726dc2..7faeaec975ae3 100644
--- a/examples/parallel/parallel.cpp
+++ b/examples/parallel/parallel.cpp
@@ -100,7 +100,8 @@ int main(int argc, char ** argv) {
gpt_params params;
- if (gpt_params_parse(argc, argv, params) == false) {
+ if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
diff --git a/examples/passkey/README.md b/examples/passkey/README.md
index 4a22bb55975be..9e7a119ba3e0b 100644
--- a/examples/passkey/README.md
+++ b/examples/passkey/README.md
@@ -8,5 +8,5 @@ See the following PRs for more info:
### Usage
```bash
-make -j && ./passkey ./models/llama-7b-v2/ggml-model-f16.gguf 250
+make -j && ./passkey -m ./models/llama-7b-v2/ggml-model-f16.gguf --junk 250
```
diff --git a/examples/passkey/passkey.cpp b/examples/passkey/passkey.cpp
index f2ef9ca10d4a2..d03215cd1e0a9 100644
--- a/examples/passkey/passkey.cpp
+++ b/examples/passkey/passkey.cpp
@@ -6,46 +6,32 @@
#include
#include
-int main(int argc, char ** argv) {
- gpt_params params;
-
- if (argc == 1 || argv[1][0] == '-') {
- printf("usage: %s MODEL_PATH N_JUNK N_GRP I_POS SEED\n" , argv[0]);
- return 1 ;
- }
-
- int seed = -1;
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
- int n_junk = 250; // number of times to repeat the junk text
- int n_keep = 32; // number of tokens in the prompt prefix
- int n_grp = 1; // if more than 1 - perform LongLM SelfExtend
- int i_pos = -1; // position of the passkey in the junk text
-
- if (argc >= 2) {
- params.model = argv[1];
- }
-
- if (argc >= 3) {
- n_junk = std::stoi(argv[2]);
- }
+ LOG_TEE("\nexample usage:\n");
+ LOG_TEE("\n %s -m model.gguf --junk 250 --pos 90 --keep 32 --grp-attn-n 2 [--seed 1234]\n", argv[0]);
+ LOG_TEE("\n");
+}
- if (argc >= 4) {
- n_grp = std::stoi(argv[3]);
- }
+int main(int argc, char ** argv) {
+ gpt_params params;
- if (argc >= 5) {
- i_pos = std::stoi(argv[4]);
- }
+ params.n_junk = 250;
+ params.n_keep = 32;
+ params.i_pos = -1;
- if (argc >= 6) {
- seed = std::stoi(argv[5]);
+ if (!gpt_params_parse(argc, argv, params)) {
+ print_usage(argc, argv, params);
+ return 1;
}
- if (seed == -1) {
- seed = time(NULL);
- }
+ srand(params.seed == LLAMA_DEFAULT_SEED ? time(NULL) : params.seed);
- srand(seed);
+ int n_junk = params.n_junk;
+ int n_keep = params.n_keep;
+ int n_grp = params.grp_attn_n;
+ int i_pos = params.i_pos;
if (i_pos == -1) {
i_pos = rand() % n_junk;
@@ -76,9 +62,7 @@ int main(int argc, char ** argv) {
// initialize the model
- llama_model_params model_params = llama_model_default_params();
-
- model_params.n_gpu_layers = 99; // offload all layers to the GPU
+ llama_model_params model_params = llama_model_params_from_gpt_params(params);
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
@@ -89,13 +73,9 @@ int main(int argc, char ** argv) {
// initialize the context
- llama_context_params ctx_params = llama_context_default_params();
+ llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
- ctx_params.seed = seed;
- ctx_params.n_ctx = llama_n_ctx_train(model)*n_grp + n_keep;
- ctx_params.n_batch = 512;
- ctx_params.n_threads = params.n_threads;
- ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+ ctx_params.n_ctx = llama_n_ctx_train(model)*n_grp + n_keep;
GGML_ASSERT(ctx_params.n_batch % n_grp == 0 && "n_batch must be divisible by n_grp");
@@ -135,7 +115,7 @@ int main(int argc, char ** argv) {
LOG_TEE("prompt tokens: %d\n", n_tokens_all);
//LOG_TEE("prompt: %s\n", params.prompt.c_str());
- llama_batch batch = llama_batch_init(512, 0, 1);
+ llama_batch batch = llama_batch_init(params.n_batch, 0, 1);
int n_past = 0;
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index 30e5e282ef5cf..0bd78c21a86a1 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -1032,7 +1032,7 @@ struct winogrande_entry {
std::vector<llama_token> seq_tokens[2];
};
-static std::vector<winogrande_entry> load_winogrande_from_csv(const std::string& prompt) {
+static std::vector<winogrande_entry> load_winogrande_from_csv(const std::string & prompt) {
std::vector<winogrande_entry> result;
std::istringstream in(prompt);
std::string line;
@@ -1964,12 +1964,14 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
int main(int argc, char ** argv) {
gpt_params params;
+ params.n_ctx = 512;
+ params.logits_all = true;
+
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
- params.logits_all = true;
-
const int32_t n_ctx = params.n_ctx;
if (n_ctx <= 0) {
@@ -2006,9 +2008,6 @@ int main(int argc, char ** argv) {
fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);
std::mt19937 rng(params.seed);
- if (params.random_prompt) {
- params.prompt = string_random_prompt(rng);
- }
llama_backend_init();
llama_numa_init(params.numa);
@@ -2027,6 +2026,7 @@ int main(int argc, char ** argv) {
}
const int n_ctx_train = llama_n_ctx_train(model);
+
if (params.n_ctx > n_ctx_train) {
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
__func__, n_ctx_train, params.n_ctx);
diff --git a/examples/quantize/tests.sh b/examples/quantize/tests.sh
index a3ca74c68e7e5..38e28ffc365ee 100644
--- a/examples/quantize/tests.sh
+++ b/examples/quantize/tests.sh
@@ -47,7 +47,7 @@ echo PASS
echo
# 3a. Test the requanted model is loading properly
-$MAIN --model $WORK_PATH/ggml-model-requant-00001-of-00006.gguf --random-prompt --n-predict 32
+$MAIN --model $WORK_PATH/ggml-model-requant-00001-of-00006.gguf --n-predict 32
echo PASS
echo
@@ -57,7 +57,7 @@ echo PASS
echo
# 4b. Test the requanted model is loading properly
-$MAIN --model $WORK_PATH/ggml-model-requant-merge.gguf --random-prompt --n-predict 32
+$MAIN --model $WORK_PATH/ggml-model-requant-merge.gguf --n-predict 32
echo PASS
echo
diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp
index 4e7530706d4a9..55b7b2f70ae2a 100644
--- a/examples/retrieval/retrieval.cpp
+++ b/examples/retrieval/retrieval.cpp
@@ -4,72 +4,12 @@
#include
#include
-struct retrieval_params {
- std::vector<std::string> context_files; // context files to embed
- int32_t chunk_size = 64; // chunk size for context embedding
- std::string chunk_separator = "\n"; // chunk separator for context embedding
-};
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
-static void retrieval_params_print_usage(int argc, char ** argv, gpt_params & gpt_params, retrieval_params & params) {
- gpt_params_print_usage(argc, argv, gpt_params);
- printf("retrieval options:\n");
- printf(" --context-file FNAME file containing context to embed.\n");
- printf(" specify multiple files by providing --context-file option multiple times.\n");
- printf(" --chunk-size N minimum length of embedded text chunk (default:%d)\n", params.chunk_size);
- printf(" --chunk-separator STRING\n");
- printf(" string to separate chunks (default: \"\\n\")\n");
- printf("\n");
-}
-
-static void retrieval_params_parse(int argc, char ** argv, gpt_params & gpt_params, retrieval_params & retrieval_params) {
- int i = 1;
- std::string arg;
- while (i < argc) {
- arg = argv[i];
- bool invalid_gpt_param = false;
- if(gpt_params_find_arg(argc, argv, argv[i], gpt_params, i, invalid_gpt_param)) {
- if (invalid_gpt_param) {
- fprintf(stderr, "error: invalid argument: %s\n", arg.c_str());
- retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
- exit(1);
- }
- // option was parsed by gpt_params_find_arg
- } else if (arg == "--context-file") {
- if (++i >= argc) {
- fprintf(stderr, "error: missing argument for --context-file\n");
- retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
- exit(1);
- }
- std::ifstream file(argv[i]);
- if (!file) {
- fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
- retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
- exit(1);
- }
- // store the external file name in params
- retrieval_params.context_files.push_back(argv[i]);
- } else if (arg == "--chunk-size") {
- if (++i >= argc) {
- fprintf(stderr, "error: missing argument for --chunk-size\n");
- retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
- exit(1);
- }
- retrieval_params.chunk_size = std::stoi(argv[i]);
- } else if (arg == "--chunk-separator") {
- if (++i >= argc) {
- fprintf(stderr, "error: missing argument for --chunk-separator\n");
- retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
- exit(1);
- }
- retrieval_params.chunk_separator = argv[i];
- } else {
- // unknown argument
- fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
- retrieval_params_print_usage(argc, argv, gpt_params, retrieval_params);
- exit(1);
- }
- i++;
- }
+ LOG_TEE("\nexample usage:\n");
+ LOG_TEE("\n %s --model ./models/bge-base-en-v1.5-f16.gguf --top-k 3 --context-file README.md --context-file License --chunk-size 100 --chunk-separator .\n", argv[0]);
+ LOG_TEE("\n");
}
struct chunk {
@@ -171,33 +111,35 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
int main(int argc, char ** argv) {
gpt_params params;
- retrieval_params retrieval_params;
- retrieval_params_parse(argc, argv, params, retrieval_params);
+ if (!gpt_params_parse(argc, argv, params)) {
+ print_usage(argc, argv, params);
+ return 1;
+ }
// For BERT models, batch size must be equal to ubatch size
params.n_ubatch = params.n_batch;
+ params.embedding = true;
- if (retrieval_params.chunk_size <= 0) {
+ if (params.chunk_size <= 0) {
fprintf(stderr, "chunk_size must be positive\n");
return 1;
}
- if (retrieval_params.context_files.empty()) {
+ if (params.context_files.empty()) {
fprintf(stderr, "context_files must be specified\n");
return 1;
}
- params.embedding = true;
print_build_info();
printf("processing files:\n");
- for (auto & context_file : retrieval_params.context_files) {
+ for (auto & context_file : params.context_files) {
printf("%s\n", context_file.c_str());
}
std::vector<chunk> chunks;
- for (auto & context_file : retrieval_params.context_files) {
- std::vector<chunk> file_chunk = chunk_file(context_file, retrieval_params.chunk_size, retrieval_params.chunk_separator);
+ for (auto & context_file : params.context_files) {
+ std::vector<chunk> file_chunk = chunk_file(context_file, params.chunk_size, params.chunk_separator);
chunks.insert(chunks.end(), file_chunk.begin(), file_chunk.end());
}
printf("Number of chunks: %ld\n", chunks.size());
@@ -242,7 +184,7 @@ int main(int argc, char ** argv) {
return 1;
}
// add eos if not present
- if (inp.empty() || inp.back() != llama_token_eos(model)) {
+ if (llama_token_eos(model) >= 0 && (inp.empty() || inp.back() != llama_token_eos(model))) {
inp.push_back(llama_token_eos(model));
}
chunk.tokens = inp;
diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index c3b766882dbec..00c2277ac2827 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -11,6 +11,7 @@ int main(int argc, char ** argv) {
params.prompt = "The quick brown fox";
if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index fc6d90848f099..d581cad95974d 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -123,29 +123,6 @@ struct slot_params {
json input_suffix;
};
-struct server_params {
- int32_t port = 8080;
- int32_t read_timeout = 600;
- int32_t write_timeout = 600;
- int32_t n_threads_http = -1;
-
- std::string hostname = "127.0.0.1";
- std::string public_path = "";
- std::string chat_template = "";
- std::string system_prompt = "";
-
- std::vector<std::string> api_keys;
-
-#ifdef CPPHTTPLIB_OPENSSL_SUPPORT
- std::string ssl_key_file = "";
- std::string ssl_cert_file = "";
-#endif
-
- bool slots_endpoint = true;
- bool metrics_endpoint = false;
- std::string slot_save_path;
-};
-
struct server_slot {
int id;
int id_task = -1;
@@ -1261,7 +1238,7 @@ struct server_context {
}
json get_formated_generation(const server_slot & slot) const {
- const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
+ const auto eos_bias = slot.sparams.logit_bias.find(llama_token_eos(model));
const bool ignore_eos = eos_bias != slot.sparams.logit_bias.end() && eos_bias->second < 0.0f && std::isinf(eos_bias->second);
std::vector<std::string> samplers_sequence;
@@ -2334,561 +2311,6 @@ struct server_context {
}
};
-static void server_print_usage(const char * argv0, const gpt_params & params, const server_params & sparams) {
- printf("usage: %s [options]\n", argv0);
- printf("\n");
- printf("options:\n");
- printf(" -h, --help show this help message and exit\n");
- printf(" -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
- printf(" -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
- printf(" -tb N, --threads-batch N number of threads to use during batch and prompt processing (default: same as --threads)\n");
- printf(" --threads-http N number of threads in the http server pool to process requests (default: max(hardware concurrency - 1, --parallel N + 2))\n");
- printf(" -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx);
- printf(" --rope-scaling {none,linear,yarn}\n");
- printf(" RoPE frequency scaling method, defaults to linear unless specified by the model\n");
- printf(" --rope-freq-base N RoPE base frequency (default: loaded from model)\n");
- printf(" --rope-freq-scale N RoPE frequency scaling factor, expands context by a factor of 1/N\n");
- printf(" --yarn-ext-factor N YaRN: extrapolation mix factor (default: 1.0, 0.0 = full interpolation)\n");
- printf(" --yarn-attn-factor N YaRN: scale sqrt(t) or attention magnitude (default: 1.0)\n");
- printf(" --yarn-beta-slow N YaRN: high correction dim or alpha (default: %.1f)\n", params.yarn_beta_slow);
- printf(" --yarn-beta-fast N YaRN: low correction dim or beta (default: %.1f)\n", params.yarn_beta_fast);
- printf(" --pooling {none,mean,cls} pooling type for embeddings, use model default if unspecified\n");
- printf(" -dt N, --defrag-thold N\n");
- printf(" KV cache defragmentation threshold (default: %.1f, < 0 - disabled)\n", params.defrag_thold);
- printf(" -b N, --batch-size N logical maximum batch size (default: %d)\n", params.n_batch);
- printf(" -ub N, --ubatch-size N physical maximum batch size (default: %d)\n", params.n_ubatch);
- if (llama_supports_mlock()) {
- printf(" --mlock force system to keep model in RAM rather than swapping or compressing\n");
- }
- if (llama_supports_mmap()) {
- printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
- }
- printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n");
- printf(" - distribute: spread execution evenly over all nodes\n");
- printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n");
- printf(" - numactl: use the CPU map provided my numactl\n");
- if (llama_supports_gpu_offload()) {
- printf(" -ngl N, --n-gpu-layers N\n");
- printf(" number of layers to store in VRAM\n");
- printf(" -sm SPLIT_MODE, --split-mode SPLIT_MODE\n");
- printf(" how to split the model across multiple GPUs, one of:\n");
- printf(" - none: use one GPU only\n");
- printf(" - layer (default): split layers and KV across GPUs\n");
- printf(" - row: split rows across GPUs\n");
- printf(" -ts SPLIT --tensor-split SPLIT\n");
- printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n");
- printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n");
- printf(" or for intermediate results and KV (with split-mode = row)\n");
- printf(" -nkvo, --no-kv-offload\n");
- printf(" disable KV offload\n");
- }
- printf(" -m FNAME, --model FNAME\n");
- printf(" model path (default: models/$filename with filename from --hf-file or --model-url if set, otherwise %s)\n", DEFAULT_MODEL_PATH);
- printf(" -mu MODEL_URL, --model-url MODEL_URL\n");
- printf(" model download url (default: unused)\n");
- printf(" -hfr REPO, --hf-repo REPO\n");
- printf(" Hugging Face model repository (default: unused)\n");
- printf(" -hff FILE, --hf-file FILE\n");
- printf(" Hugging Face model file (default: unused)\n");
- printf(" -a ALIAS, --alias ALIAS\n");
- printf(" set an alias for the model, will be added as `model` field in completion response\n");
- printf(" --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
- printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
- printf(" --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str());
- printf(" --port PORT port to listen (default (default: %d)\n", sparams.port);
- printf(" --rpc SERVERS comma separated list of RPC servers\n");
- printf(" --path PUBLIC_PATH path from which to serve static files (default: disabled)\n");
- printf(" --api-key API_KEY optional api key to enhance server security. If set, requests must include this key for access.\n");
- printf(" --api-key-file FNAME path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n");
-#ifdef CPPHTTPLIB_OPENSSL_SUPPORT
- printf(" --ssl-key-file FNAME path to file a PEM-encoded SSL private key\n");
- printf(" --ssl-cert-file FNAME path to file a PEM-encoded SSL certificate\n");
-#endif
- printf(" -to N, --timeout N server read/write timeout in seconds (default: %d)\n", sparams.read_timeout);
- printf(" --embeddings enable embedding vector output (default: %s)\n", params.embedding ? "enabled" : "disabled");
- printf(" -np N, --parallel N number of slots for process requests (default: %d)\n", params.n_parallel);
- printf(" -cb, --cont-batching enable continuous batching (a.k.a dynamic batching) (default: enabled)\n");
- printf(" -fa, --flash-attn enable Flash Attention (default: %s)\n", params.flash_attn ? "enabled" : "disabled");
- printf(" -spf FNAME, --system-prompt-file FNAME\n");
- printf(" set a file to load a system prompt (initial prompt of all slots), this is useful for chat applications.\n");
- printf(" -ctk TYPE, --cache-type-k TYPE\n");
- printf(" KV cache data type for K (default: f16)\n");
- printf(" -ctv TYPE, --cache-type-v TYPE\n");
- printf(" KV cache data type for V (default: f16)\n");
- printf(" --log-format log output format: json or text (default: json)\n");
- printf(" --log-disable disables logging to a file.\n");
- printf(" --slots-endpoint-disable disables slots monitoring endpoint.\n");
- printf(" --metrics enable prometheus compatible metrics endpoint (default: %s).\n", sparams.metrics_endpoint ? "enabled" : "disabled");
- printf(" --slot-save-path PATH path to save slot kv cache (default: disabled)\n");
- printf("\n");
- printf(" -n, --n-predict maximum tokens to predict (default: %d)\n", params.n_predict);
- printf(" --override-kv KEY=TYPE:VALUE\n");
- printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
- printf(" types: int, float, bool, str. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
- printf(" -gan N, --grp-attn-n N set the group attention factor to extend context size through self-extend(default: 1=disabled), used together with group attention width `--grp-attn-w`\n");
- printf(" -gaw N, --grp-attn-w N set the group attention width to extend context size through self-extend(default: 512), used together with group attention factor `--grp-attn-n`\n");
- printf(" --chat-template JINJA_TEMPLATE\n");
- printf(" set custom jinja chat template (default: template taken from model's metadata)\n");
- printf(" only commonly used templates are accepted:\n");
- printf(" https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template\n");
- printf("\n");
-}
-
-static void server_params_parse(int argc, char ** argv, server_params & sparams, gpt_params & params) {
- gpt_params default_params;
- server_params default_sparams;
-
- std::string arg;
- bool invalid_param = false;
-
- for (int i = 1; i < argc; i++) {
- arg = argv[i];
- if (arg == "--port") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.port = std::stoi(argv[i]);
- } else if (arg == "--rpc") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.rpc_servers = argv[i];
- } else if (arg == "--host") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.hostname = argv[i];
- } else if (arg == "--path") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.public_path = argv[i];
- } else if (arg == "--api-key") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.api_keys.push_back(argv[i]);
- } else if (arg == "--api-key-file") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- std::ifstream key_file(argv[i]);
- if (!key_file) {
- fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
- invalid_param = true;
- break;
- }
- std::string key;
- while (std::getline(key_file, key)) {
- if (key.size() > 0) {
- sparams.api_keys.push_back(key);
- }
- }
- key_file.close();
-
- }
-#ifdef CPPHTTPLIB_OPENSSL_SUPPORT
- else if (arg == "--ssl-key-file") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.ssl_key_file = argv[i];
- } else if (arg == "--ssl-cert-file") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.ssl_cert_file = argv[i];
- }
-#endif
- else if (arg == "--timeout" || arg == "-to") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.read_timeout = std::stoi(argv[i]);
- sparams.write_timeout = std::stoi(argv[i]);
- } else if (arg == "-m" || arg == "--model") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.model = argv[i];
- } else if (arg == "-mu" || arg == "--model-url") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.model_url = argv[i];
- } else if (arg == "-hfr" || arg == "--hf-repo") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.hf_repo = argv[i];
- } else if (arg == "-hff" || arg == "--hf-file") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.hf_file = argv[i];
- } else if (arg == "-a" || arg == "--alias") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.model_alias = argv[i];
- } else if (arg == "-h" || arg == "--help") {
- server_print_usage(argv[0], default_params, default_sparams);
- exit(0);
- } else if (arg == "-c" || arg == "--ctx-size" || arg == "--ctx_size") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.n_ctx = std::stoi(argv[i]);
- } else if (arg == "--rope-scaling") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- std::string value(argv[i]);
- /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
- else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
- else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
- else { invalid_param = true; break; }
- } else if (arg == "--rope-freq-base") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.rope_freq_base = std::stof(argv[i]);
- } else if (arg == "--rope-freq-scale") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.rope_freq_scale = std::stof(argv[i]);
- } else if (arg == "--yarn-ext-factor") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.yarn_ext_factor = std::stof(argv[i]);
- }
- else if (arg == "--yarn-attn-factor") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.yarn_attn_factor = std::stof(argv[i]);
- } else if (arg == "--yarn-beta-fast") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.yarn_beta_fast = std::stof(argv[i]);
- } else if (arg == "--yarn-beta-slow") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.yarn_beta_slow = std::stof(argv[i]);
- } else if (arg == "--pooling") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- std::string value(argv[i]);
- /**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
- else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
- else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
- else { invalid_param = true; break; }
- } else if (arg == "--defrag-thold" || arg == "-dt") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.defrag_thold = std::stof(argv[i]);
- } else if (arg == "--threads" || arg == "-t") {
- if (++i >= argc)
- {
- invalid_param = true;
- break;
- }
- params.n_threads = std::stoi(argv[i]);
- } else if (arg == "--grp-attn-n" || arg == "-gan") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
-
- params.grp_attn_n = std::stoi(argv[i]);
- } else if (arg == "--grp-attn-w" || arg == "-gaw") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
-
- params.grp_attn_w = std::stoi(argv[i]);
- } else if (arg == "--threads-batch" || arg == "-tb") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.n_threads_batch = std::stoi(argv[i]);
- } else if (arg == "--threads-http") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.n_threads_http = std::stoi(argv[i]);
- } else if (arg == "-b" || arg == "--batch-size") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.n_batch = std::stoi(argv[i]);
- } else if (arg == "-ub" || arg == "--ubatch-size") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.n_ubatch = std::stoi(argv[i]);
- } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- if (llama_supports_gpu_offload()) {
- params.n_gpu_layers = std::stoi(argv[i]);
- } else {
- LOG_WARNING(
- "Not compiled with GPU offload support, --n-gpu-layers option will be ignored. "
- "See main README.md for information on enabling GPU BLAS support",
- {{"n_gpu_layers", params.n_gpu_layers}});
- }
- } else if (arg == "-nkvo" || arg == "--no-kv-offload") {
- params.no_kv_offload = true;
- } else if (arg == "--split-mode" || arg == "-sm") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- std::string arg_next = argv[i];
- if (arg_next == "none") {
- params.split_mode = LLAMA_SPLIT_MODE_NONE;
- } else if (arg_next == "layer") {
- params.split_mode = LLAMA_SPLIT_MODE_LAYER;
- } else if (arg_next == "row") {
- params.split_mode = LLAMA_SPLIT_MODE_ROW;
- } else {
- invalid_param = true;
- break;
- }
-#ifndef GGML_USE_CUDA
- fprintf(stderr, "warning: llama.cpp was compiled without CUDA. Setting the split mode has no effect.\n");
-#endif // GGML_USE_CUDA
- } else if (arg == "--tensor-split" || arg == "-ts") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
-#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
- std::string arg_next = argv[i];
-
- // split string by , and /
- const std::regex regex{R"([,/]+)"};
- std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
- std::vector<std::string> split_arg{it, {}};
- GGML_ASSERT(split_arg.size() <= llama_max_devices());
-
- for (size_t i_device = 0; i_device < llama_max_devices(); ++i_device) {
- if (i_device < split_arg.size()) {
- params.tensor_split[i_device] = std::stof(split_arg[i_device]);
- } else {
- params.tensor_split[i_device] = 0.0f;
- }
- }
-#else
- LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a tensor split.\n", {});
-#endif // GGML_USE_CUDA
- } else if (arg == "--main-gpu" || arg == "-mg") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
-#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
- params.main_gpu = std::stoi(argv[i]);
-#else
- LOG_WARNING("llama.cpp was compiled without CUDA. It is not possible to set a main GPU.", {});
-#endif
- } else if (arg == "--lora") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.lora_adapter.emplace_back(argv[i], 1.0f);
- params.use_mmap = false;
- } else if (arg == "--lora-scaled") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- const char * lora_adapter = argv[i];
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.lora_adapter.emplace_back(lora_adapter, std::stof(argv[i]));
- params.use_mmap = false;
- } else if (arg == "--lora-base") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.lora_base = argv[i];
- } else if (arg == "-v" || arg == "--verbose") {
-#if SERVER_VERBOSE != 1
- LOG_WARNING("server.cpp is not built with verbose logging.", {});
-#else
- server_verbose = true;
-#endif
- } else if (arg == "--mlock") {
- params.use_mlock = true;
- } else if (arg == "--no-mmap") {
- params.use_mmap = false;
- } else if (arg == "--numa") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- } else {
- std::string value(argv[i]);
- /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
- else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
- else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
- else { invalid_param = true; break; }
- }
- } else if (arg == "--embedding" || arg == "--embeddings") {
- params.embedding = true;
- } else if (arg == "-cb" || arg == "--cont-batching") {
- params.cont_batching = true;
- } else if (arg == "-fa" || arg == "--flash-attn") {
- params.flash_attn = true;
- } else if (arg == "-np" || arg == "--parallel") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.n_parallel = std::stoi(argv[i]);
- } else if (arg == "-n" || arg == "--n-predict") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- params.n_predict = std::stoi(argv[i]);
- } else if (arg == "-spf" || arg == "--system-prompt-file") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- std::ifstream file(argv[i]);
- if (!file) {
- fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
- invalid_param = true;
- break;
- }
- std::string system_prompt;
- std::copy(
- std::istreambuf_iterator<char>(file),
- std::istreambuf_iterator<char>(),
- std::back_inserter(system_prompt)
- );
- sparams.system_prompt = system_prompt;
- } else if (arg == "-ctk" || arg == "--cache-type-k") {
- params.cache_type_k = argv[++i];
- } else if (arg == "-ctv" || arg == "--cache-type-v") {
- params.cache_type_v = argv[++i];
- } else if (arg == "--log-format") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- if (std::strcmp(argv[i], "json") == 0) {
- server_log_json = true;
- } else if (std::strcmp(argv[i], "text") == 0) {
- server_log_json = false;
- } else {
- invalid_param = true;
- break;
- }
- } else if (arg == "--log-disable") {
- log_set_target(stdout);
- LOG_INFO("logging to file is disabled.", {});
- } else if (arg == "--slots-endpoint-disable") {
- sparams.slots_endpoint = false;
- } else if (arg == "--metrics") {
- sparams.metrics_endpoint = true;
- } else if (arg == "--slot-save-path") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- sparams.slot_save_path = argv[i];
- // if doesn't end with DIRECTORY_SEPARATOR, add it
- if (!sparams.slot_save_path.empty() && sparams.slot_save_path[sparams.slot_save_path.size() - 1] != DIRECTORY_SEPARATOR) {
- sparams.slot_save_path += DIRECTORY_SEPARATOR;
- }
- } else if (arg == "--chat-template") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- if (!verify_custom_template(argv[i])) {
- fprintf(stderr, "error: the supplied chat template is not supported: %s\n", argv[i]);
- fprintf(stderr, "note: llama.cpp does not use jinja parser, we only support commonly used templates\n");
- invalid_param = true;
- break;
- }
- sparams.chat_template = argv[i];
- } else if (arg == "--override-kv") {
- if (++i >= argc) {
- invalid_param = true;
- break;
- }
- if (!string_parse_kv_override(argv[i], params.kv_overrides)) {
- fprintf(stderr, "error: Invalid type for KV override: %s\n", argv[i]);
- invalid_param = true;
- break;
- }
- } else {
- fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
- server_print_usage(argv[0], default_params, default_sparams);
- exit(1);
- }
- }
-
- gpt_params_handle_model_default(params);
-
- if (!params.kv_overrides.empty()) {
- params.kv_overrides.emplace_back();
- params.kv_overrides.back().key[0] = 0;
- }
-
- if (invalid_param) {
- fprintf(stderr, "error: invalid parameter for argument: %s\n", arg.c_str());
- server_print_usage(argv[0], default_params, default_sparams);
- exit(1);
- }
-}
-
static void log_server_request(const httplib::Request & req, const httplib::Response & res) {
// skip GH copilot requests when using default port
if (req.path == "/v1/health" || req.path == "/v1/completions") {
@@ -2929,16 +2351,22 @@ int main(int argc, char ** argv) {
log_disable();
#endif
// own arguments required by this example
- gpt_params params;
- server_params sparams;
+ gpt_params params;
+
+ if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
+ return 1;
+ }
+
+ // TODO: not great to use extern vars
+ server_log_json = params.log_json;
+ server_verbose = params.verbose;
// struct that contains llama context and inference
server_context ctx_server;
- server_params_parse(argc, argv, sparams, params);
-
- if (!sparams.system_prompt.empty()) {
- ctx_server.system_prompt_set(sparams.system_prompt);
+ if (!params.system_prompt.empty()) {
+ ctx_server.system_prompt_set(params.system_prompt);
}
if (params.model_alias == "unknown") {
@@ -2962,10 +2390,10 @@ int main(int argc, char ** argv) {
std::unique_ptr<httplib::Server> svr;
#ifdef CPPHTTPLIB_OPENSSL_SUPPORT
- if (sparams.ssl_key_file != "" && sparams.ssl_cert_file != "") {
- LOG_INFO("Running with SSL", {{"key", sparams.ssl_key_file}, {"cert", sparams.ssl_cert_file}});
+ if (params.ssl_file_key != "" && params.ssl_file_cert != "") {
+ LOG_INFO("Running with SSL", {{"key", params.ssl_file_key}, {"cert", params.ssl_file_cert}});
svr.reset(
- new httplib::SSLServer(sparams.ssl_cert_file.c_str(), sparams.ssl_key_file.c_str())
+ new httplib::SSLServer(params.ssl_file_cert.c_str(), params.ssl_file_key.c_str())
);
} else {
LOG_INFO("Running without SSL", {});
@@ -3019,24 +2447,24 @@ int main(int argc, char ** argv) {
});
// set timeouts and change hostname and port
- svr->set_read_timeout (sparams.read_timeout);
- svr->set_write_timeout(sparams.write_timeout);
+ svr->set_read_timeout (params.timeout_read);
+ svr->set_write_timeout(params.timeout_write);
- if (!svr->bind_to_port(sparams.hostname, sparams.port)) {
- fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", sparams.hostname.c_str(), sparams.port);
+ if (!svr->bind_to_port(params.hostname, params.port)) {
+ fprintf(stderr, "\ncouldn't bind to server socket: hostname=%s port=%d\n\n", params.hostname.c_str(), params.port);
return 1;
}
std::unordered_map<std::string, std::string> log_data;
- log_data["hostname"] = sparams.hostname;
- log_data["port"] = std::to_string(sparams.port);
+ log_data["hostname"] = params.hostname;
+ log_data["port"] = std::to_string(params.port);
- if (sparams.api_keys.size() == 1) {
- auto key = sparams.api_keys[0];
+ if (params.api_keys.size() == 1) {
+ auto key = params.api_keys[0];
log_data["api_key"] = "api_key: ****" + key.substr(std::max((int)(key.length() - 4), 0));
- } else if (sparams.api_keys.size() > 1) {
- log_data["api_key"] = "api_key: " + std::to_string(sparams.api_keys.size()) + " keys loaded";
+ } else if (params.api_keys.size() > 1) {
+ log_data["api_key"] = "api_key: " + std::to_string(params.api_keys.size()) + " keys loaded";
}
// load the model
@@ -3053,10 +2481,10 @@ int main(int argc, char ** argv) {
const auto model_meta = ctx_server.model_meta();
// if a custom chat template is not supplied, we will use the one that comes with the model (if any)
- if (sparams.chat_template.empty()) {
+ if (params.chat_template.empty()) {
if (!ctx_server.validate_model_chat_template()) {
LOG_ERROR("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
- sparams.chat_template = "chatml";
+ params.chat_template = "chatml";
}
}
@@ -3068,11 +2496,11 @@ int main(int argc, char ** argv) {
chat.push_back({{"role", "assistant"}, {"content", "Hi there"}});
chat.push_back({{"role", "user"}, {"content", "How are you?"}});
- const std::string chat_example = format_chat(ctx_server.model, sparams.chat_template, chat);
+ const std::string chat_example = format_chat(ctx_server.model, params.chat_template, chat);
LOG_INFO("chat template", {
{"chat_example", chat_example},
- {"built_in", sparams.chat_template.empty()},
+ {"built_in", params.chat_template.empty()},
});
}
@@ -3080,7 +2508,7 @@ int main(int argc, char ** argv) {
// Middlewares
//
- auto middleware_validate_api_key = [&sparams, &res_error](const httplib::Request & req, httplib::Response & res) {
+ auto middleware_validate_api_key = [&params, &res_error](const httplib::Request & req, httplib::Response & res) {
// TODO: should we apply API key to all endpoints, including "/health" and "/models"?
static const std::set<std::string> protected_endpoints = {
"/props",
@@ -3098,7 +2526,7 @@ int main(int argc, char ** argv) {
};
// If API key is not set, skip validation
- if (sparams.api_keys.empty()) {
+ if (params.api_keys.empty()) {
return true;
}
@@ -3113,7 +2541,7 @@ int main(int argc, char ** argv) {
std::string prefix = "Bearer ";
if (auth_header.substr(0, prefix.size()) == prefix) {
std::string received_api_key = auth_header.substr(prefix.size());
- if (std::find(sparams.api_keys.begin(), sparams.api_keys.end(), received_api_key) != sparams.api_keys.end()) {
+ if (std::find(params.api_keys.begin(), params.api_keys.end(), received_api_key) != params.api_keys.end()) {
return true; // API key is valid
}
}
@@ -3168,7 +2596,7 @@ int main(int argc, char ** argv) {
};
res.status = 200; // HTTP OK
- if (sparams.slots_endpoint && req.has_param("include_slots")) {
+ if (params.endpoint_slots && req.has_param("include_slots")) {
health["slots"] = result.data.at("slots");
}
@@ -3194,7 +2622,7 @@ int main(int argc, char ** argv) {
};
const auto handle_slots = [&](const httplib::Request &, httplib::Response & res) {
- if (!sparams.slots_endpoint) {
+ if (!params.endpoint_slots) {
res_error(res, format_error_response("This server does not support slots endpoint.", ERROR_TYPE_NOT_SUPPORTED));
return;
}
@@ -3218,7 +2646,7 @@ int main(int argc, char ** argv) {
};
const auto handle_metrics = [&](const httplib::Request &, httplib::Response & res) {
- if (!sparams.metrics_endpoint) {
+ if (!params.endpoint_metrics) {
res_error(res, format_error_response("This server does not support metrics endpoint.", ERROR_TYPE_NOT_SUPPORTED));
return;
}
@@ -3318,14 +2746,14 @@ int main(int argc, char ** argv) {
res.status = 200; // HTTP OK
};
- const auto handle_slots_save = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
+ const auto handle_slots_save = [&ctx_server, &res_error, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
json request_data = json::parse(req.body);
std::string filename = request_data.at("filename");
if (!fs_validate_filename(filename)) {
res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
return;
}
- std::string filepath = sparams.slot_save_path + filename;
+ std::string filepath = params.slot_save_path + filename;
server_task task;
task.type = SERVER_TASK_TYPE_SLOT_SAVE;
@@ -3348,14 +2776,14 @@ int main(int argc, char ** argv) {
}
};
- const auto handle_slots_restore = [&ctx_server, &res_error, &sparams](const httplib::Request & req, httplib::Response & res, int id_slot) {
+ const auto handle_slots_restore = [&ctx_server, &res_error, &params](const httplib::Request & req, httplib::Response & res, int id_slot) {
json request_data = json::parse(req.body);
std::string filename = request_data.at("filename");
if (!fs_validate_filename(filename)) {
res_error(res, format_error_response("Invalid filename", ERROR_TYPE_INVALID_REQUEST));
return;
}
- std::string filepath = sparams.slot_save_path + filename;
+ std::string filepath = params.slot_save_path + filename;
server_task task;
task.type = SERVER_TASK_TYPE_SLOT_RESTORE;
@@ -3530,9 +2958,9 @@ int main(int argc, char ** argv) {
res.set_content(models.dump(), "application/json; charset=utf-8");
};
- const auto handle_chat_completions = [&ctx_server, &sparams, &res_error](const httplib::Request & req, httplib::Response & res) {
+ const auto handle_chat_completions = [&ctx_server, &params, &res_error](const httplib::Request & req, httplib::Response & res) {
res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
- json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), sparams.chat_template);
+ json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template);
const int id_task = ctx_server.queue_tasks.get_new_id();
@@ -3757,29 +3185,29 @@ int main(int argc, char ** argv) {
//
// register static assets routes
- if (!sparams.public_path.empty()) {
+ if (!params.public_path.empty()) {
// Set the base directory for serving static files
- svr->set_base_dir(sparams.public_path);
+ svr->set_base_dir(params.public_path);
}
+
// using embedded static files
- svr->Get("/", handle_static_file(index_html, index_html_len, "text/html; charset=utf-8"));
- svr->Get("/index.js", handle_static_file(index_js, index_js_len, "text/javascript; charset=utf-8"));
- svr->Get("/completion.js", handle_static_file(completion_js, completion_js_len, "text/javascript; charset=utf-8"));
- svr->Get("/json-schema-to-grammar.mjs", handle_static_file(
- json_schema_to_grammar_mjs, json_schema_to_grammar_mjs_len, "text/javascript; charset=utf-8"));
+ svr->Get("/", handle_static_file(index_html, index_html_len, "text/html; charset=utf-8"));
+ svr->Get("/index.js", handle_static_file(index_js, index_js_len, "text/javascript; charset=utf-8"));
+ svr->Get("/completion.js", handle_static_file(completion_js, completion_js_len, "text/javascript; charset=utf-8"));
+ svr->Get("/json-schema-to-grammar.mjs", handle_static_file(json_schema_to_grammar_mjs, json_schema_to_grammar_mjs_len, "text/javascript; charset=utf-8"));
// add new-ui files
- svr->Get("/colorthemes.css", handle_static_file(colorthemes_css, colorthemes_css_len, "text/css; charset=utf-8"));
- svr->Get("/style.css", handle_static_file(style_css, style_css_len, "text/css; charset=utf-8"));
+ svr->Get("/colorthemes.css", handle_static_file(colorthemes_css, colorthemes_css_len, "text/css; charset=utf-8"));
+ svr->Get("/style.css", handle_static_file(style_css, style_css_len, "text/css; charset=utf-8"));
svr->Get("/theme-beeninorder.css", handle_static_file(theme_beeninorder_css, theme_beeninorder_css_len, "text/css; charset=utf-8"));
- svr->Get("/theme-ketivah.css", handle_static_file(theme_ketivah_css, theme_ketivah_css_len, "text/css; charset=utf-8"));
- svr->Get("/theme-mangotango.css", handle_static_file(theme_mangotango_css, theme_mangotango_css_len, "text/css; charset=utf-8"));
- svr->Get("/theme-playground.css", handle_static_file(theme_playground_css, theme_playground_css_len, "text/css; charset=utf-8"));
- svr->Get("/theme-polarnight.css", handle_static_file(theme_polarnight_css, theme_polarnight_css_len, "text/css; charset=utf-8"));
- svr->Get("/theme-snowstorm.css", handle_static_file(theme_snowstorm_css, theme_snowstorm_css_len, "text/css; charset=utf-8"));
- svr->Get("/index-new.html", handle_static_file(index_new_html, index_new_html_len, "text/html; charset=utf-8"));
- svr->Get("/system-prompts.js", handle_static_file(system_prompts_js, system_prompts_js_len, "text/javascript; charset=utf-8"));
- svr->Get("/prompt-formats.js", handle_static_file(prompt_formats_js, prompt_formats_js_len, "text/javascript; charset=utf-8"));
+ svr->Get("/theme-ketivah.css", handle_static_file(theme_ketivah_css, theme_ketivah_css_len, "text/css; charset=utf-8"));
+ svr->Get("/theme-mangotango.css", handle_static_file(theme_mangotango_css, theme_mangotango_css_len, "text/css; charset=utf-8"));
+ svr->Get("/theme-playground.css", handle_static_file(theme_playground_css, theme_playground_css_len, "text/css; charset=utf-8"));
+ svr->Get("/theme-polarnight.css", handle_static_file(theme_polarnight_css, theme_polarnight_css_len, "text/css; charset=utf-8"));
+ svr->Get("/theme-snowstorm.css", handle_static_file(theme_snowstorm_css, theme_snowstorm_css_len, "text/css; charset=utf-8"));
+ svr->Get("/index-new.html", handle_static_file(index_new_html, index_new_html_len, "text/html; charset=utf-8"));
+ svr->Get("/system-prompts.js", handle_static_file(system_prompts_js, system_prompts_js_len, "text/javascript; charset=utf-8"));
+ svr->Get("/prompt-formats.js", handle_static_file(prompt_formats_js, prompt_formats_js_len, "text/javascript; charset=utf-8"));
// register API routes
svr->Get ("/health", handle_health);
@@ -3798,7 +3226,7 @@ int main(int argc, char ** argv) {
svr->Post("/v1/embeddings", handle_embeddings);
svr->Post("/tokenize", handle_tokenize);
svr->Post("/detokenize", handle_detokenize);
- if (!sparams.slot_save_path.empty()) {
+ if (!params.slot_save_path.empty()) {
// only enable slot endpoints if slot_save_path is set
svr->Post("/slots/:id_slot", handle_slots_action);
}
@@ -3806,12 +3234,12 @@ int main(int argc, char ** argv) {
//
// Start the server
//
- if (sparams.n_threads_http < 1) {
+ if (params.n_threads_http < 1) {
// +2 threads for monitoring endpoints
- sparams.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
+ params.n_threads_http = std::max(params.n_parallel + 2, (int32_t) std::thread::hardware_concurrency() - 1);
}
- log_data["n_threads_http"] = std::to_string(sparams.n_threads_http);
- svr->new_task_queue = [&sparams] { return new httplib::ThreadPool(sparams.n_threads_http); };
+ log_data["n_threads_http"] = std::to_string(params.n_threads_http);
+ svr->new_task_queue = [&params] { return new httplib::ThreadPool(params.n_threads_http); };
LOG_INFO("HTTP server listening", log_data);
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index d8a2286e4b1df..b7bfb41d35edc 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -116,13 +116,6 @@ static inline void server_log(const char * level, const char * function, int lin
// chat template utils
//
-// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
-inline bool verify_custom_template(const std::string & tmpl) {
- llama_chat_message chat[] = {{"user", "test"}};
- int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
- return res >= 0;
-}
-
// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
size_t alloc_size = 0;
diff --git a/examples/simple/README.md b/examples/simple/README.md
index 5d24b1046935c..49e24501cc02b 100644
--- a/examples/simple/README.md
+++ b/examples/simple/README.md
@@ -3,7 +3,7 @@
The purpose of this example is to demonstrate a minimal usage of llama.cpp for generating text with a given prompt.
```bash
-./simple ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is"
+./simple -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is"
...
diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index b0f8e0fdc4987..69a92cf7dc0c0 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -6,28 +6,27 @@
#include
#include
-int main(int argc, char ** argv) {
- gpt_params params;
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
- if (argc == 1 || argv[1][0] == '-') {
- printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]);
- return 1 ;
- }
+ LOG_TEE("\nexample usage:\n");
+ LOG_TEE("\n %s -m model.gguf -p \"Hello my name is\" -n 32\n", argv[0]);
+ LOG_TEE("\n");
+}
- if (argc >= 2) {
- params.model = argv[1];
- }
+int main(int argc, char ** argv) {
+ gpt_params params;
- if (argc >= 3) {
- params.prompt = argv[2];
- }
+ params.prompt = "Hello my name is";
+ params.n_predict = 32;
- if (params.prompt.empty()) {
- params.prompt = "Hello my name is";
+ if (!gpt_params_parse(argc, argv, params)) {
+ print_usage(argc, argv, params);
+ return 1;
}
// total length of the sequence including the prompt
- const int n_len = 32;
+ const int n_predict = params.n_predict;
// init LLM
@@ -36,9 +35,7 @@ int main(int argc, char ** argv) {
// initialize the model
- llama_model_params model_params = llama_model_default_params();
-
- // model_params.n_gpu_layers = 99; // offload all layers to the GPU
+ llama_model_params model_params = llama_model_params_from_gpt_params(params);
llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
@@ -49,12 +46,7 @@ int main(int argc, char ** argv) {
// initialize the context
- llama_context_params ctx_params = llama_context_default_params();
-
- ctx_params.seed = 1234;
- ctx_params.n_ctx = 2048;
- ctx_params.n_threads = params.n_threads;
- ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
+ llama_context_params ctx_params = llama_context_params_from_gpt_params(params);
llama_context * ctx = llama_new_context_with_model(model, ctx_params);
@@ -69,14 +61,14 @@ int main(int argc, char ** argv) {
tokens_list = ::llama_tokenize(ctx, params.prompt, true);
const int n_ctx = llama_n_ctx(ctx);
- const int n_kv_req = tokens_list.size() + (n_len - tokens_list.size());
+ const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());
- LOG_TEE("\n%s: n_len = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_len, n_ctx, n_kv_req);
+ LOG_TEE("\n%s: n_predict = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, n_kv_req);
// make sure the KV cache is big enough to hold all the prompt and generated tokens
if (n_kv_req > n_ctx) {
LOG_TEE("%s: error: n_kv_req > n_ctx, the required KV cache size is not big enough\n", __func__);
- LOG_TEE("%s: either reduce n_len or increase n_ctx\n", __func__);
+ LOG_TEE("%s: either reduce n_predict or increase n_ctx\n", __func__);
return 1;
}
@@ -115,7 +107,7 @@ int main(int argc, char ** argv) {
const auto t_main_start = ggml_time_us();
- while (n_cur <= n_len) {
+ while (n_cur <= n_predict) {
// sample the next token
{
auto n_vocab = llama_n_vocab(model);
@@ -134,7 +126,7 @@ int main(int argc, char ** argv) {
const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
// is it an end of generation?
- if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
+ if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
LOG_TEE("\n");
break;
diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp
index 12e46fbc91a24..0939a1a6a7a38 100644
--- a/examples/speculative/speculative.cpp
+++ b/examples/speculative/speculative.cpp
@@ -27,7 +27,8 @@ struct seq_draft {
int main(int argc, char ** argv) {
gpt_params params;
- if (gpt_params_parse(argc, argv, params) == false) {
+ if (!gpt_params_parse(argc, argv, params)) {
+ gpt_params_print_usage(argc, argv, params);
return 1;
}
diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index e2f85c68297b8..b779f6bd49876 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -302,7 +302,7 @@ static struct ggml_tensor * llama_build_train_graphs(
const int rope_mode = 0;
return ggml_rope_ext(
- ctx, t, KQ_pos, nullptr, n_rot, rope_mode, n_ctx, 0, rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
+ ctx, t, KQ_pos, nullptr, n_rot, rope_mode, n_ctx, rope_freq_base, rope_freq_scale, 0.0f, 1.0f, 0.0f, 0.0f
);
};
diff --git a/flake.nix b/flake.nix
index 9cd3756e53e51..0a52ea52ea9d9 100644
--- a/flake.nix
+++ b/flake.nix
@@ -159,7 +159,6 @@
windows = config.legacyPackages.llamaPackagesWindows.llama-cpp;
}
// lib.optionalAttrs pkgs.stdenv.isLinux {
- opencl = config.packages.default.override { useOpenCL = true; };
cuda = config.legacyPackages.llamaPackagesCuda.llama-cpp;
mpi-cpu = config.packages.default.override { useMpi = true; };
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index daaa0cd6a5473..c81c6a0d783be 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -2702,10 +2702,8 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t
if (cuda_graph_update_required) {
// Extract nodes from graph
- if (cuda_ctx->cuda_graph->num_nodes == 0) {
- // First call with null argument gets number of nodes in graph
- CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, nullptr, &cuda_ctx->cuda_graph->num_nodes));
- }
+ // First call with null argument gets number of nodes in graph
+ CUDA_CHECK(cudaGraphGetNodes(cuda_ctx->cuda_graph->graph, nullptr, &cuda_ctx->cuda_graph->num_nodes));
// Subsequent call with non-null argument gets nodes
cuda_ctx->cuda_graph->nodes.resize(cuda_ctx->cuda_graph->num_nodes);
cuda_ctx->cuda_graph->params.resize(cuda_ctx->cuda_graph->num_nodes);
diff --git a/ggml-cuda/rope.cu b/ggml-cuda/rope.cu
index 0dd07977ebab1..596fb7c135058 100644
--- a/ggml-cuda/rope.cu
+++ b/ggml-cuda/rope.cu
@@ -1,7 +1,7 @@
#include "rope.cuh"
struct rope_corr_dims {
- float v[4];
+ float v[2];
};
static __device__ float rope_yarn_ramp(const float low, const float high, const int i0) {
@@ -13,8 +13,7 @@ static __device__ float rope_yarn_ramp(const float low, const float high, const
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static __device__ void rope_yarn(
float theta_extrap, float freq_scale, rope_corr_dims corr_dims, int64_t i0, float ext_factor, float mscale,
- float * cos_theta, float * sin_theta
-) {
+ float * cos_theta, float * sin_theta) {
// Get n-d rotational scaling corrected for extrapolation
float theta_interp = freq_scale * theta_extrap;
float theta = theta_interp;
@@ -29,27 +28,38 @@ static __device__ void rope_yarn(
*sin_theta = sinf(theta) * mscale;
}
-// rope == RoPE == rotary positional embedding
-template<typename T, bool has_pos>
-static __global__ void rope(
- const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base,
- float ext_factor, float attn_factor, rope_corr_dims corr_dims
-) {
- const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
+template<typename T, bool has_ff>
+static __global__ void rope_norm(
+ const T * x, T * dst, int ne0, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, const float * freq_factors) {
+ const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y);
- if (col >= ncols) {
+ if (i0 >= ne0) {
return;
}
const int row = blockDim.x*blockIdx.x + threadIdx.x;
- const int i = row*ncols + col;
+
+ if (i0 >= n_dims) {
+ const int i = row*ne0 + i0;
+
+ dst[i + 0] = x[i + 0];
+ dst[i + 1] = x[i + 1];
+
+ return;
+ }
+
+ const int i = row*ne0 + i0;
const int i2 = row/p_delta_rows;
- const int p = has_pos ? pos[i2] : 0;
- const float theta_base = p*powf(freq_base, -float(col)/ncols);
+ const float theta_base = pos[i2]*powf(theta_scale, i0/2.0f);
+
+ const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;
- float cos_theta, sin_theta;
- rope_yarn(theta_base, freq_scale, corr_dims, col, ext_factor, attn_factor, &cos_theta, &sin_theta);
+ float cos_theta;
+ float sin_theta;
+
+ rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
const float x0 = x[i + 0];
const float x1 = x[i + 1];
@@ -58,23 +68,20 @@ static __global__ void rope(
dst[i + 1] = x0*sin_theta + x1*cos_theta;
}
-template<typename T, bool has_pos, bool has_freq_facs>
+template<typename T, bool has_ff>
static __global__ void rope_neox(
- const T * x, T * dst, int ncols, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows,
- float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, const float * freq_factors
-) {
- const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
+ const T * x, T * dst, int ne0, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, const float * freq_factors) {
+ const int i0 = 2*(blockDim.y*blockIdx.y + threadIdx.y);
- if (col >= ncols) {
+ if (i0 >= ne0) {
return;
}
const int row = blockDim.x*blockIdx.x + threadIdx.x;
- const int ib = col / n_dims;
- const int ic = col % n_dims;
- if (ib > 0) {
- const int i = row*ncols + ib*n_dims + ic;
+ if (i0 >= n_dims) {
+ const int i = row*ne0 + i0;
dst[i + 0] = x[i + 0];
dst[i + 1] = x[i + 1];
@@ -82,16 +89,17 @@ static __global__ void rope_neox(
return;
}
- const int i = row*ncols + ib*n_dims + ic/2;
+ const int i = row*ne0 + i0/2;
const int i2 = row/p_delta_rows;
- const int p = has_pos ? pos[i2] : 0;
- const float freq_factor = has_freq_facs ? freq_factors[ic/2] : 1.0f;
+ const float theta_base = pos[i2]*powf(theta_scale, i0/2.0f);
- const float theta_base = p*powf(theta_scale, col/2.0f)/freq_factor;
+ const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;
- float cos_theta, sin_theta;
- rope_yarn(theta_base, freq_scale, corr_dims, ic, ext_factor, attn_factor, &cos_theta, &sin_theta);
+ float cos_theta;
+ float sin_theta;
+
+ rope_yarn(theta_base/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
const float x0 = x[i + 0];
const float x1 = x[i + n_dims/2];
@@ -100,144 +108,81 @@ static __global__ void rope_neox(
dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta;
}
-static __global__ void rope_glm_f32(
- const float * x, float * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base,
- int n_ctx
-) {
- const int col = blockDim.x*blockIdx.x + threadIdx.x;
- const int half_n_dims = ncols/4;
-
- if (col >= half_n_dims) {
- return;
- }
-
- const int row = blockDim.y*blockIdx.y + threadIdx.y;
- const int i = row*ncols + col;
- const int i2 = row/p_delta_rows;
-
- const float col_theta_scale = powf(freq_base, -2.0f*col/ncols);
- // FIXME: this is likely wrong
- const int p = pos != nullptr ? pos[i2] : 0;
-
- const float theta = min(p, n_ctx - 2)*freq_scale*col_theta_scale;
- const float sin_theta = sinf(theta);
- const float cos_theta = cosf(theta);
-
- const float x0 = x[i + 0];
- const float x1 = x[i + half_n_dims];
-
- dst[i + 0] = x0*cos_theta - x1*sin_theta;
- dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta;
-
- const float block_theta = ((float)max(p - n_ctx - 2, 0))*col_theta_scale;
- const float sin_block_theta = sinf(block_theta);
- const float cos_block_theta = cosf(block_theta);
-
- const float x2 = x[i + half_n_dims * 2];
- const float x3 = x[i + half_n_dims * 3];
-
- dst[i + half_n_dims * 2] = x2*cos_block_theta - x3*sin_block_theta;
- dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta;
-}
-
-
template<typename T>
-static void rope_cuda(
- const T * x, T * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
- float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream
-) {
- GGML_ASSERT(ncols % 2 == 0);
+static void rope_norm_cuda(
+ const T * x, T * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) {
+ GGML_ASSERT(ne0 % 2 == 0);
const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
- const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
- const dim3 block_nums(nrows, num_blocks_x, 1);
- if (pos == nullptr) {
- rope<T, false><<<block_nums, block_dims, 0, stream>>>(
- x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims
- );
+ const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
+ const dim3 block_nums(nr, n_blocks_x, 1);
+
+ const float theta_scale = powf(freq_base, -2.0f/n_dims);
+
+ if (freq_factors == nullptr) {
+ rope_norm<T, false><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ne0, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
+ theta_scale, freq_factors
+ );
} else {
- rope<T, true><<<block_nums, block_dims, 0, stream>>>(
- x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims
- );
+ rope_norm<T, true><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ne0, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
+ theta_scale, freq_factors
+ );
}
}
template<typename T>
static void rope_neox_cuda(
- const T * x, T * dst, int ncols, int n_dims, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
- float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream
-) {
- GGML_ASSERT(ncols % 2 == 0);
+ const T * x, T * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) {
+ GGML_ASSERT(ne0 % 2 == 0);
const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
- const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
- const dim3 block_nums(nrows, num_blocks_x, 1);
+ const int n_blocks_x = (ne0 + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
+ const dim3 block_nums(nr, n_blocks_x, 1);
const float theta_scale = powf(freq_base, -2.0f/n_dims);
- if (pos == nullptr) {
- if (freq_factors == nullptr) {
- rope_neox<T, false, false><<<block_nums, block_dims, 0, stream>>>(
- x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
- theta_scale, freq_factors
- );
- } else {
- rope_neox<T, false, true><<<block_nums, block_dims, 0, stream>>>(
- x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
+ if (freq_factors == nullptr) {
+ rope_neox<T, false><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ne0, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
theta_scale, freq_factors
);
- }
} else {
- if (freq_factors == nullptr) {
- rope_neox<T, true, false><<<block_nums, block_dims, 0, stream>>>(
- x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
- theta_scale, freq_factors
- );
- } else {
- rope_neox<T, true, true><<<block_nums, block_dims, 0, stream>>>(
- x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
+ rope_neox<T, true><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ne0, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
theta_scale, freq_factors
);
- }
}
}
-static void rope_glm_f32_cuda(
- const float * x, float * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
- float freq_base, int n_ctx, cudaStream_t stream
-) {
- GGML_ASSERT(ncols % 4 == 0);
- const dim3 block_dims(CUDA_ROPE_BLOCK_SIZE/4, 1, 1);
- const int num_blocks_x = (ncols + CUDA_ROPE_BLOCK_SIZE - 1) / CUDA_ROPE_BLOCK_SIZE;
- const dim3 block_nums(num_blocks_x, nrows, 1);
- rope_glm_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, n_ctx);
-}
-
-static void rope_cuda_f16(
- const half * x, half * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
- float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream) {
+static void rope_norm_cuda_f16(
+ const half * x, half * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) {
- rope_cuda(x, dst, ncols, nrows, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, stream);
+ rope_norm_cuda(x, dst, ne0, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream);
}
-static void rope_cuda_f32(
- const float * x, float * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
- float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream) {
+static void rope_norm_cuda_f32(
+ const float * x, float * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) {
- rope_cuda(x, dst, ncols, nrows, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, stream);
+ rope_norm_cuda(x, dst, ne0, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream);
}
static void rope_neox_cuda_f16(
- const half * x, half * dst, int ncols, int n_dims, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
+ const half * x, half * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows,
float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream) {
- rope_neox_cuda(x, dst, ncols, n_dims, nrows, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream);
+ rope_neox_cuda(x, dst, ne0, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream);
}
static void rope_neox_cuda_f32(
- const float * x, float * dst, int ncols, int n_dims, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
+ const float * x, float * dst, int ne0, int n_dims, int nr, const int32_t * pos, float freq_scale, int p_delta_rows,
float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, const float * freq_factors, cudaStream_t stream
) {
- rope_neox_cuda(x, dst, ncols, n_dims, nrows, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream);
+ rope_neox_cuda(x, dst, ne0, n_dims, nr, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, stream);
}
void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -258,16 +203,22 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
- const int64_t nrows = ggml_nrows(src0);
+ const int64_t nr = ggml_nrows(src0);
- //const int n_past = ((int32_t *) dst->op_params)[0];
- const int n_dims = ((int32_t *) dst->op_params)[1];
- const int mode = ((int32_t *) dst->op_params)[2];
- const int n_ctx = ((int32_t *) dst->op_params)[3];
- const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ //const int n_past = ((int32_t *) dst->op_params)[0];
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ const int mode = ((int32_t *) dst->op_params)[2];
+ //const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
// RoPE alteration for extended context
- float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
+ float freq_base;
+ float freq_scale;
+ float ext_factor;
+ float attn_factor;
+ float beta_fast;
+ float beta_slow;
+
memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
@@ -275,38 +226,28 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
- const float * freq_factors = nullptr;
- const int32_t * pos = nullptr;
-
const bool is_neox = mode & 2;
- const bool is_glm = mode & 4;
- pos = (const int32_t *) src1_d;
+ const int32_t * pos = (const int32_t *) src1_d;
- if (is_neox) {
- if (src2 != nullptr) {
- freq_factors = (const float *) src2->data;
- }
- } else {
- GGML_ASSERT(src2 == nullptr && "TODO: freq_factors not implemented for !is_neox");
+ const float * freq_factors = nullptr;
+ if (src2 != nullptr) {
+ freq_factors = (const float *) src2->data;
}
rope_corr_dims corr_dims;
- ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims.v);
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims.v);
// compute
- if (is_glm) {
- GGML_ASSERT(false);
- rope_glm_f32_cuda(src0_d, dst_d, ne00, nrows, pos, freq_scale, ne01, freq_base, n_ctx, stream);
- } else if (is_neox) {
+ if (is_neox) {
if (src0->type == GGML_TYPE_F32) {
rope_neox_cuda_f32(
- (const float *)src0_d, (float *)dst_d, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
+ (const float *)src0_d, (float *)dst_d, ne00, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor,
attn_factor, corr_dims, freq_factors, stream
);
} else if (src0->type == GGML_TYPE_F16) {
rope_neox_cuda_f16(
- (const half *)src0_d, (half *)dst_d, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
+ (const half *)src0_d, (half *)dst_d, ne00, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor,
attn_factor, corr_dims, freq_factors, stream
);
} else {
@@ -314,14 +255,14 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
}
} else {
if (src0->type == GGML_TYPE_F32) {
- rope_cuda_f32(
- (const float *)src0_d, (float *)dst_d, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
- attn_factor, corr_dims, stream
+ rope_norm_cuda_f32(
+ (const float *)src0_d, (float *)dst_d, ne00, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor,
+ attn_factor, corr_dims, freq_factors, stream
);
} else if (src0->type == GGML_TYPE_F16) {
- rope_cuda_f16(
- (const half *)src0_d, (half *)dst_d, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
- attn_factor, corr_dims, stream
+ rope_norm_cuda_f16(
+ (const half *)src0_d, (half *)dst_d, ne00, n_dims, nr, pos, freq_scale, ne01, freq_base, ext_factor,
+ attn_factor, corr_dims, freq_factors, stream
);
} else {
GGML_ASSERT(false);
diff --git a/ggml-kompute.cpp b/ggml-kompute.cpp
index eabd70d5eeed8..5592741be4255 100644
--- a/ggml-kompute.cpp
+++ b/ggml-kompute.cpp
@@ -1192,7 +1192,7 @@ static void ggml_vk_rope(
const std::shared_ptr<kp::Tensor>& inB,
const std::shared_ptr<kp::Tensor>& out,
uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
- ggml_type src0t, int32_t n_dims, int32_t mode, int32_t n_orig_ctx,
+ ggml_type src0t, int32_t n_dims, int32_t mode, int32_t n_ctx_orig,
float freq_base, float freq_scale, float ext_factor, float attn_factor, float beta_fast, float beta_slow,
int32_t ne01, int32_t ne02, int32_t ne03,
uint32_t nb00, uint32_t nb01, uint32_t nb02, uint32_t nb03,
@@ -1221,14 +1221,14 @@ static void ggml_vk_rope(
struct PushConstants {
uint32_t inAOff, inBOff, outOff;
- int32_t n_dims, mode, n_orig_ctx;
+ int32_t n_dims, mode, n_ctx_orig;
float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
uint32_t nb00, nb01, nb02, nb03;
int32_t ne0;
uint32_t nb0, nb1, nb2, nb3;
} pushConsts {
safe_divide(inAOff, type_size), safe_divide(inBOff, 4), safe_divide(outOff, type_size),
- n_dims, mode, n_orig_ctx,
+ n_dims, mode, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow,
nb00, nb01, nb02, nb03,
ne0,
@@ -1692,13 +1692,16 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
#pragma message(" https://github.com/ggerganov/llama.cpp/pull/7225")
GGML_ASSERT(dst->src[2] == nullptr && "phi3 frequency factors not implemented yet");
+#pragma message("TODO: update rope NORM mode to match NEOX mode")
+#pragma message(" https://github.com/ggerganov/llama.cpp/pull/7634")
+
GGML_ASSERT(ne10 == ne02);
GGML_ASSERT(src0t == dstt);
// const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
// skip 3, n_ctx used in GLM RoPE, unimplemented in Vulkan
- const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
@@ -1708,7 +1711,7 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
ggml_vk_rope(
- seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, src0t, n_dims, mode, n_orig_ctx,
+ seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, src0t, n_dims, mode, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow,
ne01, ne02, ne03, nb00, nb01, nb02, nb03, ne0, nb0, nb1, nb2, nb3
);
diff --git a/ggml-metal.h b/ggml-metal.h
index a5c542189c295..e7543ae795d28 100644
--- a/ggml-metal.h
+++ b/ggml-metal.h
@@ -1,7 +1,7 @@
// An interface allowing to compute ggml_cgraph with Metal
//
// This is a fully functional interface that extends ggml with GPU support for Apple devices.
-// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, OpenCL, etc.)
+// A similar interface can be created for other GPU backends (e.g. Vulkan, CUDA, etc.)
//
// How it works?
//
diff --git a/ggml-metal.m b/ggml-metal.m
index fddc44f78d8af..946f11813dcf9 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -172,8 +172,10 @@
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
- GGML_METAL_KERNEL_TYPE_ROPE_F32,
- GGML_METAL_KERNEL_TYPE_ROPE_F16,
+ GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32,
+ GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16,
+ GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F32,
+ GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F16,
GGML_METAL_KERNEL_TYPE_IM2COL_F16,
GGML_METAL_KERNEL_TYPE_IM2COL_F32,
GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
@@ -626,8 +628,10 @@ static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32, mul_mm_id_iq1_m_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
- GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
- GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32, rope_norm_f32, true);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16, rope_norm_f16, true);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F32, rope_neox_f32, true);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F16, rope_neox_f16, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
@@ -2285,7 +2289,7 @@ static enum ggml_status ggml_metal_graph_compute(
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
// skip 3, n_ctx, used in GLM RoPE, unimplemented in metal
- const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
float freq_base;
float freq_scale;
@@ -2302,22 +2306,23 @@ static enum ggml_status ggml_metal_graph_compute(
memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
const bool is_neox = mode & 2;
- const bool is_glm = mode & 4;
- GGML_ASSERT(!is_glm && "GLM RoPE not implemented in Metal");
+ id<MTLComputePipelineState> pipeline = nil;
if (!is_neox) {
- GGML_ASSERT(id_src2 == nil && "TODO: freq_factors not implemented for !is_neox");
+ switch (src0->type) {
+ case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NORM_F32].pipeline; break;
+ case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NORM_F16].pipeline; break;
+ default: GGML_ASSERT(false);
+ };
+ } else {
+ switch (src0->type) {
+ case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F32].pipeline; break;
+ case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_NEOX_F16].pipeline; break;
+ default: GGML_ASSERT(false);
+ };
}
- id<MTLComputePipelineState> pipeline = nil;
-
- switch (src0->type) {
- case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F32].pipeline; break;
- case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F16].pipeline; break;
- default: GGML_ASSERT(false);
- };
-
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
@@ -2345,14 +2350,13 @@ static enum ggml_status ggml_metal_graph_compute(
[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:19];
[encoder setBytes:&n_past length:sizeof( int) atIndex:20];
[encoder setBytes:&n_dims length:sizeof( int) atIndex:21];
- [encoder setBytes:&mode length:sizeof( int) atIndex:22];
- [encoder setBytes:&n_orig_ctx length:sizeof( int) atIndex:23];
- [encoder setBytes:&freq_base length:sizeof( float) atIndex:24];
- [encoder setBytes:&freq_scale length:sizeof( float) atIndex:25];
- [encoder setBytes:&ext_factor length:sizeof( float) atIndex:26];
- [encoder setBytes:&attn_factor length:sizeof( float) atIndex:27];
- [encoder setBytes:&beta_fast length:sizeof( float) atIndex:28];
- [encoder setBytes:&beta_slow length:sizeof( float) atIndex:29];
+ [encoder setBytes:&n_ctx_orig length:sizeof( int) atIndex:22];
+ [encoder setBytes:&freq_base length:sizeof( float) atIndex:23];
+ [encoder setBytes:&freq_scale length:sizeof( float) atIndex:24];
+ [encoder setBytes:&ext_factor length:sizeof( float) atIndex:25];
+ [encoder setBytes:&attn_factor length:sizeof( float) atIndex:26];
+ [encoder setBytes:&beta_fast length:sizeof( float) atIndex:27];
+ [encoder setBytes:&beta_slow length:sizeof( float) atIndex:28];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
diff --git a/ggml-metal.metal b/ggml-metal.metal
index 0cb85e1a5bad4..e2796fd601281 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -1654,8 +1654,7 @@ static float rope_yarn_ramp(const float low, const float high, const int i0) {
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static void rope_yarn(
float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
- thread float * cos_theta, thread float * sin_theta
-) {
+ thread float * cos_theta, thread float * sin_theta) {
// Get n-d rotational scaling corrected for extrapolation
float theta_interp = freq_scale * theta_extrap;
float theta = theta_interp;
@@ -1672,19 +1671,20 @@ static void rope_yarn(
// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
-static float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) {
- return n_dims * log(n_orig_ctx / (n_rot * 2 * M_PI_F)) / (2 * log(base));
+static float rope_yarn_corr_factor(int n_dims, int n_ctx_orig, float n_rot, float base) {
+ return n_dims * log(n_ctx_orig / (n_rot * 2 * M_PI_F)) / (2 * log(base));
}
static void rope_yarn_corr_dims(
- int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
+ int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]
) {
// start and end correction dims
- dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base)));
- dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base)));
+ dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_fast, freq_base)));
+ dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_slow, freq_base)));
}
-typedef void (rope_t)(
+template<typename T>
+kernel void kernel_rope_norm(
device const void * src0,
device const int32_t * src1,
device const float * src2,
@@ -1707,8 +1707,7 @@ typedef void (rope_t)(
constant uint64_t & nb3,
constant int & n_past,
constant int & n_dims,
- constant int & mode,
- constant int & n_orig_ctx,
+ constant int & n_ctx_orig,
constant float & freq_base,
constant float & freq_scale,
constant float & ext_factor,
@@ -1717,10 +1716,52 @@ typedef void (rope_t)(
constant float & beta_slow,
uint tiitg[[thread_index_in_threadgroup]],
uint3 tptg[[threads_per_threadgroup]],
- uint3 tgpig[[threadgroup_position_in_grid]]);
+ uint3 tgpig[[threadgroup_position_in_grid]]) {
+ const int64_t i3 = tgpig[2];
+ const int64_t i2 = tgpig[1];
+ const int64_t i1 = tgpig[0];
+
+ float corr_dims[2];
+ rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
+
+ device const int32_t * pos = src1;
+
+ const float theta_base = (float) pos[i2];
+ const float inv_ndims = -1.f/n_dims;
+
+ float cos_theta;
+ float sin_theta;
+
+ for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) {
+ if (i0 < n_dims) {
+ const int64_t ic = i0/2;
+
+ const float theta = theta_base * pow(freq_base, inv_ndims*i0);
+
+ const float freq_factor = src2 != src0 ? src2[ic] : 1.0f;
+
+ rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
+
+ device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float x0 = src[0];
+ const float x1 = src[1];
+
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[1] = x0*sin_theta + x1*cos_theta;
+ } else {
+ device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
+ }
+ }
+}
template<typename T>
-kernel void kernel_rope(
+kernel void kernel_rope_neox(
device const void * src0,
device const int32_t * src1,
device const float * src2,
@@ -1743,8 +1784,7 @@ kernel void kernel_rope(
constant uint64_t & nb3,
constant int & n_past,
constant int & n_dims,
- constant int & mode,
- constant int & n_orig_ctx,
+ constant int & n_ctx_orig,
constant float & freq_base,
constant float & freq_scale,
constant float & ext_factor,
@@ -1758,69 +1798,53 @@ kernel void kernel_rope(
const int64_t i2 = tgpig[1];
const int64_t i1 = tgpig[0];
- const bool is_neox = mode & 2;
-
float corr_dims[2];
- rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
+ rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
device const int32_t * pos = src1;
- const int64_t p = pos[i2];
-
- const float theta_base = (float)p;
+ const float theta_base = (float) pos[i2];
const float inv_ndims = -1.f/n_dims;
- if (!is_neox) {
- for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) {
- const float theta = theta_base * pow(freq_base, inv_ndims*i0);
-
- float cos_theta, sin_theta;
- rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
-
- device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
-
- const T x0 = src[0];
- const T x1 = src[1];
+ float cos_theta;
+ float sin_theta;
- dst_data[0] = x0*cos_theta - x1*sin_theta;
- dst_data[1] = x0*sin_theta + x1*cos_theta;
- }
- } else {
- for (int64_t ic = 2*tiitg; ic < ne0; ic += 2*tptg.x) {
- if (ic < n_dims) {
- const int64_t i0 = ic/2;
+ for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) {
+ if (i0 < n_dims) {
+ const int64_t ic = i0/2;
- const float freq_factor = src2 != src0 ? src2[i0] : 1.0f;
-
- const float theta = theta_base * pow(freq_base, inv_ndims*ic);
+ const float theta = theta_base * pow(freq_base, inv_ndims*i0);
- float cos_theta, sin_theta;
- rope_yarn(theta/freq_factor, freq_scale, corr_dims, ic, ext_factor, attn_factor, &cos_theta, &sin_theta);
+ const float freq_factor = src2 != src0 ? src2[ic] : 1.0f;
- device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ rope_yarn(theta/freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
- const float x0 = src[0];
- const float x1 = src[n_dims/2];
+ device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
+ device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
- dst_data[0] = x0*cos_theta - x1*sin_theta;
- dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
- } else {
- const int64_t i0 = ic;
+ const float x0 = src[0];
+ const float x1 = src[n_dims/2];
- device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
+ } else {
+ device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- dst_data[0] = src[0];
- dst_data[1] = src[1];
- }
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
}
}
}
-template [[host_name("kernel_rope_f32")]] kernel rope_t kernel_rope;
-template [[host_name("kernel_rope_f16")]] kernel rope_t kernel_rope;
+typedef decltype(kernel_rope_norm<float>) kernel_rope_norm_t;
+typedef decltype(kernel_rope_neox<float>) kernel_rope_neox_t;
+
+template [[host_name("kernel_rope_norm_f32")]] kernel kernel_rope_norm_t kernel_rope_norm;
+template [[host_name("kernel_rope_norm_f16")]] kernel kernel_rope_norm_t kernel_rope_norm;
+
+template [[host_name("kernel_rope_neox_f32")]] kernel kernel_rope_neox_t kernel_rope_neox;
+template [[host_name("kernel_rope_neox_f16")]] kernel kernel_rope_neox_t kernel_rope_neox;
typedef void (im2col_t)(
device const float * x,
diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
deleted file mode 100644
index e28566a7bdbd7..0000000000000
--- a/ggml-opencl.cpp
+++ /dev/null
@@ -1,2305 +0,0 @@
-#include "ggml.h"
-#include "ggml-opencl.h"
-#include "ggml-backend-impl.h"
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#define CL_TARGET_OPENCL_VERSION 120
-#include
-
-#if defined(_MSC_VER)
-#pragma warning(disable: 4244 4267) // possible loss of data
-#endif
-
-#define CL_DMMV_LOCAL_SIZE 32
-
-#ifndef K_QUANTS_PER_ITERATION
-#define K_QUANTS_PER_ITERATION 1
-#else
-static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
-#endif
-
-#define MULTILINE_QUOTE(...) #__VA_ARGS__
-static std::string program_source = MULTILINE_QUOTE(
-
-typedef char int8_t;
-typedef uchar uint8_t;
-typedef short int16_t;
-typedef ushort uint16_t;
-typedef int int32_t;
-typedef uint uint32_t;
-
-struct __attribute__ ((packed)) block_q4_0
-{
- half d;
- uint8_t qs[QK4_0 / 2];
-};
-
-struct __attribute__ ((packed)) block_q4_1
-{
- half d;
- half m;
- uint8_t qs[QK4_1 / 2];
-};
-
-struct __attribute__ ((packed)) block_q5_0
-{
- half d;
- uint32_t qh;
- uint8_t qs[QK5_0 / 2];
-};
-
-struct __attribute__ ((packed)) block_q5_1
-{
- half d;
- half m;
- uint32_t qh;
- uint8_t qs[QK5_1 / 2];
-};
-
-struct __attribute__ ((packed)) block_q8_0
-{
- half d;
- int8_t qs[QK8_0];
-};
-
-struct __attribute__((packed)) block_q2_K
-{
- uint8_t scales[16];
- uint8_t qs[64];
- half d;
- half dmin;
-};
-
-struct __attribute__((packed)) block_q3_K
-{
- uint8_t hmask[32];
- uint8_t qs[64];
- uint8_t scales[12];
- half d;
-};
-
-struct __attribute__((packed)) block_q4_K
-{
- half d;
- half dmin;
- uint8_t scales[12];
- uint8_t qs[128];
-};
-
-struct __attribute__((packed)) block_q5_K
-{
- half d;
- half dmin;
- uint8_t scales[12];
- uint8_t qh[32];
- uint8_t qs[128];
-};
-
-struct __attribute__((packed)) block_q6_K
-{
- uint8_t ql[128];
- uint8_t qh[64];
- int8_t scales[16];
- half d;
-};
-
-__kernel void convert_fp16_to_fp32(__global half* x, __global float* y) {
- const uint i = get_global_id(0);
-
- y[i] = vload_half(0, &x[i]);
-}
-
-void dequantize_q4_0(__global const struct block_q4_0* x, const int ib, const int iqs, float* v0, float* v1) {
- const float d = vload_half(0, &x[ib].d);
-
- const uint8_t vui = x[ib].qs[iqs];
-
- const int8_t vi0 = vui & 0xF;
- const int8_t vi1 = vui >> 4;
-
- *v0 = (vi0 - 8)*d;
- *v1 = (vi1 - 8)*d;
-}
-void dequantize_q4_1(__global const struct block_q4_1* x, const int ib, const int iqs, float* v0, float* v1) {
- const float d = vload_half(0, &x[ib].d);
- const float m = vload_half(0, &x[ib].m);
-
- const uint8_t vui = x[ib].qs[iqs];
-
- const int8_t vi0 = vui & 0xF;
- const int8_t vi1 = vui >> 4;
-
- *v0 = vi0*d + m;
- *v1 = vi1*d + m;
-}
-void dequantize_q5_0(__global const struct block_q5_0* x, const int ib, const int iqs, float* v0, float* v1) {
- const float d = vload_half(0, &x[ib].d);
-
- uint32_t qh = x[ib].qh;
-
- const uint8_t xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
-
- const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0) - 16;
- const int32_t x1 = ((x[ib].qs[iqs] >> 4) | xh_1) - 16;
-
- *v0 = x0*d;
- *v1 = x1*d;
-}
-void dequantize_q5_1(__global const struct block_q5_1* x, const int ib, const int iqs, float* v0, float* v1) {
- const float d = vload_half(0, &x[ib].d);
- const float m = vload_half(0, &x[ib].m);
-
- uint32_t qh = x[ib].qh;
-
- const uint8_t xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
-
- const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0);
- const int32_t x1 = ((x[ib].qs[iqs] >> 4) | xh_1);
-
- *v0 = x0*d + m;
- *v1 = x1*d + m;
-}
-void dequantize_q8_0(__global const struct block_q8_0* x, const int ib, const int iqs, float* v0, float* v1) {
- const float d = vload_half(0, &x[ib].d);
-
- const int8_t vi0 = x[ib].qs[iqs + 0];
- const int8_t vi1 = x[ib].qs[iqs + 1];
-
- *v0 = vi0*d;
- *v1 = vi1*d;
-}
-void convert_f16(__global half* x, const int ib, const int iqs, float* v0, float* v1){
- *v0 = vload_half(0, &x[ib + 0]);
- *v1 = vload_half(0, &x[ib + 1]);
-}
-);
-
-static std::string k_quants_source = MULTILINE_QUOTE(
-inline void get_scale_min_k4(int j, const __global uint8_t *q, uint8_t *d, uint8_t *m)
-{
- if (j < 4)
- {
- *d = q[j] & 63;
- *m = q[j + 4] & 63;
- }
- else
- {
- *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
- *m = (q[j + 4] >> 4) | ((q[j - 0] >> 6) << 4);
- }
-}
-
-__kernel void dequantize_block_q2_K(__global const struct block_q2_K *x, __global float *yy)
-{
- const int i = get_group_id(0) + get_global_offset(0);
- const int tid = get_local_id(0);
- const int n = tid / 32;
- const int l = tid - 32 * n;
- const int is = 8 * n + l / 16;
-
- const uint8_t q = x[i].qs[32 * n + l];
- __global float *y = yy + get_group_id(0) * QK_K + 128 * n;
-
- const float dall = vload_half(0, &x[i].d);
- const float dmin = vload_half(0, &x[i].dmin);
-
- y[l + 0] = dall * (x[i].scales[is + 0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is + 0] >> 4);
- y[l + 32] = dall * (x[i].scales[is + 2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is + 2] >> 4);
- y[l + 64] = dall * (x[i].scales[is + 4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is + 4] >> 4);
- y[l + 96] = dall * (x[i].scales[is + 6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is + 6] >> 4);
-}
-
-__kernel void dequantize_block_q3_K(__global const struct block_q3_K *x, __global float *yy)
-{
- int r = get_local_id(0) / 4;
- int i = get_group_id(0) + get_global_offset(0);
- int tid = r / 2;
- int is0 = r % 2;
- int l0 = 16 * is0 + 4 * (get_local_id(0) % 4);
- int n = tid / 4;
- int j = tid - 4 * n;
-
- uint8_t m = 1 << (4 * n + j);
- int is = 8 * n + 2 * j + is0;
- int shift = 2 * j;
-
- int8_t us = is < 4 ? (x[i].scales[is - 0] & 0xF) | (((x[i].scales[is + 8] >> 0) & 3) << 4)
- : is < 8 ? (x[i].scales[is - 0] & 0xF) | (((x[i].scales[is + 4] >> 2) & 3) << 4)
- : is < 12 ? (x[i].scales[is - 8] >> 4) | (((x[i].scales[is + 0] >> 4) & 3) << 4)
- : (x[i].scales[is - 8] >> 4) | (((x[i].scales[is - 4] >> 6) & 3) << 4);
- float d_all = vload_half(0, &x[i].d);
- float dl = d_all * (us - 32);
-
- __global float *y = yy + get_group_id(0) * QK_K + 128 * n + 32 * j;
- const __global uint8_t *q = x[i].qs + 32 * n;
- const __global uint8_t *hm = x[i].hmask;
-
- for (int l = l0; l < l0 + 4; ++l)
- y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
-}
-
-__kernel void dequantize_block_q4_K(__global const struct block_q4_K *x, __global float *yy)
-{
- const int i = get_group_id(0) + get_global_offset(0);
- const int tid = get_local_id(0);
- const int il = tid / 8;
- const int ir = tid % 8;
- const int is = 2 * il;
- const int n = 4;
-
- __global float *y = yy + get_group_id(0) * QK_K + 64 * il + n * ir;
-
- const float dall = vload_half(0, &x[i].d);
- const float dmin = vload_half(0, &x[i].dmin);
-
- __global const uint8_t *q = x[i].qs + 32 * il + n * ir;
-
- uint8_t sc, m;
- get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
- float d1 = dall * sc;
- float m1 = dmin * m;
- get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
- float d2 = dall * sc;
- float m2 = dmin * m;
- for (int l = 0; l < n; ++l)
- {
- y[l + 0] = d1 * (q[l] & 0xF) - m1;
- y[l + 32] = d2 * (q[l] >> 4) - m2;
- }
-}
-
-__kernel void dequantize_block_q5_K(__global const struct block_q5_K *x, __global float *yy)
-{
- const int i = get_group_id(0) + get_global_offset(0);
- const int tid = get_local_id(0);
- const int il = tid / 16;
- const int ir = tid % 16;
- const int is = 2 * il;
-
- __global float *y = yy + get_group_id(0) * QK_K + 64 * il + 2 * ir;
-
- const float dall = vload_half(0, &x[i].d);
- const float dmin = vload_half(0, &x[i].dmin);
-
- __global const uint8_t *ql = x[i].qs + 32 * il + 2 * ir;
- __global const uint8_t *qh = x[i].qh + 2 * ir;
-
- uint8_t sc, m;
- get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
- const float d1 = dall * sc;
- const float m1 = dmin * m;
- get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
- const float d2 = dall * sc;
- const float m2 = dmin * m;
-
- uint8_t hm = 1 << (2 * il);
- y[0] = d1 * ((ql[0] & 0xF) + (qh[0] & hm ? 16 : 0)) - m1;
- y[1] = d1 * ((ql[1] & 0xF) + (qh[1] & hm ? 16 : 0)) - m1;
- hm <<= 1;
- y[32] = d2 * ((ql[0] >> 4) + (qh[0] & hm ? 16 : 0)) - m2;
- y[33] = d2 * ((ql[1] >> 4) + (qh[1] & hm ? 16 : 0)) - m2;
-}
-
-__kernel void dequantize_block_q6_K(__global const struct block_q6_K *x, __global float *yy)
-{
- const int i = get_group_id(0) + get_global_offset(0);
- const int tid = get_local_id(0);
- const int ip = tid / 32;
- const int il = tid - 32 * ip;
- const int is = 8 * ip + il / 16;
-
- __global float *y = yy + get_group_id(0) * QK_K + 128 * ip + il;
-
- const float d = vload_half(0, &x[i].d);
-
- __global const uint8_t *ql = x[i].ql + 64 * ip + il;
- const uint8_t qh = x[i].qh[32 * ip + il];
- __global const int8_t *sc = x[i].scales + is;
-
- y[0] = d * sc[0] * ((int8_t)((ql[0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
- y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
- y[64] = d * sc[4] * ((int8_t)((ql[0] >> 4) | (((qh >> 4) & 3) << 4)) - 32);
- y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32);
-}
-
-__kernel void dequantize_mul_mat_vec_q2_K(__global const struct block_q2_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {
-
- const int row = get_group_id(0);
-
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row + get_global_offset(0);
-
- __global const struct block_q2_K * x = xx + ib0;
-
- const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION; // 0...31 or 0...15
- const int ix = get_local_id(0)%K_QUANTS_PER_ITERATION; // 0 or 0,1
-
- const int step = 16/K_QUANTS_PER_ITERATION;
-
- const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
- const int in = tid - step*im; // 0...15 or 0...7
-
- const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
- const int q_offset = 32*im + l0;
- const int s_offset = 8*im;
- const int y_offset = 128*im + l0;
-
- tmp[16 * ix + tid] = 0;
-
- uint32_t aux[4];
- const uint8_t * d = (const uint8_t *)aux;
- const uint8_t * m = (const uint8_t *)(aux + 2);
-
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
-
- __global const float * y = yy + i * QK_K + y_offset;
- __global const uint8_t * q = x[i].qs + q_offset;
-
- const float dall = vload_half(0, &x[i].d);
- const float dmin = vload_half(0, &x[i].dmin);
-
- __global const uint32_t * a = (__global const uint32_t *)(x[i].scales + s_offset);
- aux[0] = a[0] & 0x0f0f0f0f;
- aux[1] = a[1] & 0x0f0f0f0f;
- aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
- aux[3] = (a[1] >> 4) & 0x0f0f0f0f;
-
- float sum1 = 0, sum2 = 0;
- for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
- sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3)
- + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3)
- + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3)
- + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3)
- + y[l+16] * d[1] * ((q[l+16] >> 0) & 3)
- + y[l+48] * d[3] * ((q[l+16] >> 2) & 3)
- + y[l+80] * d[5] * ((q[l+16] >> 4) & 3)
- +y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
- sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6]
- + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];
-
- }
- tmp[16 * ix + tid] += dall * sum1 - dmin * sum2;
-
- }
-
- // sum up partial sums and write back result
- barrier(CLK_LOCAL_MEM_FENCE);
- for (int s=16; s>0; s>>=1) {
- if (tid < s) {
- tmp[tid] += tmp[tid + s];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if (tid == 0) {
- dst[row] = tmp[0];
- }
-}
-
-__kernel void dequantize_mul_mat_vec_q3_K(__global const struct block_q3_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {
- const uint16_t kmask1 = 0x0303;
- const uint16_t kmask2 = 0x0f0f;
-
- const int row = get_group_id(0);
-
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row + get_global_offset(0);
-
- __global const struct block_q3_K * x = xx + ib0;
-
- const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
- const int ix = get_local_id(0)%K_QUANTS_PER_ITERATION; // 0 or 0,1
-
- const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop
- const int step = 16/K_QUANTS_PER_ITERATION;
- const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
- const int in = tid - step*im; // 0....15 or 0...7
-
- const uint8_t m = 1 << (4*im);
-
- const int l0 = n*in; // 0...15 or 0...14 in steps of 2
- const int q_offset = 32*im + l0;
- const int y_offset = 128*im + l0;
-
- uint16_t utmp[4];
- const int8_t * s = (const int8_t *)utmp;
-
- const uint16_t s_shift = 4*im;
-
- tmp[16 * ix + tid] = 0;
-
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
-
- __global const float * y = yy + i * QK_K + y_offset;
- __global const uint8_t * q = x[i].qs + q_offset;
- __global const uint8_t * h = x[i].hmask + l0;
-
- __global const uint16_t * a = (__global const uint16_t *)x[i].scales;
- utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
- utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
- utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
- utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);
-
- const float d = vload_half(0, &x[i].d);
-
- float sum = 0;
- for (int l = 0; l < n; ++l) {
- sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
- + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
- + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
- + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
- sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
- + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
- + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
- + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
- }
- tmp[16 * ix + tid] += d * sum;
-
- }
-
- // sum up partial sums and write back result
- barrier(CLK_LOCAL_MEM_FENCE);
- for (int s=16; s>0; s>>=1) {
- if (tid < s) {
- tmp[tid] += tmp[tid + s];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if (tid == 0) {
- dst[row] = tmp[0];
- }
-}
-
-__kernel void dequantize_mul_mat_vec_q4_K(__global const struct block_q4_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {
-
- //to rename it later, just to test now
- const uint16_t kmask1 = 0x3f3f;
- const uint16_t kmask2 = 0x0f0f;
- const uint16_t kmask3 = 0xc0c0;
-
- const int row = get_group_id(0);
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row + get_global_offset(0);
-
- const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION; // 0...15
- const int ix = get_local_id(0)%K_QUANTS_PER_ITERATION;
-
- const int step = 8/K_QUANTS_PER_ITERATION;
-
- const int il = tid/step; // 0...3
- const int ir = tid - step*il;// 0...3
- const int n = 2*K_QUANTS_PER_ITERATION;
-
- const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
- const int in = il%2;
-
- const int l0 = n*(2*ir + in);
- const int q_offset = 32*im + l0;
- const int y_offset = 64*im + l0;
-
- uint16_t aux[4];
- const uint8_t * sc = (const uint8_t *)aux;
-
- __global const struct block_q4_K * x = xx + ib0;
-
- tmp[16 * ix + tid] = 0;
-
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
-
- __global const uint8_t * q1 = x[i].qs + q_offset;
- __global const uint8_t * q2 = q1 + 64;
- __global const float * y1 = yy + i*QK_K + y_offset;
- __global const float * y2 = y1 + 128;
-
- const float dall = vload_half(0, &x[i].d);
- const float dmin = vload_half(0, &x[i].dmin);
-
- __global const uint16_t * a = (__global const uint16_t *)x[i].scales;
- aux[0] = a[im+0] & kmask1;
- aux[1] = a[im+2] & kmask1;
- aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
- aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
-
- float4 s = (float4)(0.f);
- float smin = 0;
- for (int l = 0; l < n; ++l) {
- s.x += y1[l] * (q1[l] & 0xF); s.y += y1[l+32] * (q1[l] >> 4);
- s.z += y2[l] * (q2[l] & 0xF); s.w += y2[l+32] * (q2[l] >> 4);
- smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
- }
- tmp[16 * ix + tid] += dall * (s.x * sc[0] + s.y * sc[1] + s.z * sc[4] + s.w * sc[5]) - dmin * smin;
-
- }
-
- // sum up partial sums and write back result
- barrier(CLK_LOCAL_MEM_FENCE);
- for (int s=16; s>0; s>>=1) {
- if (tid < s) {
- tmp[tid] += tmp[tid + s];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if (tid == 0) {
- dst[row] = tmp[0];
- }
-}
-
-__kernel void dequantize_mul_mat_vec_q5_K(__global const struct block_q5_K * xx, __local float* tmp, __global float* yy, __global float* dst, const int ncols) {
-
- const uint16_t kmask1 = 0x3f3f;
- const uint16_t kmask2 = 0x0f0f;
- const uint16_t kmask3 = 0xc0c0;
-
- const int row = get_group_id(0);
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row + get_global_offset(0);
-
- const int tid = get_local_id(0)/2; // 0...15
- const int ix = get_local_id(0)%2;
-
- const int il = tid/4; // 0...3
- const int ir = tid - 4*il;// 0...3
- const int n = 2;
-
- const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
- const int in = il%2;
-
- const int l0 = n*(2*ir + in);
- const int q_offset = 32*im + l0;
- const int y_offset = 64*im + l0;
-
- const uint8_t hm1 = 1 << (2*im);
- const uint8_t hm2 = hm1 << 4;
-
- uint16_t aux[4];
- const uint8_t * sc = (const uint8_t *)aux;
-
- __global const struct block_q5_K * x = xx + ib0;
-
- tmp[16 * ix + tid] = 0;
-
- for (int i = ix; i < num_blocks_per_row; i += 2) {
-
- __global const uint8_t * ql1 = x[i].qs + q_offset;
- __global const uint8_t * ql2 = ql1 + 64;
- __global const uint8_t * qh = x[i].qh + l0;
- __global const float * y1 = yy + i*QK_K + y_offset;
- __global const float * y2 = y1 + 128;
-
- const float dall = vload_half(0, &x[i].d);
- const float dmin = vload_half(0, &x[i].dmin);
-
- __global const uint16_t * a = (__global const uint16_t *)x[i].scales;
- aux[0] = a[im+0] & kmask1;
- aux[1] = a[im+2] & kmask1;
- aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
- aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
-
- float4 sum = (float4)(0.f);
- float smin = 0;
- for (int l = 0; l < n; ++l) {
- sum.x += y1[l+ 0] * ((ql1[l+ 0] & 0xF) + (qh[l+ 0] & (hm1 << 0) ? 16 : 0))
- + y1[l+16] * ((ql1[l+16] & 0xF) + (qh[l+16] & (hm1 << 0) ? 16 : 0));
- sum.y += y1[l+32] * ((ql1[l+ 0] >> 4) + (qh[l+ 0] & (hm1 << 1) ? 16 : 0))
- + y1[l+48] * ((ql1[l+16] >> 4) + (qh[l+16] & (hm1 << 1) ? 16 : 0));
- sum.z += y2[l+ 0] * ((ql2[l+ 0] & 0xF) + (qh[l+ 0] & (hm2 << 0) ? 16 : 0))
- + y2[l+16] * ((ql2[l+16] & 0xF) + (qh[l+16] & (hm2 << 0) ? 16 : 0));
- sum.w += y2[l+32] * ((ql2[l+ 0] >> 4) + (qh[l+ 0] & (hm2 << 1) ? 16 : 0))
- + y2[l+48] * ((ql2[l+16] >> 4) + (qh[l+16] & (hm2 << 1) ? 16 : 0));
- smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
- + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
- }
- tmp[16 * ix + tid] += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin;
-
- }
-
- // sum up partial sums and write back result
- barrier(CLK_LOCAL_MEM_FENCE);
- for (int s=16; s>0; s>>=1) {
- if (tid < s) {
- tmp[tid] += tmp[tid + s];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if (tid == 0) {
- dst[row] = tmp[0];
- }
-}
-
-__kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx, __local float* tmp, __global const float * yy, __global float * dst, const int ncols) {
-
- const int row = get_group_id(0);
-
- const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row + get_global_offset(0);
-
- __global const struct block_q6_K * x = xx + ib0;
-
- const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION; // 0...31 or 0...16
- const int ix = get_local_id(0)%K_QUANTS_PER_ITERATION; // 0 or 0, 1
-
- const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8
-
- const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
- const int in = tid - step*im; // 0...15 or 0...7
-
-\n#if K_QUANTS_PER_ITERATION == 1\n
- const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15
- const int is = 0;
-
-\n#else\n
-
- const int l0 = 4 * in; // 0, 4, 8, ..., 28
- const int is = in / 4;
-
-\n#endif\n
-
- const int ql_offset = 64*im + l0;
- const int qh_offset = 32*im + l0;
- const int s_offset = 8*im + is;
- const int y_offset = 128*im + l0;
-
- tmp[16 * ix + tid] = 0; // partial sum for thread in warp
-
- for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
-
- __global const float * y = yy + i * QK_K + y_offset;
- __global const uint8_t * ql = x[i].ql + ql_offset;
- __global const uint8_t * qh = x[i].qh + qh_offset;
- __global const int8_t * s = x[i].scales + s_offset;
-
- const float d = vload_half(0, &x[i].d);
-
-\n#if K_QUANTS_PER_ITERATION == 1\n
- float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
- + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
- + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
- + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
- + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32)
- + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32)
- + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
- +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32);
- tmp[16 * ix + tid] += sum;
-\n#else\n
- float sum = 0;
- for (int l = 0; l < 4; ++l) {
- sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
- + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
- + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32)
- + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32);
- }
- tmp[16 * ix + tid] += sum;
-\n#endif\n
-
- }
-
- // sum up partial sums and write back result
- barrier(CLK_LOCAL_MEM_FENCE);
- for (int s=16; s>0; s>>=1) {
- if (tid < s) {
- tmp[tid] += tmp[tid + s];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if (tid == 0) {
- dst[row] = tmp[0];
- }
-}
-);
-
-
-std::string dequant_template = MULTILINE_QUOTE(
-__kernel void KERNEL_NAME(__global X_TYPE* x, __global float* y) {
- const int i = get_group_id(0)*get_local_size(0) + get_local_id(0)*2;
-
- if (i >= get_global_size(0)) {
- return;
- }
-
- const uint qk = QUANT_K;
- const uint qr = QUANT_R;
-
- const int ib = i/qk + get_global_offset(0); // block index
- const int iqs = (i%qk)/qr; // quant index
- const int iybs = i - i%qk; // y block start index
- const int y_offset = qr == 1 ? 1 : qk/2;
-
- // dequantize
- float v0, v1;
- DEQUANT_FUNC(x, ib, iqs, &v0, &v1);
- y[iybs + iqs + 0] = v0;
- y[iybs + iqs + y_offset] = v1;
-}
-);
-
-std::string dequant_mul_mat_vec_template = MULTILINE_QUOTE(
-__kernel void KERNEL_NAME(__global X_TYPE* x, __local float* tmp, __global float* y, __global float* dst, const int ncols) {
- const int local_size = get_local_size(0);
- const int row = get_group_id(0);
- const int tid = get_local_id(0);
-
- const uint qk = QUANT_K;
- const uint qr = QUANT_R;
-
- const int col_step = local_size * 2;
- const int y_offset = qr == 1 ? 1 : qk/2;
-
- x += get_global_offset(0);
-
- tmp[tid] = 0;
-
- for (int col = tid*2; col < ncols; col += col_step) {
- const int ib = (row*ncols + col)/qk; // block index
- const int iqs = (col%qk)/qr; // quant index
- const int iybs = col - col%qk; // y block start index
-
- // dequantize
- float v0, v1;
- DEQUANT_FUNC(x, ib, iqs, &v0, &v1);
-
- // matrix multiplication
- tmp[tid] += v0 * y[iybs + iqs + 0];
- tmp[tid] += v1 * y[iybs + iqs + y_offset];
- }
-
- // sum up partial sums and write back result
- barrier(CLK_LOCAL_MEM_FENCE);
- for (int s=local_size/2; s>0; s>>=1) {
- if (tid < s) {
- tmp[tid] += tmp[tid + s];
- }
- barrier(CLK_LOCAL_MEM_FENCE);
- }
- if (tid == 0) {
- dst[row] = tmp[0];
- }
-}
-
-);
-
-
-std::string mul_template = MULTILINE_QUOTE(
-__kernel void KERNEL_NAME(__global TYPE* x, const int x_offset, __global TYPE* y, const int y_offset, __global TYPE* dst, const int dst_offset, const int ky) {
- const int i = get_group_id(0)*get_local_size(0) + get_local_id(0);
-
- if (i >= get_global_size(0)) {
- return;
- }
-
- dst[dst_offset + i] = x[x_offset + i] * y[y_offset + i%ky];
-}
-);
-
-std::string add_template = MULTILINE_QUOTE(
-__kernel void add_f32(__global float * x, const int x_offset, __global float * y, const int y_offset, __global float * dst, const int dst_offset, const int ky) {
- const int i = get_group_id(0)*get_local_size(0) + get_local_id(0);
-
- if (i >= get_global_size(0)) {
- return;
- }
-
- dst[dst_offset + i] = x[x_offset + i] + y[y_offset + i%ky];
-}
-);
-
-#define CL_CHECK(err) \
- do { \
- cl_int err_ = (err); \
- if (err_ != CL_SUCCESS) { \
- fprintf(stderr, "ggml_opencl: %s error %d at %s:%d\n", \
- #err, err_, __FILE__, __LINE__); \
- exit(1); \
- } \
- } while (0)
-
-#define CLBLAST_CHECK(err) \
- do { \
- CLBlastStatusCode err_ = (err); \
- if (err_ != CLBlastSuccess) { \
- fprintf(stderr, "ggml_opencl: %s error %d at %s:%d\n", \
- #err, err_, __FILE__, __LINE__); \
- exit(1); \
- } \
- } while (0)
-
-std::array<std::string, 5> dequant_str_keys = {
-    "KERNEL_NAME", "X_TYPE", "QUANT_K", "QUANT_R", "DEQUANT_FUNC"
-};
-
-std::array<std::string, 30> dequant_str_values = {
- "dequantize_row_q4_0", "struct block_q4_0", "QK4_0", "QR4_0", "dequantize_q4_0",
- "dequantize_row_q4_1", "struct block_q4_1", "QK4_1", "QR4_1", "dequantize_q4_1",
- "dequantize_row_q5_0", "struct block_q5_0", "QK5_0", "QR5_0", "dequantize_q5_0",
- "dequantize_row_q5_1", "struct block_q5_1", "QK5_1", "QR5_1", "dequantize_q5_1",
- "dequantize_row_q8_0", "struct block_q8_0", "QK8_0", "QR8_0", "dequantize_q8_0",
- "convert_row_f16", "half", "1", "1", "convert_f16"
-};
-
-std::array<std::string, 30> dequant_mul_mat_vec_str_values = {
- "dequantize_mul_mat_vec_q4_0", "struct block_q4_0", "QK4_0", "QR4_0", "dequantize_q4_0",
- "dequantize_mul_mat_vec_q4_1", "struct block_q4_1", "QK4_1", "QR4_1", "dequantize_q4_1",
- "dequantize_mul_mat_vec_q5_0", "struct block_q5_0", "QK5_0", "QR5_0", "dequantize_q5_0",
- "dequantize_mul_mat_vec_q5_1", "struct block_q5_1", "QK5_1", "QR5_1", "dequantize_q5_1",
- "dequantize_mul_mat_vec_q8_0", "struct block_q8_0", "QK8_0", "QR8_0", "dequantize_q8_0",
- "convert_mul_mat_vec_f16", "half", "1", "1", "convert_f16"
-};
-
-std::array<std::string, 2> mul_str_keys = {
-    "KERNEL_NAME", "TYPE"
-};
-std::array<std::string, 2> mul_str_values = {
-    "mul_f32", "float"
-};
-
-static std::string& replace(std::string& s, const std::string& from, const std::string& to) {
- size_t pos = 0;
- while ((pos = s.find(from, pos)) != std::string::npos) {
- s.replace(pos, from.length(), to);
- pos += to.length();
- }
- return s;
-}
-
-static std::string generate_kernels() {
- std::stringstream src;
- src << program_source << '\n';
- src << k_quants_source << '\n';
- for (size_t i = 0; i < dequant_str_values.size(); i += dequant_str_keys.size()) {
- std::string dequant_kernel = dequant_template;
- std::string dmmv_kernel = dequant_mul_mat_vec_template;
- for (size_t j = 0; j < dequant_str_keys.size(); j++) {
- replace(dequant_kernel, dequant_str_keys[j], dequant_str_values[i + j]);
- replace(dmmv_kernel, dequant_str_keys[j], dequant_mul_mat_vec_str_values[i + j]);
- }
- src << dequant_kernel << '\n';
- src << dmmv_kernel << '\n';
- }
- for (size_t i = 0; i < mul_str_values.size(); i += mul_str_keys.size()) {
- std::string mul_kernel = mul_template;
- for (size_t j = 0; j < mul_str_keys.size(); j++) {
- replace(mul_kernel, mul_str_keys[j], mul_str_values[i + j]);
- }
- src << mul_kernel << '\n';
- }
- src << add_template << '\n';
-
- return src.str();
-}
-
-static cl_platform_id platform;
-static cl_device_id device;
-static cl_context context;
-static cl_command_queue queue;
-static cl_program program;
-static cl_kernel convert_row_f16_cl;
-static cl_kernel dequantize_row_q4_0_cl, dequantize_row_q4_1_cl, dequantize_row_q5_0_cl, dequantize_row_q5_1_cl, dequantize_row_q8_0_cl;
-static cl_kernel dequantize_mul_mat_vec_q4_0_cl, dequantize_mul_mat_vec_q4_1_cl, dequantize_mul_mat_vec_q5_0_cl, dequantize_mul_mat_vec_q5_1_cl, dequantize_mul_mat_vec_q8_0_cl, convert_mul_mat_vec_f16_cl;
-static cl_kernel dequantize_block_q2_k_cl, dequantize_block_q3_k_cl, dequantize_block_q4_k_cl, dequantize_block_q5_k_cl, dequantize_block_q6_k_cl;
-static cl_kernel dequantize_mul_mat_vec_q2_K_cl, dequantize_mul_mat_vec_q3_K_cl, dequantize_mul_mat_vec_q4_K_cl, dequantize_mul_mat_vec_q5_K_cl, dequantize_mul_mat_vec_q6_K_cl;
-static cl_kernel mul_f32_cl;
-static cl_kernel add_f32_cl;
-static bool fp16_support;
-
-static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, const char* program_buffer) {
- cl_program p;
- char *program_log;
- size_t program_size;
- size_t log_size;
- int err;
-
- program_size = strlen(program_buffer);
-
- p = clCreateProgramWithSource(ctx, 1, (const char**)&program_buffer, &program_size, &err);
- if(err < 0) {
- fprintf(stderr, "OpenCL error creating program");
- exit(1);
- }
-
- std::string compile_opts = "-cl-mad-enable -cl-unsafe-math-optimizations -cl-finite-math-only -cl-fast-relaxed-math "
- "-DQK4_0=32 -DQR4_0=2 -DQK4_1=32 -DQR4_1=2 -DQK5_0=32 -DQR5_0=2 -DQK5_1=32 -DQR5_1=2 -DQK8_0=32 -DQR8_0=1 "
- "-DQK_K=256 -DK_QUANTS_PER_ITERATION=" + std::to_string(K_QUANTS_PER_ITERATION);
-
- err = clBuildProgram(p, 0, NULL, compile_opts.c_str(), NULL, NULL);
- if(err < 0) {
-
- clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
- program_log = (char*) malloc(log_size + 1);
- program_log[log_size] = '\0';
- clGetProgramBuildInfo(p, dev, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL);
- fprintf(stderr, "ggml_opencl: kernel compile error:\n\n%s\n", program_log);
- free(program_log);
- exit(1);
- }
-
- return p;
-}
-
-void ggml_cl_init(void) {
- static bool initialized = false;
- if (initialized) {
- return;
- }
- initialized = true;
-
- cl_int err;
-
- struct cl_device;
- struct cl_platform {
- cl_platform_id id;
- unsigned number;
- char name[128];
- char vendor[128];
- struct cl_device * devices;
- unsigned n_devices;
- struct cl_device * default_device;
- };
-
- struct cl_device {
- struct cl_platform * platform;
- cl_device_id id;
- unsigned number;
- cl_device_type type;
- char name[128];
- };
-
- enum { NPLAT = 16, NDEV = 16 };
-
- struct cl_platform platforms[NPLAT];
- unsigned n_platforms = 0;
- struct cl_device devices[NDEV];
- unsigned n_devices = 0;
- struct cl_device * default_device = NULL;
-
- platform = NULL;
- device = NULL;
-
- cl_platform_id platform_ids[NPLAT];
- CL_CHECK(clGetPlatformIDs(NPLAT, platform_ids, &n_platforms));
-
- for (unsigned i = 0; i < n_platforms; i++) {
- struct cl_platform * p = &platforms[i];
- p->number = i;
- p->id = platform_ids[i];
- CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_NAME, sizeof(p->name), &p->name, NULL));
- CL_CHECK(clGetPlatformInfo(p->id, CL_PLATFORM_VENDOR, sizeof(p->vendor), &p->vendor, NULL));
-
- cl_device_id device_ids[NDEV];
- cl_int clGetDeviceIDsError = clGetDeviceIDs(p->id, CL_DEVICE_TYPE_ALL, NDEV, device_ids, &p->n_devices);
- if (clGetDeviceIDsError == CL_DEVICE_NOT_FOUND) {
- p->n_devices = 0;
- } else {
- CL_CHECK(clGetDeviceIDsError);
- }
- p->devices = p->n_devices > 0 ? &devices[n_devices] : NULL;
- p->default_device = NULL;
-
- for (unsigned j = 0; j < p->n_devices; j++) {
- struct cl_device * d = &devices[n_devices];
- d->number = n_devices++;
- d->id = device_ids[j];
- d->platform = p;
- CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_NAME, sizeof(d->name), &d->name, NULL));
- CL_CHECK(clGetDeviceInfo(d->id, CL_DEVICE_TYPE, sizeof(d->type), &d->type, NULL));
-
- if (p->default_device == NULL && d->type == CL_DEVICE_TYPE_GPU) {
- p->default_device = d;
- }
- }
-
- if (default_device == NULL && p->default_device != NULL) {
- default_device = p->default_device;
- }
- }
-
- if (n_devices == 0) {
- fprintf(stderr, "ggml_opencl: could not find any OpenCL devices.\n");
- exit(1);
- }
-
- char * user_platform_string = getenv("GGML_OPENCL_PLATFORM");
- char * user_device_string = getenv("GGML_OPENCL_DEVICE");
- int user_platform_number = -1;
- int user_device_number = -1;
-
- unsigned n;
- if (user_platform_string != NULL && sscanf(user_platform_string, " %u", &n) == 1 && n < n_platforms) {
- user_platform_number = (int)n;
- }
- if (user_device_string != NULL && sscanf(user_device_string, " %u", &n) == 1 && n < n_devices) {
- user_device_number = (int)n;
- }
- if (user_platform_number != -1 && user_device_number != -1) {
- cl_platform* platform = &platforms[user_platform_number];
- if ((unsigned)user_device_number >= platform->n_devices) {
- fprintf(stderr, "ggml_opencl: invalid device number %d\n", user_device_number);
- exit(1);
- }
- default_device = &platform->devices[user_device_number];
- } else {
-
- struct cl_device * selected_devices = devices;
- unsigned n_selected_devices = n_devices;
-
- if (user_platform_number == -1 && user_platform_string != NULL && user_platform_string[0] != 0) {
- for (unsigned i = 0; i < n_platforms; i++) {
- struct cl_platform * p = &platforms[i];
- if (strstr(p->name, user_platform_string) != NULL ||
- strstr(p->vendor, user_platform_string) != NULL) {
- user_platform_number = (int)i;
- break;
- }
- }
- if (user_platform_number == -1) {
- fprintf(stderr, "ggml_opencl: no platform matching '%s' was found.\n", user_platform_string);
- exit(1);
- }
- }
- if (user_platform_number != -1) {
- struct cl_platform * p = &platforms[user_platform_number];
- selected_devices = p->devices;
- n_selected_devices = p->n_devices;
- default_device = p->default_device;
- if (n_selected_devices == 0) {
- fprintf(stderr, "ggml_opencl: selected platform '%s' does not have any devices.\n", p->name);
- exit(1);
- }
- }
-
- if (user_device_number == -1 && user_device_string != NULL && user_device_string[0] != 0) {
- for (unsigned i = 0; i < n_selected_devices; i++) {
- struct cl_device * d = &selected_devices[i];
- if (strstr(d->name, user_device_string) != NULL) {
- user_device_number = d->number;
- break;
- }
- }
- if (user_device_number == -1) {
- fprintf(stderr, "ggml_opencl: no device matching '%s' was found.\n", user_device_string);
- exit(1);
- }
- }
- if (user_device_number != -1) {
- selected_devices = &devices[user_device_number];
- n_selected_devices = 1;
- default_device = &selected_devices[0];
- }
-
- GGML_ASSERT(n_selected_devices > 0);
-
- if (default_device == NULL) {
- default_device = &selected_devices[0];
- }
- }
-
- fprintf(stderr, "ggml_opencl: selecting platform: '%s'\n", default_device->platform->name);
- fprintf(stderr, "ggml_opencl: selecting device: '%s'\n", default_device->name);
- if (default_device->type != CL_DEVICE_TYPE_GPU) {
- fprintf(stderr, "ggml_opencl: warning, not a GPU: '%s'.\n", default_device->name);
- }
-
- platform = default_device->platform->id;
- device = default_device->id;
-
- size_t ext_str_size;
- clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, 0, NULL, &ext_str_size);
- char *ext_buffer = (char *)alloca(ext_str_size + 1);
- clGetDeviceInfo(device, CL_DEVICE_EXTENSIONS, ext_str_size, ext_buffer, NULL);
- ext_buffer[ext_str_size] = '\0'; // ensure it is null terminated
- // Disabled due to faulty outputs
- // Check if ext_buffer contains cl_khr_fp16
- fp16_support = false; // strstr(ext_buffer, "cl_khr_fp16") != NULL;
- // fprintf(stderr, "ggml_opencl: device FP16 support: %s\n", fp16_support ? "true" : "false");
-
- cl_context_properties properties[] = {
- (intptr_t)CL_CONTEXT_PLATFORM, (intptr_t)platform, 0
- };
-
- CL_CHECK((context = clCreateContext(properties, 1, &device, NULL, NULL, &err), err));
-
- CL_CHECK((queue = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &err),
- (err != CL_INVALID_QUEUE_PROPERTIES && err != CL_INVALID_VALUE ? err :
- (queue = clCreateCommandQueue(context, device, 0, &err), err)
- )));
-
- const std::string kernel_src = generate_kernels();
-
- program = build_program_from_source(context, device, kernel_src.c_str());
-
- // FP16 to FP32 kernel
- CL_CHECK((convert_row_f16_cl = clCreateKernel(program, "convert_row_f16", &err), err));
-
- // Dequantize kernels
- CL_CHECK((dequantize_row_q4_0_cl = clCreateKernel(program, "dequantize_row_q4_0", &err), err));
- CL_CHECK((dequantize_row_q4_1_cl = clCreateKernel(program, "dequantize_row_q4_1", &err), err));
- CL_CHECK((dequantize_row_q5_0_cl = clCreateKernel(program, "dequantize_row_q5_0", &err), err));
- CL_CHECK((dequantize_row_q5_1_cl = clCreateKernel(program, "dequantize_row_q5_1", &err), err));
- CL_CHECK((dequantize_row_q8_0_cl = clCreateKernel(program, "dequantize_row_q8_0", &err), err));
- CL_CHECK((dequantize_row_q8_0_cl = clCreateKernel(program, "dequantize_row_q8_0", &err), err));
- CL_CHECK((dequantize_block_q2_k_cl = clCreateKernel(program, "dequantize_block_q2_K", &err), err));
- CL_CHECK((dequantize_block_q3_k_cl = clCreateKernel(program, "dequantize_block_q3_K", &err), err));
- CL_CHECK((dequantize_block_q4_k_cl = clCreateKernel(program, "dequantize_block_q4_K", &err), err));
- CL_CHECK((dequantize_block_q5_k_cl = clCreateKernel(program, "dequantize_block_q5_K", &err), err));
- CL_CHECK((dequantize_block_q6_k_cl = clCreateKernel(program, "dequantize_block_q6_K", &err), err));
-
- // dequant mul mat kernel
- CL_CHECK((dequantize_mul_mat_vec_q4_0_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q4_0", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q4_1_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q4_1", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q5_0_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q5_0", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q5_1_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q5_1", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q8_0_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q8_0", &err), err));
- CL_CHECK((convert_mul_mat_vec_f16_cl = clCreateKernel(program, "convert_mul_mat_vec_f16", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q2_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q2_K", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q3_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q3_K", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q4_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q4_K", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q5_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q5_K", &err), err));
- CL_CHECK((dequantize_mul_mat_vec_q6_K_cl = clCreateKernel(program, "dequantize_mul_mat_vec_q6_K", &err), err));
-
- // mul kernel
- CL_CHECK((mul_f32_cl = clCreateKernel(program, "mul_f32", &err), err));
-
- CL_CHECK((add_f32_cl = clCreateKernel(program, "add_f32", &err), err));
-}
-
-static cl_kernel* ggml_get_to_fp32_cl(ggml_type type) {
- switch (type) {
- case GGML_TYPE_Q4_0:
- return &dequantize_row_q4_0_cl;
- case GGML_TYPE_Q4_1:
- return &dequantize_row_q4_1_cl;
- case GGML_TYPE_Q5_0:
- return &dequantize_row_q5_0_cl;
- case GGML_TYPE_Q5_1:
- return &dequantize_row_q5_1_cl;
- case GGML_TYPE_Q8_0:
- return &dequantize_row_q8_0_cl;
- case GGML_TYPE_Q2_K:
- return &dequantize_block_q2_k_cl;
- case GGML_TYPE_Q3_K:
- return &dequantize_block_q3_k_cl;
- case GGML_TYPE_Q4_K:
- return &dequantize_block_q4_k_cl;
- case GGML_TYPE_Q5_K:
- return &dequantize_block_q5_k_cl;
- case GGML_TYPE_Q6_K:
- return &dequantize_block_q6_k_cl;
- case GGML_TYPE_F16:
- return &convert_row_f16_cl;
- default:
- return nullptr;
- }
-}
-
-static size_t ggml_cl_global_denom(ggml_type type) {
- switch (type) {
- case GGML_TYPE_Q4_0:
- case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q5_0:
- case GGML_TYPE_Q5_1:
- case GGML_TYPE_Q8_0:
- return 1;
- case GGML_TYPE_Q2_K:
- case GGML_TYPE_Q3_K:
- return 4;
- case GGML_TYPE_Q4_K:
- return 8;
- case GGML_TYPE_Q5_K:
- case GGML_TYPE_Q6_K:
- return 4;
- case GGML_TYPE_F16:
- default:
- return 1;
- }
-}
-
-static size_t ggml_cl_local_size(ggml_type type) {
- switch (type) {
- case GGML_TYPE_Q4_0:
- case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q5_0:
- case GGML_TYPE_Q5_1:
- case GGML_TYPE_Q8_0:
- return 0;
- case GGML_TYPE_Q2_K:
- case GGML_TYPE_Q3_K:
- return 64;
- case GGML_TYPE_Q4_K:
- return 32;
- case GGML_TYPE_Q5_K:
- case GGML_TYPE_Q6_K:
- return 64;
- case GGML_TYPE_F16:
- default:
- return 0;
- }
-}
-
-static cl_kernel* ggml_get_dequantize_mul_mat_vec_cl(ggml_type type) {
- switch (type) {
- case GGML_TYPE_Q4_0:
- return &dequantize_mul_mat_vec_q4_0_cl;
- case GGML_TYPE_Q4_1:
- return &dequantize_mul_mat_vec_q4_1_cl;
- case GGML_TYPE_Q5_0:
- return &dequantize_mul_mat_vec_q5_0_cl;
- case GGML_TYPE_Q5_1:
- return &dequantize_mul_mat_vec_q5_1_cl;
- case GGML_TYPE_Q8_0:
- return &dequantize_mul_mat_vec_q8_0_cl;
- case GGML_TYPE_F16:
- return &convert_mul_mat_vec_f16_cl;
- case GGML_TYPE_Q2_K:
- return &dequantize_mul_mat_vec_q2_K_cl;
- case GGML_TYPE_Q3_K:
- return &dequantize_mul_mat_vec_q3_K_cl;
- case GGML_TYPE_Q4_K:
- return &dequantize_mul_mat_vec_q4_K_cl;
- case GGML_TYPE_Q5_K:
- return &dequantize_mul_mat_vec_q5_K_cl;
- case GGML_TYPE_Q6_K:
- return &dequantize_mul_mat_vec_q6_K_cl;
- default:
- return nullptr;
- }
-}
-
-// buffer pool for cl
-#define MAX_CL_BUFFERS 256
-
-struct scoped_spin_lock {
- std::atomic_flag& lock;
- scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
- while (lock.test_and_set(std::memory_order_acquire)) {
- ; // spin
- }
- }
- ~scoped_spin_lock() {
- lock.clear(std::memory_order_release);
- }
- scoped_spin_lock(const scoped_spin_lock&) = delete;
- scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
-};
-
-struct cl_buffer {
- cl_mem mem;
- size_t size = 0;
-};
-
-static cl_buffer g_cl_buffer_pool[MAX_CL_BUFFERS];
-static std::atomic_flag g_cl_pool_lock = ATOMIC_FLAG_INIT;
-
-static cl_mem ggml_cl_pool_malloc(size_t size, size_t * actual_size) {
- scoped_spin_lock lock(g_cl_pool_lock);
- cl_int err;
-
- int best_i = -1;
- size_t best_size = std::numeric_limits<size_t>::max(); //smallest unused buffer that fits our needs
- int worst_i = -1;
- size_t worst_size = 0; //largest unused buffer seen so far
- for (int i = 0; i < MAX_CL_BUFFERS; ++i) {
- cl_buffer &b = g_cl_buffer_pool[i];
- if (b.size > 0 && b.size >= size && b.size < best_size)
- {
- best_i = i;
- best_size = b.size;
- }
- if (b.size > 0 && b.size > worst_size)
- {
- worst_i = i;
- worst_size = b.size;
- }
- }
- if(best_i!=-1) //found the smallest buffer that fits our needs
- {
- cl_buffer& b = g_cl_buffer_pool[best_i];
- cl_mem mem = b.mem;
- *actual_size = b.size;
- b.size = 0;
- return mem;
- }
- if(worst_i!=-1) //no buffer that fits our needs, resize largest one to save memory
- {
- cl_buffer& b = g_cl_buffer_pool[worst_i];
- cl_mem mem = b.mem;
- b.size = 0;
- clReleaseMemObject(mem);
- }
- cl_mem mem;
- CL_CHECK((mem = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err), err));
- *actual_size = size;
- return mem;
-}
-
-static void ggml_cl_pool_free(cl_mem mem, size_t size) {
- scoped_spin_lock lock(g_cl_pool_lock);
-
- for (int i = 0; i < MAX_CL_BUFFERS; ++i) {
- cl_buffer& b = g_cl_buffer_pool[i];
- if (b.size == 0) {
- b.mem = mem;
- b.size = size;
- return;
- }
- }
- fprintf(stderr, "WARNING: cl buffer pool full, increase MAX_CL_BUFFERS\n");
- clReleaseMemObject(mem);
-}
-
-void ggml_cl_free_data(const struct ggml_tensor* tensor) {
- if (tensor->backend != GGML_BACKEND_TYPE_GPU) {
- return;
- }
-
- cl_mem mem = (cl_mem)tensor->extra;
- clReleaseMemObject(mem);
-}
-
-static cl_int ggml_cl_h2d_tensor_2d(cl_command_queue queue, cl_mem dst, size_t offset, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cl_event* ev) {
- cl_int err;
- const uint64_t ne0 = src->ne[0];
- const uint64_t ne1 = src->ne[1];
- const uint64_t nb0 = src->nb[0];
- const uint64_t nb1 = src->nb[1];
- const uint64_t nb2 = src->nb[2];
- const uint64_t nb3 = src->nb[3];
- const enum ggml_type type = src->type;
- const size_t ts = ggml_type_size(type);
- const size_t bs = ggml_blck_size(type);
- const uint64_t row_size = ts*ne0/bs;
-
- const char * x = (const char *) src->data + i2*nb2 + i3*nb3;
- if (nb0 == ts && nb1 == row_size) {
- return clEnqueueWriteBuffer(queue, dst, CL_FALSE, offset, ne1*row_size, x, 0, NULL, ev);
- }
- if (nb0 == ts) {
- const size_t buffer_origin[3] = { offset, 0, 0 };
- const size_t host_origin[3] = { 0, 0, 0 };
- const size_t region[3] = { row_size, ne1, 1 };
- return clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, row_size, 0, nb1, 0, x, 0, NULL, ev);
- }
- std::vector<cl_event> events;
- if (ev && ne1>1) events.reserve(ne1-1);
- for (uint64_t i1 = 0; i1 < ne1; i1++) {
- // pretend the row is a matrix with cols=1
- const size_t buffer_origin[3] = { offset + i1*row_size, 0, 0 };
- const size_t host_origin[3] = { 0, 0, 0 };
- const size_t region[3] = { ts, ne0/bs, 1 };
- // if an event is requested, make the last write wait for all previous writes to complete
- if (ev && i1) {
- events.push_back(*ev);
- }
- cl_uint nevents = i1 == ne1-1 ? events.size() : 0U;
- err = clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, ts, 0, nb0, 0, x + i1*nb1, nevents, nevents ? events.data() : nullptr, ev);
- if (err != CL_SUCCESS) {
- for (auto event : events) {
- clReleaseEvent(event);
- }
- return err;
- }
- }
- for (auto event : events) {
- CL_CHECK(clReleaseEvent(event));
- }
- return CL_SUCCESS;
-}
-
-static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src1->backend == GGML_BACKEND_TYPE_GPU);
- const int64_t ne00 = src0->ne[0];
- const int64_t ne01 = src0->ne[1];
- const int64_t ne02 = src0->ne[2];
- const int64_t ne03 = src0->ne[3];
- const int64_t ne10 = src1->ne[0];
- const int64_t ne11 = src1->ne[1];
- const int64_t ne12 = src1->ne[2];
- const int64_t ne13 = src1->ne[3];
- const int nb2 = dst->nb[2];
- const int nb3 = dst->nb[3];
- size_t x_size;
- size_t d_size;
-
- cl_mem d_X = ggml_cl_pool_malloc(ne00 * ne01 * sizeof(float), &x_size); // src0
- cl_mem d_Y = (cl_mem) src1->extra; // src1 is already on device, broadcasted.
- cl_mem d_D = ggml_cl_pool_malloc(ne00 * ne01 * sizeof(float), &d_size); // dst
-
-
- for (int64_t i03 = 0; i03 < ne03; i03++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- cl_event ev;
-
- // copy src0 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, &ev));
-
- const int64_t i13 = i03%ne13;
- const int64_t i12 = i02%ne12;
- const int i1 = i13*ne12*ne11 + i12*ne11;
-
- cl_int x_offset = 0;
- cl_int y_offset = i1*ne10;
- cl_int d_offset = 0;
-
- size_t global = ne00 * ne01;
- cl_int ky = ne10 * ne11;
-
- CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
- CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
-
- CL_CHECK(clReleaseEvent(ev));
- CL_CHECK(clFinish(queue));
-
- // copy dst to host
- float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * ne00*ne01, d, 0, NULL, NULL));
- }
- }
- ggml_cl_pool_free(d_X, x_size);
- ggml_cl_pool_free(d_D, d_size);
-}
-
-void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cl_mul_f32(src0, src1, dst);
-}
-
-static void ggml_cl_add_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src1->backend == GGML_BACKEND_TYPE_GPU);
- const int64_t ne00 = src0->ne[0];
- const int64_t ne01 = src0->ne[1];
- const int64_t ne02 = src0->ne[2];
- const int64_t ne03 = src0->ne[3];
- const int64_t ne10 = src1->ne[0];
- const int64_t ne11 = src1->ne[1];
- const int64_t ne12 = src1->ne[2];
- const int64_t ne13 = src1->ne[3];
- const int nb2 = dst->nb[2];
- const int nb3 = dst->nb[3];
- size_t x_size;
- size_t d_size;
-
- cl_mem d_X = ggml_cl_pool_malloc(ne00 * ne01 * sizeof(float), &x_size); // src0
- cl_mem d_Y = (cl_mem) src1->extra; // src1 is already on device, broadcasted.
- cl_mem d_D = ggml_cl_pool_malloc(ne00 * ne01 * sizeof(float), &d_size); // dst
-
-
- for (int64_t i03 = 0; i03 < ne03; i03++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- cl_event ev;
-
- // copy src0 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, &ev));
-
- const int64_t i13 = i03%ne13;
- const int64_t i12 = i02%ne12;
- const int i1 = i13*ne12*ne11 + i12*ne11;
-
- cl_int x_offset = 0;
- cl_int y_offset = i1*ne10;
- cl_int d_offset = 0;
-
- size_t global = ne00 * ne01;
- cl_int ky = ne10 * ne11;
-
- CL_CHECK(clSetKernelArg(add_f32_cl, 0, sizeof(cl_mem), &d_X));
- CL_CHECK(clSetKernelArg(add_f32_cl, 1, sizeof(cl_int), &x_offset));
- CL_CHECK(clSetKernelArg(add_f32_cl, 2, sizeof(cl_mem), &d_Y));
- CL_CHECK(clSetKernelArg(add_f32_cl, 3, sizeof(cl_int), &y_offset));
- CL_CHECK(clSetKernelArg(add_f32_cl, 4, sizeof(cl_mem), &d_D));
- CL_CHECK(clSetKernelArg(add_f32_cl, 5, sizeof(cl_int), &d_offset));
- CL_CHECK(clSetKernelArg(add_f32_cl, 6, sizeof(cl_int), &ky));
- CL_CHECK(clEnqueueNDRangeKernel(queue, add_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
-
- CL_CHECK(clReleaseEvent(ev));
- CL_CHECK(clFinish(queue));
-
- // copy dst to host
- float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * ne00*ne01, d, 0, NULL, NULL));
- }
- }
- ggml_cl_pool_free(d_X, x_size);
- ggml_cl_pool_free(d_D, d_size);
-}
-
-void ggml_cl_add(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cl_add_f32(src0, src1, dst);
-}
-
-static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- const int64_t ne00 = src0->ne[0];
- const int64_t ne01 = src0->ne[1];
- const int64_t ne02 = src0->ne[2];
- const int64_t ne03 = src0->ne[3];
-
- const int64_t ne10 = src1->ne[0];
- const int64_t ne11 = src1->ne[1];
- const int64_t ne12 = src1->ne[2];
- const int64_t ne13 = src1->ne[3];
-
- const int nb2 = dst->nb[2];
- const int nb3 = dst->nb[3];
-
- const int64_t r2 = ne12 / ne02;
- const int64_t r3 = ne13 / ne03;
-
- const float alpha = 1.0f;
- const float beta = 0.0f;
- const int x_ne = ne01 * ne00;
- const int y_ne = ne11 * ne10;
- const int d_ne = ne11 * ne01;
-
- size_t x_size;
- size_t y_size;
- size_t d_size;
- cl_mem d_X;
- if (src0->backend == GGML_BACKEND_TYPE_GPU) { // NOLINT
- d_X = (cl_mem) src0->extra;
- } else {
- d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size);
- }
- cl_mem d_Y = src1->backend == GGML_BACKEND_TYPE_GPU ? (cl_mem) src1->extra : ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
- cl_mem d_D = dst->backend == GGML_BACKEND_TYPE_GPU ? (cl_mem) dst->extra : ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
-
- size_t x_offset = 0;
-
- for (int64_t i03 = 0; i03 < ne03; i03++) {
- // TODO: copy src0 here when r3>1
- for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- if (src0->backend == GGML_BACKEND_TYPE_GPU) {
- x_offset = (i03 * ne02 + i02) * x_ne;
- } else {
- // copy src0 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
- }
-
- for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
- // copy src1 to device
- if (src1->backend == GGML_BACKEND_TYPE_CPU) {
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
- }
-
- CL_CHECK(clFinish(queue));
-
- // compute
- cl_event ev_sgemm;
- clblast::StatusCode status = clblast::Gemm(clblast::Layout::kColMajor,
- clblast::Transpose::kYes, clblast::Transpose::kNo,
- ne01, ne11, ne10,
- alpha,
- d_X, x_offset, ne00,
- d_Y, 0, ne10,
- beta,
- d_D, 0, ne01,
- &queue, &ev_sgemm);
-
- if (status != clblast::StatusCode::kSuccess) {
- GGML_ASSERT(false);
- }
-
- // copy dst to host
- if (dst->backend == GGML_BACKEND_TYPE_CPU) {
- float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
- }
- }
- }
- }
- }
-
- if (src0->backend != GGML_BACKEND_TYPE_GPU) {
- ggml_cl_pool_free(d_X, x_size);
- }
- if (src1->backend != GGML_BACKEND_TYPE_GPU) {
- ggml_cl_pool_free(d_Y, y_size);
- }
- if (dst->backend != GGML_BACKEND_TYPE_GPU) {
- ggml_cl_pool_free(d_D, d_size);
- }
-}
-
-static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t wsize) {
- GGML_ASSERT(fp16_support);
-
- const int64_t ne00 = src0->ne[0];
- const int64_t ne01 = src0->ne[1];
- const int64_t ne02 = src0->ne[2];
- const int64_t ne03 = src0->ne[3];
-
- const int64_t ne10 = src1->ne[0];
- const int64_t ne11 = src1->ne[1];
- const int64_t ne12 = src1->ne[2];
- const int64_t ne13 = src1->ne[3];
-
- const int nb10 = src1->nb[0];
- const int nb11 = src1->nb[1];
- const int nb12 = src1->nb[2];
- const int nb13 = src1->nb[3];
-
- const int nb2 = dst->nb[2];
- const int nb3 = dst->nb[3];
-
- const int64_t r2 = ne12 / ne02;
- const int64_t r3 = ne13 / ne03;
-
- const ggml_fp16_t alpha = ggml_fp32_to_fp16(1.0f);
- const ggml_fp16_t beta = ggml_fp32_to_fp16(0.0f);
- const int x_ne = ne01 * ne00;
- const int y_ne = ne11 * ne10;
- const int d_ne = ne11 * ne01;
-
- GGML_ASSERT(wsize >= sizeof(ggml_fp16_t) * y_ne);
- GGML_ASSERT(wsize >= sizeof(ggml_fp16_t) * d_ne);
- ggml_fp16_t * const tmp = (ggml_fp16_t *) wdata;
-
- size_t x_size;
- size_t y_size;
- size_t d_size;
- cl_mem d_X;
- if (src0->backend == GGML_BACKEND_TYPE_GPU) { // NOLINT
- d_X = (cl_mem) src0->extra;
- } else {
- d_X = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * x_ne, &x_size);
- }
- cl_mem d_Y = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * y_ne, &y_size);
- cl_mem d_D = ggml_cl_pool_malloc(sizeof(ggml_fp16_t) * d_ne, &d_size);
-
- bool src1_cont_rows = nb10 == sizeof(float);
- bool src1_cont_cols = (size_t)nb11 == ne11*sizeof(float);
-
- size_t x_offset = 0;
-
- for (int64_t i03 = 0; i03 < ne03; i03++) {
- // TODO: copy src0 here when r3>1
- for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- if (src0->backend == GGML_BACKEND_TYPE_GPU) {
- x_offset = (i03 * ne02 + i02) * x_ne;
- } else {
- // copy src0 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
- }
-
- // FIXME: convert on device
-
- for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
- // convert src1 to fp16
- // TODO: use multiple threads
- char * src1i = (char *) src1->data + i13*nb13 + i12*nb12;
- if (src1_cont_rows) {
- if (src1_cont_cols) {
- ggml_fp32_to_fp16_row((float *) src1i, tmp, ne10*ne11);
- }
- else {
- for (int64_t i11 = 0; i11 < ne11; i11++) {
- ggml_fp32_to_fp16_row((float *) (src1i + i11*nb11), tmp + i11*ne10, ne10);
- }
- }
- }
- else {
- for (int64_t i11 = 0; i11 < ne11; i11++) {
- for (int64_t i10 = 0; i10 < ne10; i10++) {
- // very slow due to no inlining
- tmp[i11*ne10 + i10] = ggml_fp32_to_fp16(*(float *) (src1i + i11*nb11 + i10*nb10));
- }
- }
- }
-
- // copy src1 to device
- CL_CHECK(clEnqueueWriteBuffer(queue, d_Y, false, 0, sizeof(ggml_fp16_t) * y_ne, tmp, 0, NULL, NULL));
-
- CL_CHECK(clFinish(queue));
-
- // compute
- cl_event ev_sgemm;
- clblast::StatusCode status = clblast::Gemm(clblast::Layout::kColMajor,
- clblast::Transpose::kYes, clblast::Transpose::kNo,
- ne01, ne11, ne10,
- alpha,
- d_X, x_offset, ne00,
- d_Y, 0, ne10,
- beta,
- d_D, 0, ne01,
- &queue, &ev_sgemm);
-
- if (status != clblast::StatusCode::kSuccess) {
- GGML_ASSERT(false);
- }
-
- // copy dst to host, then convert to float
- if (dst->backend == GGML_BACKEND_TYPE_CPU) {
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));
- float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
- ggml_fp16_to_fp32_row(tmp, d, d_ne);
- } else {
- // FIXME: convert dst to fp32 on device
- }
- }
- }
- }
- }
-
- if (src0->backend != GGML_BACKEND_TYPE_GPU) {
- ggml_cl_pool_free(d_X, x_size);
- }
- ggml_cl_pool_free(d_Y, y_size);
- ggml_cl_pool_free(d_D, d_size);
-}
-
-static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- const int64_t ne00 = src0->ne[0];
- const int64_t ne01 = src0->ne[1];
- const int64_t ne02 = src0->ne[2];
- const int64_t ne03 = src0->ne[3];
-
- const int64_t ne10 = src1->ne[0];
- const int64_t ne11 = src1->ne[1];
- const int64_t ne12 = src1->ne[2];
- const int64_t ne13 = src1->ne[3];
-
- const int nb2 = dst->nb[2];
- const int nb3 = dst->nb[3];
- const ggml_type type = src0->type;
- const bool mul_mat_vec = ne11 == 1 && ne00%2 == 0;
-
- const int64_t r2 = ne12 / ne02;
- const int64_t r3 = ne13 / ne03;
-
- const float alpha = 1.0f;
- const float beta = 0.0f;
- const int x_ne = ne01 * ne00;
- const int y_ne = ne11 * ne10;
- const int d_ne = ne11 * ne01;
- const int x_bps = x_ne / ggml_blck_size(type); // blocks per 2D slice
- const size_t q_sz = ggml_type_size(type) * x_bps;
-
- size_t x_size;
- size_t y_size;
- size_t d_size;
- size_t q_size;
- cl_mem d_X;
- if (!mul_mat_vec) {
- d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size);
- }
- cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
- cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
- cl_mem d_Q;
- if (src0->backend == GGML_BACKEND_TYPE_CPU) {
- d_Q = ggml_cl_pool_malloc(q_sz, &q_size);
- }
-
- cl_kernel* to_fp32_cl = ggml_get_to_fp32_cl(type);
- cl_kernel* dmmv = ggml_get_dequantize_mul_mat_vec_cl(type);
- GGML_ASSERT(to_fp32_cl != nullptr);
-
- const size_t global_denom = ggml_cl_global_denom(type);
- const size_t local = mul_mat_vec ? CL_DMMV_LOCAL_SIZE : ggml_cl_local_size(type);
-
- size_t ev_idx = 0;
- std::vector<cl_event> events;
-
- for (int64_t i03 = 0; i03 < ne03; i03++) {
- // TODO: copy and dequantize src0 here when r3>1
- for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- // copy src0 to device if necessary
- if (src0->backend == GGML_BACKEND_TYPE_CPU) {
- events.emplace_back();
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++));
- } else if (src0->backend == GGML_BACKEND_TYPE_GPU) {
- d_Q = (cl_mem) src0->extra;
- } else {
- GGML_ASSERT(false);
- }
-
- if (!mul_mat_vec) {
- // convert src0 to fp32 on device
- const size_t global = x_ne / global_denom;
- const size_t offset = src0->backend == GGML_BACKEND_TYPE_GPU ? (i03 * ne02 + i02) * x_bps : 0;
- CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q));
- CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X));
- CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, &offset, &global, local > 0 ? &local : NULL, events.size(), !events.empty() ? events.data() : NULL, NULL));
- }
-
- int64_t i12 = i02 * r2;
- int64_t e12 = i12 + r2;
- events.reserve(e12 - i12);
- for (; i12 < e12; i12++) {
- if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel
- // copy src1 to device
- events.emplace_back();
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, events.data() + ev_idx++));
-
- // compute
- const size_t global = ne01 * local;
- const size_t offset = src0->backend == GGML_BACKEND_TYPE_GPU ? (i03 * ne02 + i02) * x_bps : 0;
- const cl_int ncols = ne00;
- events.emplace_back();
- CL_CHECK(clSetKernelArg(*dmmv, 0, sizeof(cl_mem), &d_Q));
- CL_CHECK(clSetKernelArg(*dmmv, 1, sizeof(float) * local, NULL));
- CL_CHECK(clSetKernelArg(*dmmv, 2, sizeof(cl_mem), &d_Y));
- CL_CHECK(clSetKernelArg(*dmmv, 3, sizeof(cl_mem), &d_D));
- CL_CHECK(clSetKernelArg(*dmmv, 4, sizeof(cl_int), &ncols));
- CL_CHECK(clEnqueueNDRangeKernel(queue, *dmmv, 1, &offset, &global, &local, events.size() - 1, events.data(), events.data() + ev_idx++));
- } else { // CLBlast matrix matrix multiplication
- // copy src1 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
-
- // wait for conversion
- CL_CHECK(clFinish(queue));
-
- // compute
- events.emplace_back();
- clblast::StatusCode status = clblast::Gemm(clblast::Layout::kColMajor,
- clblast::Transpose::kYes, clblast::Transpose::kNo,
- ne01, ne11, ne10,
- alpha,
- d_X, 0, ne00,
- d_Y, 0, ne10,
- beta,
- d_D, 0, ne01,
- &queue, events.data() + ev_idx++);
-
- if (status != clblast::StatusCode::kSuccess) {
- GGML_ASSERT(false);
- }
- }
-
- // copy dst to host
- float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &events[events.size() - 1], NULL));
- for (auto *event : events) {
- clReleaseEvent(event);
- }
-
- ev_idx = 0;
- events.clear();
- }
- }
- }
- }
-
- if (!mul_mat_vec) {
- ggml_cl_pool_free(d_X, x_size);
- }
- ggml_cl_pool_free(d_Y, y_size);
- ggml_cl_pool_free(d_D, d_size);
- if (src0->backend == GGML_BACKEND_TYPE_CPU) {
- ggml_cl_pool_free(d_Q, q_size);
- }
-}
-
-
-bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst) {
- const int64_t ne10 = src1->ne[0];
-
- const int64_t ne0 = dst->ne[0];
- const int64_t ne1 = dst->ne[1];
-
- // TODO: find the optimal values for these
- if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
- src1->type == GGML_TYPE_F32 &&
- dst->type == GGML_TYPE_F32 &&
- ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32) || src0->backend == GGML_BACKEND_TYPE_GPU)) {
- return true;
- }
-
- return false;
-}
-
-static bool ggml_cl_mul_mat_use_f16(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * /* dst */) {
- // If device doesn't support FP16
- if (!fp16_support) {
- return false;
- }
-
- size_t src0_sz = ggml_nbytes(src0);
- size_t src1_sz = ggml_nbytes(src1);
-
- // mul_mat_q: src0 is converted to fp32 on device
- size_t mul_mat_q_transfer = src0_sz + src1_sz;
-
- // mul_mat_f16: src1 is converted to fp16 on cpu
- size_t mul_mat_f16_transfer = src0_sz + sizeof(ggml_fp16_t) * ggml_nelements(src1);
-
- // choose the smaller one to transfer to the device
- // TODO: this is not always the best choice due to the overhead of converting to fp16
- return mul_mat_f16_transfer < mul_mat_q_transfer;
-}
-
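
The heuristic deleted above picks the path that moves fewer bytes to the device. A self-contained sketch with assumed shapes (a 4096x4096 f16 weight and 4096x512 f32 activations; these numbers are illustrative, not from the patch) shows that with f32 activations the f16 path always transfers less, which is why the choice effectively reduced to "use f16 whenever the device supports it", modulo the CPU-side conversion cost noted in the TODO:

#include <cstdio>
#include <cstddef>

int main() {
    const size_t ne_src0 = 4096ull * 4096;   // f16 weight elements
    const size_t ne_src1 = 4096ull * 512;    // f32 activation elements

    const size_t src0_sz = ne_src0 * 2;      // sizeof(ggml_fp16_t)
    const size_t src1_sz = ne_src1 * 4;      // sizeof(float)

    const size_t mul_mat_q_transfer   = src0_sz + src1_sz;      // src1 uploaded as f32
    const size_t mul_mat_f16_transfer = src0_sz + ne_src1 * 2;  // src1 converted to f16 on the CPU first

    printf("q path:   %zu MiB\n", mul_mat_q_transfer   >> 20);  // 40 MiB
    printf("f16 path: %zu MiB\n", mul_mat_f16_transfer >> 20);  // 36 MiB
    return 0;
}
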
-void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize) {
- GGML_ASSERT(ggml_cl_can_mul_mat(src0, src1, dst));
-
- if (src0->type == GGML_TYPE_F32) {
- ggml_cl_mul_mat_f32(src0, src1, dst);
- }
- else if (src0->type == GGML_TYPE_F16) {
- if (ggml_cl_mul_mat_use_f16(src0, src1, dst)) {
- ggml_cl_mul_mat_f16(src0, src1, dst, wdata, wsize);
- }
- else {
- ggml_cl_mul_mat_q_f32(src0, src1, dst);
- }
- }
- else if (ggml_is_quantized(src0->type)) {
- ggml_cl_mul_mat_q_f32(src0, src1, dst);
- }
- else {
- GGML_ASSERT(false);
- }
-}
-
-size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
- if (src0->type == GGML_TYPE_F16 && ggml_cl_mul_mat_use_f16(src0, src1, dst)) {
- return sizeof(ggml_fp16_t) * std::max(src1->ne[0] * src1->ne[1], dst->ne[0] * dst->ne[1]);
- }
- return 0;
-}
-
-void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) {
- const int64_t ne0 = tensor->ne[0];
- const int64_t ne1 = tensor->ne[1];
- const int64_t ne2 = tensor->ne[2];
- const int64_t ne3 = tensor->ne[3];
-
- const ggml_type type = tensor->type;
- const size_t s_sz = ggml_type_size(type) * (size_t) (ne0 * ne1 / ggml_blck_size(type));
- const size_t q_sz = s_sz * (size_t) (ne2 * ne3);
-
- size_t q_size;
- cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size);
-
- tensor->data = data;
- // copy tensor to device
- size_t offset = 0;
- for (int64_t i3 = 0; i3 < ne3; i3++) {
- for (int64_t i2 = 0; i2 < ne2; i2++) {
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, offset, tensor, i3, i2, NULL));
- offset += s_sz;
- }
- }
-
- CL_CHECK(clFinish(queue));
-
- tensor->extra = dst;
- GGML_ASSERT(tensor->backend == GGML_BACKEND_TYPE_GPU);
-}
-
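
The upload helper above sizes the device allocation one 2D slice at a time. A small numeric sketch, assuming a 4096x4096 Q4_0 tensor (18-byte blocks of 32 elements are the standard ggml Q4_0 values; the shape itself is made up):

#include <cstdio>
#include <cstddef>

int main() {
    const size_t ne0 = 4096, ne1 = 4096, ne2 = 1, ne3 = 1;
    const size_t type_size = 18;  // bytes per Q4_0 block
    const size_t blck_size = 32;  // elements per Q4_0 block

    const size_t s_sz = type_size * (ne0 * ne1 / blck_size); // one 2D slice
    const size_t q_sz = s_sz * ne2 * ne3;                    // whole tensor

    printf("slice = %zu bytes, total = %zu bytes\n", s_sz, q_sz);
    return 0;
}
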
-// ggml-backend
-
-// buffer
-
-struct ggml_backend_opencl_buffer_context {
- ~ggml_backend_opencl_buffer_context() {
- if (buffer) {
- clReleaseMemObject(buffer);
- }
- for (auto * sub_buffer : sub_buffers) {
- clReleaseMemObject(sub_buffer);
- }
- }
-
- cl_mem buffer;
- std::vector<cl_mem> sub_buffers;
-};
-
-static void * const cl_ptr_base = (void *)(uintptr_t) 0x1000;
-
-static const char * ggml_backend_opencl_buffer_get_name(ggml_backend_buffer_t buffer) {
- return "OpenCL";
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_opencl_buffer_free_buffer(ggml_backend_buffer_t buffer) {
- ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
- delete ctx;
-}
-
-static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer) {
- return cl_ptr_base;
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
- if (tensor->view_src != NULL && tensor->view_offs == 0) {
- tensor->extra = tensor->view_src->extra;
- } else {
- ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
- cl_buffer_region region = {(size_t)((char *)tensor->data - (char *)cl_ptr_base), ggml_nbytes(tensor)};
- cl_int err;
- cl_mem sub_buffer = clCreateSubBuffer(ctx->buffer, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
- CL_CHECK(err);
- ctx->sub_buffers.push_back(sub_buffer);
- tensor->extra = sub_buffer;
- }
- tensor->backend = GGML_BACKEND_TYPE_GPU;
-}
-
-static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
- cl_mem tensor_buffer = (cl_mem) tensor->extra;
- CL_CHECK(clEnqueueWriteBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL));
- CL_CHECK(clFinish(queue));
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
- cl_mem tensor_buffer = (cl_mem) tensor->extra;
- CL_CHECK(clEnqueueReadBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL));
- CL_CHECK(clFinish(queue));
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_opencl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
- ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
- CL_CHECK(clEnqueueFillBuffer(queue, ctx->buffer, &value, sizeof(value), 0, buffer->size, 0, NULL, NULL));
- CL_CHECK(clFinish(queue));
-}
-
-static void ggml_backend_opencl_buffer_reset(ggml_backend_buffer_t buffer) {
- ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
- for (auto * sub_buffer : ctx->sub_buffers) {
- clReleaseMemObject(sub_buffer);
- }
- ctx->sub_buffers.clear();
-}
-
-static ggml_backend_buffer_i ggml_backend_opencl_buffer_interface = {
- /* .get_name = */ ggml_backend_opencl_buffer_get_name,
- /* .free_buffer = */ ggml_backend_opencl_buffer_free_buffer,
- /* .get_base = */ ggml_backend_opencl_buffer_get_base,
- /* .init_tensor = */ ggml_backend_opencl_buffer_init_tensor,
- /* .set_tensor = */ ggml_backend_opencl_buffer_set_tensor,
- /* .get_tensor = */ ggml_backend_opencl_buffer_get_tensor,
- /* .cpy_tensor = */ NULL,
- /* .clear = */ ggml_backend_opencl_buffer_clear,
- /* .reset = */ ggml_backend_opencl_buffer_reset,
-};
-
-// buffer type
-
-static const char * ggml_backend_opencl_buffer_type_name(ggml_backend_buffer_type_t buffer_type) {
- return "OpenCL";
-
- GGML_UNUSED(buffer_type);
-}
-
-static ggml_backend_buffer_t ggml_backend_opencl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buffer_type, size_t size) {
- ggml_cl_init();
-
- cl_int err;
- cl_mem mem = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err);
- if (err != CL_SUCCESS) {
- fprintf(stderr, "%s: failed to allocate %.2f MiB\n", __func__, size / 1024.0 / 1024.0);
- return nullptr;
- }
-
- ggml_backend_opencl_buffer_context * ctx = new ggml_backend_opencl_buffer_context{mem, {}};
-
- return ggml_backend_buffer_init(buffer_type, ggml_backend_opencl_buffer_interface, ctx, size);
-}
-
-static size_t ggml_backend_opencl_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) {
- // FIXME: not thread safe, device may not be initialized yet
- static cl_uint alignment = -1;
- if (alignment == (cl_uint)-1) {
- ggml_cl_init();
- clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN, sizeof(cl_uint), &alignment, NULL);
- alignment /= 8; // bits to bytes
- }
- return alignment;
-
- GGML_UNUSED(buffer_type);
-}
-
-static size_t ggml_backend_opencl_buffer_type_get_max_size(ggml_backend_buffer_type_t buffer_type) {
- static size_t max_size = -1;
- if (max_size == (size_t)-1) {
- ggml_cl_init();
- clGetDeviceInfo(device, CL_DEVICE_MAX_MEM_ALLOC_SIZE, sizeof(size_t), &max_size, NULL);
- }
- return max_size;
-}
-
-static bool ggml_backend_opencl_buffer_type_supports_backend(ggml_backend_buffer_type_t buffer_type, ggml_backend_t backend) {
- //return ggml_backend_is_opencl(backend); // opencl must be used through the cpu backend
- return ggml_backend_is_cpu(backend);
-
- GGML_UNUSED(buffer_type);
-}
-
-static ggml_backend_buffer_type_i ggml_backend_opencl_buffer_type_interface = {
- /* .get_name = */ ggml_backend_opencl_buffer_type_name,
- /* .alloc_buffer = */ ggml_backend_opencl_buffer_type_alloc_buffer,
- /* .get_alignment = */ ggml_backend_opencl_buffer_type_get_alignment,
- /* .get_max_size = */ ggml_backend_opencl_buffer_type_get_max_size,
- /* .get_alloc_size = */ NULL,
- /* .supports_backend = */ ggml_backend_opencl_buffer_type_supports_backend,
- /* .is_host = */ NULL,
-};
-
-
-ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type() {
- static ggml_backend_buffer_type buffer_type = {
- /* .iface = */ ggml_backend_opencl_buffer_type_interface,
- /* .context = */ nullptr,
- };
-
- return &buffer_type;
-}
-
-#if 0
-// host buffer type
-
-static const char * ggml_backend_opencl_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
- return "CL_Host";
-
- GGML_UNUSED(buft);
-}
-
-static const char * ggml_backend_opencl_host_buffer_name(ggml_backend_buffer_t buffer) {
- return "CL_Host";
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_opencl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
- ggml_cl_host_free(buffer->context);
-}
-
-static ggml_backend_buffer_t ggml_backend_opencl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
- void * ptr = ggml_cl_host_malloc(size);
-
- if (ptr == nullptr) {
- // fallback to cpu buffer
- return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
- }
-
- ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
- buffer->buft = buft;
- buffer->iface.get_name = ggml_backend_opencl_host_buffer_name;
- buffer->iface.free_buffer = ggml_backend_opencl_host_buffer_free_buffer;
-
- return buffer;
-}
-
-ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type() {
- static struct ggml_backend_buffer_type ggml_backend_opencl_buffer_type_host = {
- /* .iface = */ {
- /* .get_name = */ ggml_backend_opencl_host_buffer_type_name,
- /* .alloc_buffer = */ ggml_backend_opencl_host_buffer_type_alloc_buffer,
- /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
- /* .get_max_size = */ NULL, // defaults to SIZE_MAX
- /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
- /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend,
- /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
- },
- /* .context = */ nullptr,
- };
-
- return &ggml_backend_opencl_buffer_type_host;
-}
-
-// backend
-
-static const char * ggml_backend_opencl_name(ggml_backend_t backend) {
- return "OpenCL";
-
- GGML_UNUSED(backend);
-}
-
-static void ggml_backend_opencl_free(ggml_backend_t backend) {
- GGML_UNUSED(backend);
-}
-
-static ggml_backend_buffer_type_t ggml_backend_opencl_get_default_buffer_type(ggml_backend_t backend) {
- return ggml_backend_opencl_buffer_type();
-
- GGML_UNUSED(backend);
-}
-
-static ggml_status ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) {
- for (int i = 0; i < graph->n_nodes; ++i) {
- ggml_tensor * node = graph->nodes[i];
-
- if (ggml_is_empty(node)) {
- continue;
- }
-
- switch (node->op) {
- case GGML_OP_MUL_MAT:
- ggml_cl_mul_mat(node->src[0], node->src[1], node, nullptr, 0);
- break;
- case GGML_OP_MUL:
- ggml_cl_mul(node->src[0], node->src[1], node);
- break;
- default:
- GGML_ASSERT(false);
- }
- }
-
- return GGML_STATUS_SUCCESS;
-
- GGML_UNUSED(backend);
-}
-
-static bool ggml_backend_opencl_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
- switch (op->op) {
- case GGML_OP_MUL_MAT:
- return ggml_cl_can_mul_mat(op->src[0], op->src[1], op);
- case GGML_OP_MUL:
- // return ggml_can_repeat_rows(op->src[1], op->src[0]);
- return true;
- default:
- return false;
- }
-
- GGML_UNUSED(backend);
-}
-
-static ggml_backend_i opencl_backend_i = {
- /* .get_name = */ ggml_backend_opencl_name,
- /* .free = */ ggml_backend_opencl_free,
- /* .get_default_buffer_type = */ ggml_backend_opencl_get_default_buffer_type,
- /* .set_tensor_async = */ NULL,
- /* .get_tensor_async = */ NULL,
- /* .cpy_tensor_from_async = */ NULL,
- /* .cpy_tensor_to_async = */ NULL,
- /* .synchronize = */ NULL,
- /* .graph_plan_create = */ NULL,
- /* .graph_plan_free = */ NULL,
- /* .graph_plan_compute = */ NULL,
- /* .graph_compute = */ ggml_backend_opencl_graph_compute,
- /* .supports_op = */ ggml_backend_opencl_supports_op,
-};
-
-ggml_backend_t ggml_backend_opencl_init() {
- ggml_backend_t backend = new ggml_backend {
- /* .interface = */ opencl_backend_i,
- /* .context = */ nullptr
- };
-
- return backend;
-}
-
-bool ggml_backend_is_opencl(ggml_backend_t backend) {
- return backend && backend->iface.get_name == ggml_backend_opencl_name;
-}
-#endif
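
One detail of the removed backend-buffer code worth spelling out: get_base() returns an arbitrary non-null pointer, so tensor->data values in this buffer are really byte offsets into a single large cl_mem, and init_tensor() converts each one back into a sub-buffer region. A standalone sketch of the pattern (names here are illustrative, not taken from the file):

#include <cstddef>

static void * const fake_base = (void *) 0x1000; // never dereferenced

struct buffer_region { size_t offset; size_t size; };

// what the graph allocator hands out as tensor->data
static void * addr_for_offset(size_t offset) {
    return (char *) fake_base + offset;
}

// what init_tensor recovers before creating the sub-buffer
static buffer_region region_for_tensor(void * tensor_data, size_t nbytes) {
    return { (size_t)((char *) tensor_data - (char *) fake_base), nbytes };
}
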
diff --git a/ggml-opencl.h b/ggml-opencl.h
deleted file mode 100644
index 257a6be6af5ec..0000000000000
--- a/ggml-opencl.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#pragma once
-
-#include "ggml.h"
-#include "ggml-backend.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-GGML_API void ggml_cl_init(void);
-
-GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API void ggml_cl_add(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst);
-GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
-
-// GGML_API void * ggml_cl_host_malloc(size_t size);
-// GGML_API void ggml_cl_host_free(void * ptr);
-
-GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor);
-
-GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);
-
-// backend API
-
-// GGML_API ggml_backend_t ggml_backend_opencl_init(void);
-
-// GGML_API bool ggml_backend_is_opencl(ggml_backend_t backend);
-
-GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void);
-// GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 5cd97e4ff98df..3ff76474d7e72 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -8928,49 +8928,6 @@ static void rope_neox(
dst[i + n_dims/2] = x0*sin_theta + x1*cos_theta;
}
-static void rope_glm_f32(
- const float * x, float * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base,
- int n_ctx
-, const sycl::nd_item<3> &item_ct1) {
- const int col = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
- item_ct1.get_local_id(2);
- const int half_n_dims = ncols/4;
-
- if (col >= half_n_dims) {
- return;
- }
-
- const int row = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
- item_ct1.get_local_id(1);
- const int i = row*ncols + col;
- const int i2 = row/p_delta_rows;
-
- const float col_theta_scale = dpct::pow(freq_base, -2.0f * col / ncols);
- // FIXME: this is likely wrong
- const int p = pos != nullptr ? pos[i2] : 0;
-
- const float theta = sycl::min(p, n_ctx - 2) * freq_scale * col_theta_scale;
- const float sin_theta = sycl::sin((float)theta);
- const float cos_theta = sycl::cos((float)theta);
-
- const float x0 = x[i + 0];
- const float x1 = x[i + half_n_dims];
-
- dst[i + 0] = x0*cos_theta - x1*sin_theta;
- dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta;
-
- const float block_theta =
- ((float)sycl::max(p - n_ctx - 2, 0)) * col_theta_scale;
- const float sin_block_theta = sycl::sin((float)block_theta);
- const float cos_block_theta = sycl::cos((float)block_theta);
-
- const float x2 = x[i + half_n_dims * 2];
- const float x3 = x[i + half_n_dims * 3];
-
- dst[i + half_n_dims * 2] = x2*cos_block_theta - x3*sin_block_theta;
- dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta;
-}
-
static void k_sum_rows_f32(const float * x, float * dst, const int ncols,
const sycl::nd_item<3> &item_ct1) {
const int row = item_ct1.get_group(1);
@@ -12520,22 +12477,6 @@ static void rope_neox_sycl(const T *x, T *dst, int ncols, int n_dims, int nrows,
}
}
-static void rope_glm_f32_sycl(const float *x, float *dst, int ncols, int nrows,
- const int32_t *pos, float freq_scale,
- int p_delta_rows, float freq_base, int n_ctx,
- dpct::queue_ptr stream) {
- GGML_ASSERT(ncols % 4 == 0);
- const sycl::range<3> block_dims(1, 1, SYCL_ROPE_BLOCK_SIZE / 4);
- const int num_blocks_x = (ncols + SYCL_ROPE_BLOCK_SIZE - 1) / SYCL_ROPE_BLOCK_SIZE;
- const sycl::range<3> block_nums(1, nrows, num_blocks_x);
- stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
- [=](sycl::nd_item<3> item_ct1) {
- rope_glm_f32(x, dst, ncols, pos, freq_scale,
- p_delta_rows, freq_base, n_ctx,
- item_ct1);
- });
-}
-
static void sum_rows_f32_sycl(const float *x, float *dst, const int ncols,
const int nrows, dpct::queue_ptr stream) {
const sycl::range<3> block_dims(1, 1, WARP_SIZE);
@@ -14066,8 +14007,8 @@ inline void ggml_sycl_op_rope(const ggml_tensor *src0, const ggml_tensor *src1,
//const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
- const int n_ctx = ((int32_t *) dst->op_params)[3];
- const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ //const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
// RoPE alteration for extended context
float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
@@ -14087,7 +14028,9 @@ inline void ggml_sycl_op_rope(const ggml_tensor *src0, const ggml_tensor *src1,
}
const bool is_neox = mode & 2;
- const bool is_glm = mode & 4;
+
+#pragma message("TODO: update rope NORM mode to match NEOX mode")
+#pragma message(" https://github.com/ggerganov/llama.cpp/pull/7634")
if (is_neox) {
pos = (const int32_t *) src1_dd;
@@ -14100,13 +14043,10 @@ inline void ggml_sycl_op_rope(const ggml_tensor *src0, const ggml_tensor *src1,
}
rope_corr_dims corr_dims;
- ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims.v);
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims.v);
// compute
- if (is_glm) {
- GGML_ASSERT(false);
- rope_glm_f32_sycl(src0_dd, dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, n_ctx, main_stream);
- } else if (is_neox) {
+ if (is_neox) {
if (src0->type == GGML_TYPE_F32) {
rope_neox_sycl(
(const float *)src0_dd, (float *)dst_dd, ne00, n_dims, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
diff --git a/ggml-vulkan.cpp b/ggml-vulkan.cpp
index 5e12ea9dde4d7..e0c512c0dab0f 100644
--- a/ggml-vulkan.cpp
+++ b/ggml-vulkan.cpp
@@ -3898,11 +3898,6 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
{
const int mode = ((const int32_t *) dst->op_params)[2];
const bool is_neox = mode & 2;
- const bool is_glm = mode & 4;
-
- if (is_glm) {
- return nullptr;
- }
if (is_neox) {
if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
@@ -4401,7 +4396,7 @@ static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context * subctx, con
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
// const int n_ctx = ((int32_t *) dst->op_params)[3];
- const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
const float freq_base = ((float *) dst->op_params)[5];
const float freq_scale = ((float *) dst->op_params)[6];
const float ext_factor = ((float *) dst->op_params)[7];
@@ -4410,12 +4405,12 @@ static void ggml_vk_rope(ggml_backend_vk_context * ctx, vk_context * subctx, con
const float beta_slow = ((float *) dst->op_params)[10];
const bool is_neox = mode & 2;
- const bool is_glm = mode & 4;
- GGML_ASSERT(!is_glm);
+#pragma message("TODO: update rope NORM mode to match NEOX mode")
+#pragma message(" https://github.com/ggerganov/llama.cpp/pull/7634")
float corr_dims[2];
- ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
if (is_neox) {
const float theta_scale = powf(freq_base, -2.0f/n_dims);
@@ -6485,9 +6480,8 @@ GGML_CALL static bool ggml_backend_vk_supports_op(ggml_backend_t backend, const
case GGML_OP_ROPE:
{
const int mode = ((const int32_t *) op->op_params)[2];
- const bool is_glm = mode & 4;
- return !is_glm;
+ return true;
} break;
case GGML_OP_NONE:
case GGML_OP_RESHAPE:
@@ -6992,15 +6986,15 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_compute_
} else if (tensor->op == GGML_OP_ROPE) {
const int n_dims = ((int32_t *) tensor->op_params)[1];
const int mode = ((int32_t *) tensor->op_params)[2];
- const int n_ggml_ctx = ((int32_t *) tensor->op_params)[3];
- const int n_orig_ggml_ctx = ((int32_t *) tensor->op_params)[4];
+ //const int n_ctx_ggml = ((int32_t *) tensor->op_params)[3];
+ const int n_ctx_orig_ggml = ((int32_t *) tensor->op_params)[4];
float freq_base = ((float *) tensor->op_params)[5];
float freq_scale = ((float *) tensor->op_params)[6];
float ext_factor = ((float *) tensor->op_params)[7];
float attn_factor = ((float *) tensor->op_params)[8];
float beta_fast = ((float *) tensor->op_params)[9];
float beta_slow = ((float *) tensor->op_params)[10];
- tensor_clone = ggml_rope_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, n_dims, mode, n_ggml_ctx, n_orig_ggml_ctx, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
+ tensor_clone = ggml_rope_ext(ggml_ctx, src0_clone, src1_clone, src2_clone, n_dims, mode, n_ctx_orig_ggml, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
} else if (tensor->op == GGML_OP_UNARY) {
switch (ggml_get_unary_op(tensor)) {
case GGML_UNARY_OP_SILU:
diff --git a/ggml.c b/ggml.c
index 8869e146ab2b8..1fc77743bc7b9 100644
--- a/ggml.c
+++ b/ggml.c
@@ -297,17 +297,12 @@ inline static void * ggml_calloc(size_t num, size_t size) {
#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
-#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
-#include "ggml-opencl.h"
-#endif
#elif defined(GGML_USE_OPENBLAS)
#if defined(GGML_BLAS_USE_MKL)
#include <mkl.h>
#else
#include <cblas.h>
#endif
-#elif defined(GGML_USE_CLBLAST)
-#include "ggml-opencl.h"
#endif
// floating point type used to accumulate sums
@@ -3380,10 +3375,6 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
GGML_PRINT_DEBUG("%s: g_state initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
}
-#if defined(GGML_USE_CLBLAST)
- ggml_cl_init();
-#endif
-
ggml_setup_op_has_task_pass();
is_first_call = false;
@@ -6259,16 +6250,13 @@ static struct ggml_tensor * ggml_rope_impl(
struct ggml_tensor * c,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
float attn_factor,
float beta_fast,
float beta_slow,
- float xpos_base,
- bool xpos_down,
bool inplace) {
GGML_ASSERT((mode & 1) == 0 && "mode & 1 == 1 is no longer supported");
@@ -6289,15 +6277,13 @@ static struct ggml_tensor * ggml_rope_impl(
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
+ int32_t params[11] = { /*n_past*/ 0, n_dims, mode, /*n_ctx*/ 0, n_ctx_orig };
memcpy(params + 5, &freq_base, sizeof(float));
memcpy(params + 6, &freq_scale, sizeof(float));
memcpy(params + 7, &ext_factor, sizeof(float));
memcpy(params + 8, &attn_factor, sizeof(float));
memcpy(params + 9, &beta_fast, sizeof(float));
memcpy(params + 10, &beta_slow, sizeof(float));
- memcpy(params + 11, &xpos_base, sizeof(float));
- memcpy(params + 12, &xpos_down, sizeof(bool));
ggml_set_op_params(result, params, sizeof(params));
result->op = GGML_OP_ROPE;
@@ -6314,10 +6300,9 @@ struct ggml_tensor * ggml_rope(
struct ggml_tensor * a,
struct ggml_tensor * b,
int n_dims,
- int mode,
- int n_ctx) {
+ int mode) {
return ggml_rope_impl(
- ctx, a, b, NULL, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
+ ctx, a, b, NULL, n_dims, mode, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, false
);
}
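
After this change the basic call site drops the n_ctx argument entirely; positions come from the int32 tensor b, whose ne[0] must match a->ne[2]. A hedged usage sketch (tensor shapes and n_rot are assumed values, not taken from llama.cpp):

#include "ggml.h"

// cur: [n_embd_head, n_head, n_tokens], pos: int32 with ne[0] == n_tokens
static struct ggml_tensor * apply_rope(struct ggml_context * ctx,
                                       struct ggml_tensor  * cur,
                                       struct ggml_tensor  * pos) {
    const int n_rot = 128;  // number of rotated dimensions (assumed value)
    const int mode  = 0;    // 0 = normal, 2 = GPT-NeoX style
    return ggml_rope(ctx, cur, pos, n_rot, mode);
}
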
@@ -6326,10 +6311,9 @@ struct ggml_tensor * ggml_rope_inplace(
struct ggml_tensor * a,
struct ggml_tensor * b,
int n_dims,
- int mode,
- int n_ctx) {
+ int mode) {
return ggml_rope_impl(
- ctx, a, b, NULL, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
+ ctx, a, b, NULL, n_dims, mode, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, true
);
}
@@ -6340,8 +6324,7 @@ struct ggml_tensor * ggml_rope_ext(
struct ggml_tensor * c,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -6349,8 +6332,8 @@ struct ggml_tensor * ggml_rope_ext(
float beta_fast,
float beta_slow) {
return ggml_rope_impl(
- ctx, a, b, c, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
+ ctx, a, b, c, n_dims, mode, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow, false
);
}
@@ -6361,8 +6344,7 @@ struct ggml_tensor * ggml_rope_ext_inplace(
struct ggml_tensor * c,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -6370,8 +6352,8 @@ struct ggml_tensor * ggml_rope_ext_inplace(
float beta_fast,
float beta_slow) {
return ggml_rope_impl(
- ctx, a, b, c, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
+ ctx, a, b, c, n_dims, mode, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow, true
);
}
@@ -6381,8 +6363,7 @@ struct ggml_tensor * ggml_rope_custom(
struct ggml_tensor * b,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -6390,8 +6371,8 @@ struct ggml_tensor * ggml_rope_custom(
float beta_fast,
float beta_slow) {
return ggml_rope_impl(
- ctx, a, b, NULL, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
+ ctx, a, b, NULL, n_dims, mode, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow, false
);
}
@@ -6401,8 +6382,7 @@ struct ggml_tensor * ggml_rope_custom_inplace(
struct ggml_tensor * b,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -6410,21 +6390,11 @@ struct ggml_tensor * ggml_rope_custom_inplace(
float beta_fast,
float beta_slow) {
return ggml_rope_impl(
- ctx, a, b, NULL, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
- ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
+ ctx, a, b, NULL, n_dims, mode, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow, true
);
}
-struct ggml_tensor * ggml_rope_xpos_inplace(
- struct ggml_context * ctx,
- struct ggml_tensor * a,
- struct ggml_tensor * b,
- int n_dims,
- float base,
- bool down) {
- return ggml_rope_impl(ctx, a, b, NULL, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
-}
-
// ggml_rope_back
struct ggml_tensor * ggml_rope_back(
@@ -6434,16 +6404,13 @@ struct ggml_tensor * ggml_rope_back(
struct ggml_tensor * c,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
float attn_factor,
float beta_fast,
- float beta_slow,
- float xpos_base,
- bool xpos_down) {
+ float beta_slow) {
GGML_ASSERT(ggml_is_vector(b));
GGML_ASSERT(b->type == GGML_TYPE_I32);
GGML_ASSERT(a->ne[2] == b->ne[0]);
@@ -6459,15 +6426,13 @@ struct ggml_tensor * ggml_rope_back(
struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
- int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
+ int32_t params[11] = { /*n_past*/ 0, n_dims, mode, /*n_ctx*/ 0, n_ctx_orig };
memcpy(params + 5, &freq_base, sizeof(float));
memcpy(params + 6, &freq_scale, sizeof(float));
memcpy(params + 7, &ext_factor, sizeof(float));
memcpy(params + 8, &attn_factor, sizeof(float));
memcpy(params + 9, &beta_fast, sizeof(float));
memcpy(params + 10, &beta_slow, sizeof(float));
- memcpy(params + 11, &xpos_base, sizeof(float));
- memcpy(params + 12, &xpos_down, sizeof(bool));
ggml_set_op_params(result, params, sizeof(params));
result->op = GGML_OP_ROPE_BACK;
@@ -9053,17 +9018,6 @@ static void ggml_compute_forward_add_f32(
const int ith = params->ith;
const int nth = params->nth;
-#ifdef GGML_USE_CLBLAST
- if (src1->backend == GGML_BACKEND_TYPE_GPU) {
- // TODO: OpenCL kernel support full broadcast
- GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
- if (ith == 0) {
- ggml_cl_add(src0, src1, dst);
- }
- return;
- }
-#endif
-
const int nr = ggml_nrows(src0);
GGML_TENSOR_BINARY_OP_LOCALS
@@ -10171,17 +10125,6 @@ static void ggml_compute_forward_mul_f32(
const int ith = params->ith;
const int nth = params->nth;
-#if defined(GGML_USE_CLBLAST)
- if (src1->backend == GGML_BACKEND_TYPE_GPU) {
- // TODO: OpenCL kernel support full broadcast
- GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
- if (ith == 0) {
- ggml_cl_mul(src0, src1, dst);
- }
- return;
- }
-#endif
-
const int64_t nr = ggml_nrows(src0);
GGML_TENSOR_BINARY_OP_LOCALS
@@ -12417,15 +12360,6 @@ static void ggml_compute_forward_mul_mat(
// nb01 >= nb00 - src0 is not transposed
// compute by src0 rows
-#if defined(GGML_USE_CLBLAST)
- if (ggml_cl_can_mul_mat(src0, src1, dst)) {
- if (params->ith == 0 && params->type == GGML_TASK_TYPE_COMPUTE) {
- ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
- }
- return;
- }
-#endif
-
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_compute_forward_mul_mat_use_blas(dst)) {
const int64_t ne_plane = ne01*ne00;
@@ -12873,8 +12807,6 @@ static void ggml_compute_forward_out_prod_f32(
// nb01 >= nb00 - src0 is not transposed
// compute by src0 rows
- // TODO: #if defined(GGML_USE_CLBLAST)
-
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
bool use_blas = ggml_is_matrix(src0) &&
ggml_is_matrix(src1) &&
@@ -13072,7 +13004,7 @@ static void ggml_compute_forward_out_prod_q_f32(
// nb01 >= nb00 - src0 is not transposed
// compute by src0 rows
- // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
+ // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (params->type == GGML_TASK_TYPE_INIT) {
if (ith != 0) {
@@ -14269,8 +14201,7 @@ static float rope_yarn_ramp(const float low, const float high, const int i0) {
// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
static void rope_yarn(
float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
- float * cos_theta, float * sin_theta
-) {
+ float * cos_theta, float * sin_theta) {
// Get n-d rotational scaling corrected for extrapolation
float theta_interp = freq_scale * theta_extrap;
float theta = theta_interp;
@@ -14287,18 +14218,19 @@ static void rope_yarn(
// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
// `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
-static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
- return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
+static float ggml_rope_yarn_corr_dim(int n_dims, int n_ctx_orig, float n_rot, float base) {
+ return n_dims * logf(n_ctx_orig / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
}
static void ggml_rope_cache_init(
- float theta_base, float freq_scale, float corr_dims[2], int64_t ne0, float ext_factor, float mscale,
- float * cache, float sin_sign, float theta_scale
-) {
+ float theta_base, float freq_scale, const float * freq_factors, float corr_dims[2], int64_t ne0, float ext_factor, float mscale,
+ float * cache, float sin_sign, float theta_scale) {
+ // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py
float theta = theta_base;
for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+ const float ff = freq_factors ? freq_factors[i0/2] : 1.0f;
rope_yarn(
- theta, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
+ theta/ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]
);
cache[i0 + 1] *= sin_sign;
@@ -14307,11 +14239,11 @@ static void ggml_rope_cache_init(
}
GGML_CALL void ggml_rope_yarn_corr_dims(
- int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
+ int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]
) {
// start and end correction dims
- float start = floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base));
- float end = ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base));
+ float start = floorf(ggml_rope_yarn_corr_dim(n_dims, n_ctx_orig, beta_fast, freq_base));
+ float end = ceilf(ggml_rope_yarn_corr_dim(n_dims, n_ctx_orig, beta_slow, freq_base));
dims[0] = MAX(0, start);
dims[1] = MIN(n_dims - 1, end);
}
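
The rename above does not change the math, so a quick numeric check still applies. A standalone sketch reproducing the correction-dim computation with assumed values (n_dims = 128, n_ctx_orig = 4096, freq_base = 10000, beta_fast = 32, beta_slow = 1, none of which come from the patch):

#include <cmath>
#include <cstdio>

static float corr_dim(int n_dims, int n_ctx_orig, float n_rot, float base) {
    const float two_pi = 6.283185307f;
    return n_dims * logf(n_ctx_orig / (n_rot * two_pi)) / (2.0f * logf(base));
}

int main() {
    const float start = floorf(corr_dim(128, 4096, 32.0f, 10000.0f)); // ~20
    const float end   = ceilf (corr_dim(128, 4096,  1.0f, 10000.0f)); // ~46
    printf("corr_dims = [%.0f, %.0f]\n", start, end);
    // with ext_factor = 1, dimensions below `start` keep extrapolated frequencies,
    // those above `end` are fully interpolated, and the YaRN ramp blends in between
    return 0;
}
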
@@ -14331,15 +14263,11 @@ static void ggml_compute_forward_rope_f32(
float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
- // these two only relevant for xPos RoPE:
- float xpos_base;
- bool xpos_down;
-
//const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
- const int n_ctx = ((int32_t *) dst->op_params)[3];
- const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ //const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
@@ -14347,8 +14275,6 @@ static void ggml_compute_forward_rope_f32(
memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
- memcpy(&xpos_base, (int32_t *) dst->op_params + 11, sizeof(float));
- memcpy(&xpos_down, (int32_t *) dst->op_params + 12, sizeof(bool));
GGML_TENSOR_UNARY_OP_LOCALS
@@ -14378,20 +14304,15 @@ static void ggml_compute_forward_rope_f32(
const float theta_scale = powf(freq_base, -2.0f/n_dims);
float corr_dims[2];
- ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
const bool is_neox = mode & 2;
- const bool is_glm = mode & 4;
const float * freq_factors = NULL;
- if (is_neox) {
- if (src2 != NULL) {
- GGML_ASSERT(src2->type == GGML_TYPE_F32);
- GGML_ASSERT(src2->ne[0] >= n_dims / 2);
- freq_factors = (const float *) src2->data;
- }
- } else {
- GGML_ASSERT(src2 == NULL && "TODO: freq_factors not implemented for !is_neox");
+ if (src2 != NULL) {
+ GGML_ASSERT(src2->type == GGML_TYPE_F32);
+ GGML_ASSERT(src2->ne[0] >= n_dims / 2);
+ freq_factors = (const float *) src2->data;
}
// backward process uses inverse rotation by cos and sin.
@@ -14406,94 +14327,50 @@ static void ggml_compute_forward_rope_f32(
const int64_t p = pos[i2];
float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
- if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
- ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
- }
+ ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
- float theta_base = (float)p;
-
- if (is_glm) {
- theta_base = MIN(p, n_ctx - 2);
- float block_theta = MAX(p - (n_ctx - 2), 0);
- for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
- const float cos_theta = cosf(theta_base);
- const float sin_theta = sinf(theta_base) * sin_sign;
- const float cos_block_theta = cosf(block_theta);
- const float sin_block_theta = sinf(block_theta) * sin_sign;
-
- theta_base *= theta_scale;
- block_theta *= theta_scale;
-
- const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
-
- const float x0 = src[0];
- const float x1 = src[n_dims/2];
- const float x2 = src[n_dims];
- const float x3 = src[n_dims/2*3];
-
- dst_data[0] = x0*cos_theta - x1*sin_theta;
- dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
- dst_data[n_dims] = x2*cos_block_theta - x3*sin_block_theta;
- dst_data[n_dims/2*3] = x2*sin_block_theta + x3*cos_block_theta;
- }
- } else if (!is_neox) {
- for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+ if (!is_neox) {
+ for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
const float cos_theta = cache[i0 + 0];
const float sin_theta = cache[i0 + 1];
- // zeta scaling for xPos only:
- float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
- if (xpos_down) zeta = 1.0f / zeta;
-
const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
const float x0 = src[0];
const float x1 = src[1];
- dst_data[0] = x0*cos_theta*zeta - x1*sin_theta*zeta;
- dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[1] = x0*sin_theta + x1*cos_theta;
}
} else {
- // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py
- for (int64_t ic = 0; ic < ne0; ic += 2) {
- if (ic < n_dims) {
- const int64_t i0 = ic/2;
-
- const float freq_factor = freq_factors ? freq_factors[i0] : 1.0f;
+ for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+ const int64_t ic = i0/2;
- float cos_theta, sin_theta;
- rope_yarn(
- theta_base/freq_factor, freq_scale, corr_dims, ic, ext_factor, attn_factor,
- &cos_theta, &sin_theta
- );
-
- sin_theta *= sin_sign;
- theta_base *= theta_scale;
+ const float cos_theta = cache[i0 + 0];
+ const float sin_theta = cache[i0 + 1];
- const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
+ float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
- const float x0 = src[0];
- const float x1 = src[n_dims/2];
+ const float x0 = src[0];
+ const float x1 = src[n_dims/2];
- dst_data[0] = x0*cos_theta - x1*sin_theta;
- dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
- } else {
- const int64_t i0 = ic;
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
+ }
+ }
- const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
+ const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- dst_data[0] = src[0];
- dst_data[1] = src[1];
- }
- }
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
}
}
}
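
With the GLM branch gone, the f32 path keeps only the two rotation layouts, both driven by the shared {cos, sin} cache. A minimal standalone sketch of the two pairings on one contiguous row (a simplification of the strided code above; elements past n_dims are copied through unchanged):

#include <cstdint>

// "normal" mode: rotate adjacent elements (x[i0], x[i0+1])
static void rope_norm_pairs(const float * src, float * dst, const float * cache, int64_t n_dims) {
    for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
        const float c = cache[i0 + 0], s = cache[i0 + 1];
        const float x0 = src[i0 + 0], x1 = src[i0 + 1];
        dst[i0 + 0] = x0*c - x1*s;
        dst[i0 + 1] = x0*s + x1*c;
    }
}

// NeoX mode: rotate elements half a head apart (x[i0/2], x[i0/2 + n_dims/2])
static void rope_neox_pairs(const float * src, float * dst, const float * cache, int64_t n_dims) {
    for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
        const int64_t ic = i0/2;
        const float c = cache[i0 + 0], s = cache[i0 + 1];
        const float x0 = src[ic], x1 = src[ic + n_dims/2];
        dst[ic + 0]        = x0*c - x1*s;
        dst[ic + n_dims/2] = x0*s + x1*c;
    }
}
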
@@ -14519,8 +14396,8 @@ static void ggml_compute_forward_rope_f16(
//const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
- const int n_ctx = ((int32_t *) dst->op_params)[3];
- const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ //const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
@@ -14556,20 +14433,15 @@ static void ggml_compute_forward_rope_f16(
const float theta_scale = powf(freq_base, -2.0f/n_dims);
float corr_dims[2];
- ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
const bool is_neox = mode & 2;
- const bool is_glm = mode & 4;
const float * freq_factors = NULL;
- if (is_neox) {
- if (src2 != NULL) {
- GGML_ASSERT(src2->type == GGML_TYPE_F32);
- GGML_ASSERT(src2->ne[0] >= n_dims / 2);
- freq_factors = (const float *) src2->data;
- }
- } else {
- GGML_ASSERT(src2 == NULL && "TODO: freq_factors not implemented for !is_neox");
+ if (src2 != NULL) {
+ GGML_ASSERT(src2->type == GGML_TYPE_F32);
+ GGML_ASSERT(src2->ne[0] >= n_dims / 2);
+ freq_factors = (const float *) src2->data;
}
// backward process uses inverse rotation by cos and sin.
@@ -14584,43 +14456,14 @@ static void ggml_compute_forward_rope_f16(
const int64_t p = pos[i2];
float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32)*ith;
- if (!is_glm && !is_neox) { // TODO: cache sin/cos for glm, neox
- ggml_rope_cache_init(p, freq_scale, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
- }
+ ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
- float theta_base = (float)p;
-
- if (is_glm) {
- theta_base = MIN(p, n_ctx - 2);
- float block_theta = MAX(p - (n_ctx - 2), 0);
- for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
- const float cos_theta = cosf(theta_base);
- const float sin_theta = sinf(theta_base) * sin_sign;
- const float cos_block_theta = cosf(block_theta);
- const float sin_block_theta = sinf(block_theta) * sin_sign;
-
- theta_base *= theta_scale;
- block_theta *= theta_scale;
-
- const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
-
- const float x0 = GGML_FP16_TO_FP32(src[0]);
- const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
- const float x2 = GGML_FP16_TO_FP32(src[n_dims]);
- const float x3 = GGML_FP16_TO_FP32(src[n_dims/2*3]);
-
- dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
- dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
- dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
- dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
- }
- } else if (!is_neox) {
- for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+ if (!is_neox) {
+ for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
const float cos_theta = cache[i0 + 0];
const float sin_theta = cache[i0 + 1];
@@ -14634,40 +14477,29 @@ static void ggml_compute_forward_rope_f16(
dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
}
} else {
- // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py
- for (int64_t ic = 0; ic < ne0; ic += 2) {
- if (ic < n_dims) {
- const int64_t i0 = ic/2;
-
- const float freq_factor = freq_factors ? freq_factors[i0] : 1.0f;
+ for (int64_t i0 = 0; i0 < n_dims; i0 += 2) {
+ const int64_t ic = i0/2;
- float cos_theta, sin_theta;
- rope_yarn(
- theta_base/freq_factor, freq_scale, corr_dims, ic, ext_factor, attn_factor,
- &cos_theta, &sin_theta
- );
-
- sin_theta *= sin_sign;
- theta_base *= theta_scale;
+ const float cos_theta = cache[i0 + 0];
+ const float sin_theta = cache[i0 + 1];
- const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + ic*nb00);
+ ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + ic*nb0);
- const float x0 = GGML_FP16_TO_FP32(src[0]);
- const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
+ const float x0 = GGML_FP16_TO_FP32(src[0]);
+ const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
- dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
- dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
- } else {
- const int64_t i0 = ic;
+ dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
+ dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
+ }
+ }
- const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
+ const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- dst_data[0] = src[0];
- dst_data[1] = src[1];
- }
- }
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
}
}
}
@@ -18369,9 +18201,9 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
//const int n_past = ((int32_t *) tensor->op_params)[0];
const int n_dims = ((int32_t *) tensor->op_params)[1];
const int mode = ((int32_t *) tensor->op_params)[2];
- const int n_ctx = ((int32_t *) tensor->op_params)[3];
- const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
- float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
+ //const int n_ctx = ((int32_t *) tensor->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) tensor->op_params)[4];
+ float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
@@ -18379,8 +18211,6 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
- memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
- memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
src0->grad = ggml_add_or_set(ctx,
src0->grad,
@@ -18390,16 +18220,13 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
src2,
n_dims,
mode,
- n_ctx,
- n_orig_ctx,
+ n_ctx_orig,
freq_base,
freq_scale,
ext_factor,
attn_factor,
beta_fast,
- beta_slow,
- xpos_base,
- xpos_down),
+ beta_slow),
zero_table);
}
} break;
@@ -18409,9 +18236,9 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
//const int n_past = ((int32_t *) tensor->op_params)[0];
const int n_dims = ((int32_t *) tensor->op_params)[1];
const int mode = ((int32_t *) tensor->op_params)[2];
- const int n_ctx = ((int32_t *) tensor->op_params)[3];
- const int n_orig_ctx = ((int32_t *) tensor->op_params)[4];
- float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow, xpos_base, xpos_down;
+ //const int n_ctx = ((int32_t *) tensor->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) tensor->op_params)[4];
+ float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
memcpy(&freq_base, (int32_t *) tensor->op_params + 5, sizeof(float));
memcpy(&freq_scale, (int32_t *) tensor->op_params + 6, sizeof(float));
@@ -18419,8 +18246,6 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
memcpy(&attn_factor, (int32_t *) tensor->op_params + 8, sizeof(float));
memcpy(&beta_fast, (int32_t *) tensor->op_params + 9, sizeof(float));
memcpy(&beta_slow, (int32_t *) tensor->op_params + 10, sizeof(float));
- memcpy(&xpos_base, (int32_t *) tensor->op_params + 11, sizeof(float));
- memcpy(&xpos_down, (int32_t *) tensor->op_params + 12, sizeof(bool));
src0->grad = ggml_add_or_set(ctx,
src0->grad,
@@ -18430,16 +18255,13 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
src2,
n_dims,
mode,
- n_ctx,
- n_orig_ctx,
+ n_ctx_orig,
freq_base,
freq_scale,
ext_factor,
attn_factor,
beta_fast,
beta_slow,
- xpos_base,
- xpos_down,
false),
zero_table);
}
@@ -19546,11 +19368,6 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
{
const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
-#if defined(GGML_USE_CLBLAST)
- if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
- cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
- } else
-#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_compute_forward_mul_mat_use_blas(node)) {
if (node->src[0]->type != GGML_TYPE_F32) {
@@ -22859,7 +22676,7 @@ int ggml_cpu_has_wasm_simd(void) {
}
int ggml_cpu_has_blas(void) {
-#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_SYCL)
+#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CUDA) || defined(GGML_USE_VULKAN) || defined(GGML_USE_SYCL)
return 1;
#else
return 0;
@@ -22874,14 +22691,6 @@ int ggml_cpu_has_cuda(void) {
#endif
}
-int ggml_cpu_has_clblast(void) {
-#if defined(GGML_USE_CLBLAST)
- return 1;
-#else
- return 0;
-#endif
-}
-
int ggml_cpu_has_vulkan(void) {
#if defined(GGML_USE_VULKAN)
return 1;
@@ -22915,8 +22724,7 @@ int ggml_cpu_has_rpc(void) {
}
int ggml_cpu_has_gpublas(void) {
- return ggml_cpu_has_cuda() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
- ggml_cpu_has_sycl();
+ return ggml_cpu_has_cuda() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() || ggml_cpu_has_sycl();
}
int ggml_cpu_has_sse3(void) {
diff --git a/ggml.h b/ggml.h
index f38699698b1e9..13502a3622fc4 100644
--- a/ggml.h
+++ b/ggml.h
@@ -1465,7 +1465,6 @@ extern "C" {
// rotary position embedding
// if mode & 1 == 1, skip n_past elements (NOT SUPPORTED)
// if mode & 2 == 1, GPT-NeoX style
- // if mode & 4 == 1, ChatGLM style
//
// b is an int32 vector with size a->ne[2], it contains the positions
// c is freq factors (e.g. phi3-128k), (optional)
@@ -1474,8 +1473,7 @@ extern "C" {
struct ggml_tensor * a,
struct ggml_tensor * b,
int n_dims,
- int mode,
- int n_ctx);
+ int mode);
// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_rope_inplace(
@@ -1483,8 +1481,7 @@ extern "C" {
struct ggml_tensor * a,
struct ggml_tensor * b,
int n_dims,
- int mode,
- int n_ctx);
+ int mode);
// custom RoPE
GGML_API struct ggml_tensor * ggml_rope_ext(
@@ -1494,8 +1491,7 @@ extern "C" {
struct ggml_tensor * c,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -1511,8 +1507,7 @@ extern "C" {
struct ggml_tensor * c,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -1526,8 +1521,7 @@ extern "C" {
struct ggml_tensor * b,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -1542,8 +1536,7 @@ extern "C" {
struct ggml_tensor * b,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
@@ -1552,17 +1545,9 @@ extern "C" {
float beta_slow),
"use ggml_rope_ext_inplace instead");
- struct ggml_tensor * ggml_rope_xpos_inplace(
- struct ggml_context * ctx,
- struct ggml_tensor * a,
- struct ggml_tensor * b,
- int n_dims,
- float base,
- bool down);
-
// compute correction dims for YaRN RoPE scaling
GGML_CALL void ggml_rope_yarn_corr_dims(
- int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]);
+ int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]);
// rotary position embedding backward, i.e compute dx from dy
// a - dy
@@ -1573,16 +1558,13 @@ extern "C" {
struct ggml_tensor * c,
int n_dims,
int mode,
- int n_ctx,
- int n_orig_ctx,
+ int n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
float attn_factor,
float beta_fast,
- float beta_slow,
- float xpos_base,
- bool xpos_down);
+ float beta_slow);
// clamp
// in-place, returns view(a)
@@ -2425,7 +2407,6 @@ extern "C" {
GGML_API int ggml_cpu_has_wasm_simd (void);
GGML_API int ggml_cpu_has_blas (void);
GGML_API int ggml_cpu_has_cuda (void);
- GGML_API int ggml_cpu_has_clblast (void);
GGML_API int ggml_cpu_has_vulkan (void);
GGML_API int ggml_cpu_has_kompute (void);
GGML_API int ggml_cpu_has_gpublas (void);
diff --git a/kompute-shaders/op_rope_f16.comp b/kompute-shaders/op_rope_f16.comp
index b446225849d5f..1a4058b3f1f10 100644
--- a/kompute-shaders/op_rope_f16.comp
+++ b/kompute-shaders/op_rope_f16.comp
@@ -14,7 +14,7 @@ void main() {
const bool is_neox = (pcs.mode & 2) != 0;
float corr_dims[2];
- rope_yarn_corr_dims(pcs.n_dims, pcs.n_orig_ctx, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims);
+ rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims);
const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims);
diff --git a/kompute-shaders/op_rope_f32.comp b/kompute-shaders/op_rope_f32.comp
index 2c0235d75b6b6..65e03827a2660 100644
--- a/kompute-shaders/op_rope_f32.comp
+++ b/kompute-shaders/op_rope_f32.comp
@@ -14,7 +14,7 @@ void main() {
const bool is_neox = (pcs.mode & 2) != 0;
float corr_dims[2];
- rope_yarn_corr_dims(pcs.n_dims, pcs.n_orig_ctx, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims);
+ rope_yarn_corr_dims(pcs.n_dims, pcs.n_ctx_orig, pcs.freq_base, pcs.beta_fast, pcs.beta_slow, corr_dims);
const float theta_scale = pow(pcs.freq_base, -2.0/pcs.n_dims);
diff --git a/kompute-shaders/rope_common.comp b/kompute-shaders/rope_common.comp
index 57ba6597a7eb2..7b9394cb2fffc 100644
--- a/kompute-shaders/rope_common.comp
+++ b/kompute-shaders/rope_common.comp
@@ -9,7 +9,7 @@ layout (push_constant) uniform parameter {
uint outOff;
int n_dims;
int mode;
- int n_orig_ctx;
+ int n_ctx_orig;
float freq_base;
float freq_scale;
float ext_factor;
@@ -54,14 +54,14 @@ void rope_yarn(
// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
-float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) {
- return n_dims * log(n_orig_ctx / (n_rot * TWOPI_F)) / (2 * log(base));
+float rope_yarn_corr_factor(int n_dims, int n_ctx_orig, float n_rot, float base) {
+ return n_dims * log(n_ctx_orig / (n_rot * TWOPI_F)) / (2 * log(base));
}
void rope_yarn_corr_dims(
- int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, out float dims[2]
+ int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, out float dims[2]
) {
// start and end correction dims
- dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base)));
- dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base)));
+ dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_fast, freq_base)));
+ dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_slow, freq_base)));
}
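For reference, the CPU side computes the same correction dimensions; below is a minimal standalone C sketch that mirrors the GLSL above (an illustration, not the in-tree ggml implementation):

    #include <math.h>

    #define TWOPI_F 6.283185307179586f

    // index of the rotation at which YaRN correction starts/stops, per the comment above:
    // corr_fac(n_rot) = n_dims * log(n_ctx_orig / (n_rot * 2pi)) / (2 * log(base))
    static float rope_yarn_corr_factor(int n_dims, int n_ctx_orig, float n_rot, float base) {
        return n_dims * logf(n_ctx_orig / (n_rot * TWOPI_F)) / (2.0f * logf(base));
    }

    static void rope_yarn_corr_dims(int n_dims, int n_ctx_orig, float freq_base,
                                    float beta_fast, float beta_slow, float dims[2]) {
        // start and end correction dims, clamped to [0, n_dims - 1]
        dims[0] = fmaxf(0.0f,          floorf(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_fast, freq_base)));
        dims[1] = fminf(n_dims - 1.0f, ceilf (rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_slow, freq_base)));
    }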
diff --git a/llama.cpp b/llama.cpp
index e76da869cef68..6def11fee4296 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13,8 +13,6 @@
#ifdef GGML_USE_CUDA
# include "ggml-cuda.h"
-#elif defined(GGML_USE_CLBLAST)
-# include "ggml-opencl.h"
#elif defined(GGML_USE_VULKAN)
# include "ggml-vulkan.h"
#elif defined(GGML_USE_SYCL)
@@ -110,7 +108,7 @@
//
LLAMA_ATTRIBUTE_FORMAT(2, 3)
-static void llama_log_internal (ggml_log_level level, const char* format, ...);
+static void llama_log_internal (ggml_log_level level, const char * format, ...);
static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);
#define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
@@ -1851,7 +1849,7 @@ struct llama_hparams {
float rope_attn_factor = 1.0f;
float rope_freq_base_train;
float rope_freq_scale_train;
- uint32_t n_yarn_orig_ctx;
+ uint32_t n_ctx_orig_yarn;
float rope_yarn_log_mul;
// for State Space Models
@@ -1893,7 +1891,7 @@ struct llama_hparams {
if (this->n_expert_shared != other.n_expert_shared) return true;
if (this->rope_finetuned != other.rope_finetuned) return true;
- if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
+ if (this->n_ctx_orig_yarn != other.n_ctx_orig_yarn) return true;
if (this->ssm_d_conv != other.ssm_d_conv) return true;
if (this->ssm_d_inner != other.ssm_d_inner) return true;
@@ -1952,7 +1950,7 @@ struct llama_cparams {
float rope_freq_base;
float rope_freq_scale;
- uint32_t n_yarn_orig_ctx;
+ uint32_t n_ctx_orig_yarn;
// These hyperparameters are not exposed in GGUF, because all
// existing YaRN models use the same values for them.
float yarn_ext_factor;
@@ -2407,8 +2405,6 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_
buft = ggml_backend_vk_buffer_type(gpu);
#elif defined(GGML_USE_SYCL)
buft = ggml_backend_sycl_buffer_type(gpu);
-#elif defined(GGML_USE_CLBLAST)
- buft = ggml_backend_opencl_buffer_type();
#elif defined(GGML_USE_KOMPUTE)
buft = ggml_backend_kompute_buffer_type(gpu);
if (buft == nullptr) {
@@ -2531,10 +2527,6 @@ static bool llama_kv_cache_init(
}
}
-#ifdef GGML_USE_CLBLAST
- offload = false;
-#endif
-
// count used buffer types
std::map<ggml_backend_buffer_type_t, int> buft_layer_count;
if (offload) {
@@ -4014,8 +4006,8 @@ static void llm_load_hparams(
ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
hparams.rope_finetuned = rope_finetuned;
- hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
- ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_yarn_orig_ctx, false);
+ hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
+ ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
// rope_freq_base (optional)
hparams.rope_freq_base_train = 10000.0f;
@@ -4977,7 +4969,7 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type);
LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
- LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx);
+ LLAMA_LOG_INFO("%s: n_ctx_orig_yarn = %u\n", __func__, hparams.n_ctx_orig_yarn);
LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
LLAMA_LOG_INFO("%s: ssm_d_conv = %u\n", __func__, hparams.ssm_d_conv);
LLAMA_LOG_INFO("%s: ssm_d_inner = %u\n", __func__, hparams.ssm_d_inner);
@@ -7146,7 +7138,7 @@ struct llm_build_context {
const int32_t n_kv; // size of KV cache to consider (n_kv <= kv_self.size)
const int32_t n_outputs;
const int32_t kv_head; // index of where we store new KV data in the cache
- const int32_t n_orig_ctx;
+ const int32_t n_ctx_orig;
const bool flash_attn;
@@ -7195,7 +7187,7 @@ struct llm_build_context {
n_kv (worst_case ? kv_self.size : kv_self.n),
n_outputs (worst_case ? n_tokens : lctx.n_outputs),
kv_head (worst_case ? (kv_self.recurrent ? 0 : kv_self.size - n_tokens) : kv_self.head),
- n_orig_ctx (cparams.n_yarn_orig_ctx),
+ n_ctx_orig (cparams.n_ctx_orig_yarn),
flash_attn (cparams.flash_attn),
pooling_type (cparams.pooling_type),
rope_type (hparams.rope_type),
@@ -7253,7 +7245,7 @@ struct llm_build_context {
ggml_row_size(kv_self.k_l[il]->type, n_embd_head_k),
ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa),
0),
- lctx.inp_K_shift, rope_factors, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ lctx.inp_K_shift, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow);
cb(tmp, "K_shifted", il);
@@ -7362,7 +7354,7 @@ struct llm_build_context {
// choose long/short freq factors based on the context size
const auto n_ctx_pre_seq = cparams.n_ctx / cparams.n_seq_max;
- if (n_ctx_pre_seq > hparams.n_yarn_orig_ctx) {
+ if (n_ctx_pre_seq > hparams.n_ctx_orig_yarn) {
return model.layers[il].rope_long;
}
@@ -7478,14 +7470,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -7609,12 +7601,12 @@ struct llm_build_context {
case MODEL_7B:
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
break;
@@ -7721,14 +7713,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -7841,13 +7833,13 @@ struct llm_build_context {
// using mode = 2 for neox mode
Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -7965,14 +7957,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -8118,14 +8110,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -8472,14 +8464,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -8918,14 +8910,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, Qcur, inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, Kcur, inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -9037,13 +9029,13 @@ struct llm_build_context {
// using mode = 2 for neox mode
Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -9149,14 +9141,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -9263,14 +9255,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -9415,7 +9407,7 @@ struct llm_build_context {
Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
@@ -9426,7 +9418,7 @@ struct llm_build_context {
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -9537,7 +9529,7 @@ struct llm_build_context {
Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
Qcur = ggml_rope_ext(
- ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
@@ -9546,7 +9538,7 @@ struct llm_build_context {
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
- ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, 0, n_orig_ctx,
+ ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig,
freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -9654,13 +9646,13 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_rot, n_head, n_tokens), inp_pos, nullptr,
- n_embd_head, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens), inp_pos, nullptr,
- n_embd_head, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_embd_head, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow);
cb(Kcur, "Kcur", il);
@@ -9862,14 +9854,14 @@ struct llm_build_context {
struct ggml_tensor * Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
struct ggml_tensor * Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -9978,14 +9970,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -10095,14 +10087,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -10225,14 +10217,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -10345,7 +10337,7 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr,
- n_embd_head_k, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow);
cb(Qcur, "Qcur", il);
@@ -10354,7 +10346,7 @@ struct llm_build_context {
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr,
- n_embd_head_k, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow);
cb(Kcur, "Kcur", il);
@@ -10465,14 +10457,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -10755,14 +10747,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -10886,14 +10878,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -11000,14 +10992,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -11135,14 +11127,14 @@ struct llm_build_context {
Qcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Qcur, "Qcur", il);
Kcur = ggml_rope_ext(
ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow
);
cb(Kcur, "Kcur", il);
@@ -11352,7 +11344,7 @@ struct llm_build_context {
q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
q_pe = ggml_rope_ext(
ctx0, q_pe, inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor_scaled, beta_fast, beta_slow
);
cb(q_pe, "q_pe", il);
@@ -11361,7 +11353,7 @@ struct llm_build_context {
k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
k_pe = ggml_rope_ext(
ctx0, k_pe, inp_pos, nullptr,
- n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
ext_factor, attn_factor_scaled, beta_fast, beta_slow
);
cb(k_pe, "k_pe", il);
@@ -14721,260 +14713,6 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
-//
-// Beam search
-//
-
-struct llama_beam {
- std::vector<llama_token> tokens;
- float p; // Cumulative beam probability (renormalized relative to all beams)
- bool eob; // Initialize end-of-beam to false. Callback sets this to true.
- // Sort beams by probability. In case of ties, prefer beams at eob.
- bool operator<(const llama_beam & rhs) const {
- return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
- }
- // Shift off first n tokens and discard them.
- void shift_tokens(const size_t n) {
- if (n) {
- std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
- tokens.resize(tokens.size() - n);
- }
- }
- llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
-};
-
-// A struct for calculating logit-related info.
-struct llama_logit_info {
- const float * const logits;
- const int n_vocab;
- const float max_l;
- const float normalizer;
- struct sum_exp {
- float max_l;
- float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
- };
- llama_logit_info(llama_context * ctx)
- : logits(llama_get_logits(ctx))
- , n_vocab(llama_n_vocab(llama_get_model(ctx)))
- , max_l(*std::max_element(logits, logits + n_vocab))
- , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
- { }
- llama_token_data get_token_data(const llama_token token_id) const {
- constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
- return {token_id, logits[token_id], p};
- }
- // Return top k token_data by logit.
- std::vector<llama_token_data> top_k(size_t k) {
- std::vector<llama_token_data> min_heap; // min-heap by logit
- const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
- min_heap.reserve(k_min);
- for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
- min_heap.push_back(get_token_data(token_id));
- }
- auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
- std::make_heap(min_heap.begin(), min_heap.end(), comp);
- for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
- if (min_heap.front().logit < logits[token_id]) {
- std::pop_heap(min_heap.begin(), min_heap.end(), comp);
- min_heap.back().id = token_id;
- min_heap.back().logit = logits[token_id];
- std::push_heap(min_heap.begin(), min_heap.end(), comp);
- }
- }
- return min_heap;
- }
- float probability_from_logit(float logit) const {
- return normalizer * std::exp(logit - max_l);
- }
-};
-
-struct llama_beam_search_data {
- llama_context * ctx;
- size_t n_beams;
- int n_past;
- int n_predict;
- std::vector<llama_beam> beams;
- std::vector<llama_beam> next_beams;
-
- // Re-calculated on each loop iteration
- size_t common_prefix_length;
-
- // Used to communicate to/from callback on beams state.
- std::vector<llama_beam_view> beam_views;
-
- llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
- : ctx(ctx)
- , n_beams(n_beams)
- , n_past(n_past)
- , n_predict(n_predict)
- , beam_views(n_beams) {
- beams.reserve(n_beams);
- next_beams.reserve(n_beams);
- }
-
- // Collapse beams to a single beam given by index.
- void collapse_beams(const size_t beam_idx) {
- if (0u < beam_idx) {
- std::swap(beams[0], beams[beam_idx]);
- }
- beams.resize(1);
- }
-
- // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
- // The repetitive patterns below reflect the 2 stages of heaps:
- // * Gather elements until the vector is full, then call std::make_heap() on it.
- // * If the heap is full and a new element is found that should be included, pop the
- // least element to the back(), replace it with the new, then push it into the heap.
- void fill_next_beams_by_top_probabilities(llama_beam & beam) {
- // Min-heaps use a greater-than comparator.
- const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
- if (beam.eob) {
- // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
- if (next_beams.size() < n_beams) {
- next_beams.push_back(std::move(beam));
- if (next_beams.size() == n_beams) {
- std::make_heap(next_beams.begin(), next_beams.end(), comp);
- }
- } else if (next_beams.front().p < beam.p) {
- std::pop_heap(next_beams.begin(), next_beams.end(), comp);
- next_beams.back() = std::move(beam);
- std::push_heap(next_beams.begin(), next_beams.end(), comp);
- }
- } else {
- // beam is not at end-of-sentence, so branch with next top_k tokens.
- if (!beam.tokens.empty()) {
- llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
- }
- llama_logit_info logit_info(ctx);
- std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
-
- // Clear the kv slot so that other beams may try different tokens at this position. The llama_decode()
- // call in loop() will conclusively fill in the kv slot once the beams converge at this position.
- llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
-
- size_t i=0;
- if (next_beams.size() < n_beams) {
- for (; next_beams.size() < n_beams ; ++i) {
- llama_beam next_beam = beam;
- next_beam.tokens.push_back(next_tokens[i].id);
- next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
- next_beams.push_back(std::move(next_beam));
- }
- std::make_heap(next_beams.begin(), next_beams.end(), comp);
- } else {
- for (; next_beams.front().p == 0.0f ; ++i) {
- std::pop_heap(next_beams.begin(), next_beams.end(), comp);
- next_beams.back() = beam;
- next_beams.back().tokens.push_back(next_tokens[i].id);
- next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
- std::push_heap(next_beams.begin(), next_beams.end(), comp);
- }
- }
- for (; i < n_beams ; ++i) {
- const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
- if (next_beams.front().p < next_p) {
- std::pop_heap(next_beams.begin(), next_beams.end(), comp);
- next_beams.back() = beam;
- next_beams.back().tokens.push_back(next_tokens[i].id);
- next_beams.back().p = next_p;
- std::push_heap(next_beams.begin(), next_beams.end(), comp);
- }
- }
- }
- }
-
- // Find common_prefix_length based on beams.
- // Requires beams is not empty.
- size_t find_common_prefix_length() {
- size_t common_prefix_length = beams[0].tokens.size();
- for (size_t i = 1 ; i < beams.size() ; ++i) {
- common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
- for (size_t j = 0 ; j < common_prefix_length ; ++j) {
- if (beams[0].tokens[j] != beams[i].tokens[j]) {
- common_prefix_length = j;
- break;
- }
- }
- }
- return common_prefix_length;
- }
-
- // Construct beams_state to send back to caller via the callback function.
- // Side effect: set common_prefix_length = find_common_prefix_length();
- llama_beams_state get_beams_state(const bool last_call) {
- for (size_t i = 0 ; i < beams.size() ; ++i) {
- beam_views[i] = beams[i].view();
- }
- common_prefix_length = find_common_prefix_length();
- return {beam_views.data(), beams.size(), common_prefix_length, last_call};
- }
-
- // Loop:
- // * while i < n_predict, AND
- // * any of the beams have not yet reached end-of-beam (eob), AND
- // * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
- // (since all other beam probabilities can only decrease)
- void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
- beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
- const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
- for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
- !beams[top_beam_index()].eob ; ++i) {
- callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
- update_beams_from_beam_views(); // Update values (p,eob) that callback may have changed.
- if (common_prefix_length) {
- llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
- n_past += common_prefix_length;
- }
- // Zero-out next_beam probabilities to place them last in following min-heap.
- std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
- for (llama_beam & beam : beams) {
- beam.shift_tokens(common_prefix_length);
- fill_next_beams_by_top_probabilities(beam);
- }
- // next_beams become the beams of next/final iteration. Swap them to re-use memory.
- beams.swap(next_beams);
- renormalize_beam_probabilities(beams);
- }
- collapse_beams(top_beam_index());
- callback(callback_data, get_beams_state(true));
- }
-
- // As beams grow, the cumulative probabilities decrease.
- // Renormalize them to avoid floating point underflow.
- static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {