From 33f0761e9be03c9a762518e1c08a554ad46dd251 Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 28 Dec 2023 19:15:46 +0100 Subject: [PATCH 01/34] llama : ggml-backend integration --- ggml-alloc.c | 11 + ggml-backend-impl.h | 1 + ggml-backend.c | 62 +- ggml-backend.h | 8 + ggml-cuda.cu | 324 ++------ ggml-cuda.h | 26 +- ggml.c | 4 + llama.cpp | 1828 +++++++++++++++---------------------------- 8 files changed, 753 insertions(+), 1511 deletions(-) diff --git a/ggml-alloc.c b/ggml-alloc.c index a27dd54b0eb06..5e730aa9a9d96 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -779,10 +779,21 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte if (nbytes == 0) { // all the tensors in the context are already allocated +#ifndef NDEBUG + fprintf(stderr, "%s: all tensors in the context are already allocated\n", __func__); +#endif return NULL; } ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, nbytes); + if (buffer == NULL) { + // failed to allocate buffer +#ifndef NDEBUG + fprintf(stderr, "%s: failed to allocate buffer\n", __func__); +#endif + return NULL; + } + ggml_tallocr_t tallocr = ggml_tallocr_new_from_buffer(buffer); for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { diff --git a/ggml-backend-impl.h b/ggml-backend-impl.h index ca21b474372a6..aa2587a196f89 100644 --- a/ggml-backend-impl.h +++ b/ggml-backend-impl.h @@ -51,6 +51,7 @@ extern "C" { ggml_backend_buffer_type_t buft; ggml_backend_buffer_context_t context; size_t size; + enum ggml_backend_buffer_usage usage; }; ggml_backend_buffer_t ggml_backend_buffer_init( diff --git a/ggml-backend.c b/ggml-backend.c index 53e741cb892f8..10c69c5711746 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -58,6 +58,7 @@ ggml_backend_buffer_t ggml_backend_buffer_init( /* .buft = */ buft, /* .context = */ context, /* .size = */ size, + /* .usage = */ GGML_BACKEND_BUFFER_USAGE_ANY }; return buffer; @@ -109,6 +110,10 @@ bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) { return ggml_backend_buft_is_host(ggml_backend_buffer_type(buffer)); } +void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { + buffer->usage = usage; +} + ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer) { return buffer->buft; } @@ -777,7 +782,7 @@ static ggml_backend_t get_allocr_backend(ggml_backend_sched_t sched, ggml_talloc } #if 0 -static char causes[GGML_DEFAULT_GRAPH_SIZE*8 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove +static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove #define SET_CAUSE(node, ...) 
sprintf(causes[hash_id(node)], __VA_ARGS__) #define GET_CAUSE(node) causes[hash_id(node)] #else @@ -812,17 +817,25 @@ static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct if (src == NULL) { break; } + ggml_backend_t src_backend = get_buffer_backend(sched, src->buffer); - if (src_backend != NULL) { + if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { + // operations with weights are always on the same backend as the weights + cur_backend = src_backend; + SET_CAUSE(node, "1.wgt%d", i); + break; + } + + //if (src_backend != NULL) { int src_prio = sched_backend_prio(sched, src_backend); size_t src_size = ggml_nbytes(src); - if (src_prio < cur_prio && src_size >= cur_size) { + if (/*src_prio < cur_prio &&*/ src_size >= cur_size) { cur_prio = src_prio; cur_size = src_size; cur_backend = src_backend; SET_CAUSE(node, "1.src%d", i); } - } + //} } return cur_backend; } @@ -933,6 +946,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#if 0 // pass 2: assign backends to ops from current assignments // TODO: // - reuse sched_backend_from_cur @@ -964,6 +978,23 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } } } +#else + // pass 2: assign backends to ops from current assignments + // start from the end and assign the same backend to previous ops + { + ggml_tallocr_t cur_allocr = NULL; + for (int i = graph->n_nodes - 1; i >= 0; i--) { + struct ggml_tensor * node = graph->nodes[i]; + ggml_tallocr_t node_allocr = node_allocr(node); + if (node_allocr != NULL) { + cur_allocr = node_allocr; + } else { + node_allocr(node) = cur_allocr; + SET_CAUSE(node, "2.cur"); + } + } + } +#endif //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); // pass 3: assign backends to remaining src from dst (should only be leafs) @@ -1029,9 +1060,21 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } ggml_tallocr_t src_allocr = node_allocr(src); if (src_allocr != node_allocr) { - int n_inputs = sched->splits[cur_split].n_inputs++; - GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS); - sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src; + // check if the input is already in the split + bool found = false; + for (int k = 0; k < sched->splits[cur_split].n_inputs; k++) { + if (sched->splits[cur_split].inputs[k] == src) { + found = true; + break; + } + } + + if (!found) { + int n_inputs = sched->splits[cur_split].n_inputs++; + //printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr))); + GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS); + sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src; + } // create copies size_t id = hash_id(src); @@ -1235,6 +1278,10 @@ void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cg sched_reset(sched); } +int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) { + return sched->n_splits; +} + ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) { int backend_index = sched_backend_prio(sched, backend); return sched->tallocs[backend_index]; @@ -1320,6 +1367,7 @@ static void graph_init_tensor(struct ggml_hash_set hash_set, struct ggml_tensor struct ggml_tensor * dst = node_copies[id]; if (dst->view_src != NULL) { + graph_init_tensor(hash_set, node_copies, node_init, 
src->view_src); ggml_backend_view_init(dst->view_src->buffer, dst); } else { diff --git a/ggml-backend.h b/ggml-backend.h index 85ff67b0ea843..8649ab766f2e7 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -24,6 +24,11 @@ extern "C" { GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft); // buffer + enum ggml_backend_buffer_usage { + GGML_BACKEND_BUFFER_USAGE_ANY = 0, + GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1, + }; + GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); @@ -32,8 +37,10 @@ extern "C" { GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer); + // // Backend // @@ -146,6 +153,7 @@ extern "C" { // Initialize backend buffers from a measure graph GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); + GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched); GGML_API ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend); GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend); diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 10c21615e6b71..2af16887c4a2e 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -554,10 +554,6 @@ struct cuda_device_capabilities { static cuda_device_capabilities g_device_caps[GGML_CUDA_MAX_DEVICES] = { {0, false, 0} }; -static void * g_scratch_buffer = nullptr; -static size_t g_scratch_size = 0; // disabled by default -static size_t g_scratch_offset = 0; - static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr}; [[noreturn]] @@ -9158,247 +9154,7 @@ static size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_spl return nrows_split*ggml_row_size(tensor->type, tensor->ne[0]); } -void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) { - const int64_t nrows = ggml_nrows(tensor); - - const int64_t ne0 = tensor->ne[0]; - - const size_t nb1 = tensor->nb[1]; - - ggml_backend_type backend = tensor->backend; - ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu; - memset(extra, 0, sizeof(*extra)); - - for (int id = 0; id < g_device_count; ++id) { - if (backend == GGML_BACKEND_GPU && id != g_main_device) { - continue; - } - - ggml_cuda_set_device(id); - - int64_t row_low, row_high; - if (backend == GGML_BACKEND_GPU) { - row_low = 0; - row_high = nrows; - } else if (backend == GGML_BACKEND_GPU_SPLIT) { - const int64_t rounding = get_row_rounding(tensor->type); - - row_low = id == 0 ? 
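// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): how a caller might use the new
// buffer-usage API and scheduler query added above. Tagging a buffer as
// GGML_BACKEND_BUFFER_USAGE_WEIGHTS makes the scheduler keep ops that use those
// weights on the backend that holds them (see sched_backend_from_cur above).
// The variables ctx, buft, sched and graph are assumed to exist in the caller;
// all function names come from this patch.
ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
if (buf == NULL) {
    // with this patch the call may return NULL: either every tensor in the
    // context is already allocated, or the backend allocation failed
    fprintf(stderr, "failed to allocate weights buffer\n");
} else {
    ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
}

ggml_backend_sched_graph_compute(sched, graph);
printf("graph was executed in %d splits\n", ggml_backend_sched_get_n_splits(sched));
// ---------------------------------------------------------------------------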
0 : nrows*g_tensor_split[id]; - row_low -= row_low % rounding; - - if (id == g_device_count - 1) { - row_high = nrows; - } else { - row_high = nrows*g_tensor_split[id + 1]; - row_high -= row_high % rounding; - } - } else { - GGML_ASSERT(false); - } - if (row_low == row_high) { - continue; - } - - int64_t nrows_split = row_high - row_low; - - const size_t offset_split = row_low*nb1; - size_t size = ggml_nbytes_split(tensor, nrows_split); - const size_t original_size = size; - - // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses - if (ne0 % MATRIX_ROW_PADDING != 0) { - size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); - } - - char * buf; - CUDA_CHECK(cudaMalloc(&buf, size)); - char * buf_host = (char *)data + offset_split; - - // set padding to 0 to avoid possible NaN values - if (size > original_size) { - CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size)); - } - - CUDA_CHECK(cudaMemcpy(buf, buf_host, original_size, cudaMemcpyHostToDevice)); - - extra->data_device[id] = buf; - - if (backend == GGML_BACKEND_GPU_SPLIT) { - for (int64_t is = 0; is < MAX_STREAMS; ++is) { - CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id][is], cudaEventDisableTiming)); - } - } - } - - tensor->extra = extra; -} - -void ggml_cuda_free_data(struct ggml_tensor * tensor) { - if (!tensor || !tensor->extra || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) { - return; - } - - ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra; - - for (int id = 0; id < g_device_count; ++id) { - ggml_cuda_set_device(id); - if (extra->data_device[id] != nullptr) { - CUDA_CHECK(cudaFree(extra->data_device[id])); - } - - for (int64_t is = 0; is < MAX_STREAMS; ++is) { - if (extra->events[id][is] != nullptr) { - CUDA_CHECK(cudaEventDestroy(extra->events[id][is])); - } - } - } - - delete extra; -} - -static ggml_tensor_extra_gpu * g_temp_tensor_extras = nullptr; -static size_t g_temp_tensor_extra_index = 0; - -static ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() { - if (g_temp_tensor_extras == nullptr) { - g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_CUDA_MAX_NODES]; - } - - size_t alloc_index = g_temp_tensor_extra_index; - g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_CUDA_MAX_NODES; - ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index]; - memset(extra, 0, sizeof(*extra)); - - return extra; -} - -static void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) { - if (scratch && g_scratch_size == 0) { - return; - } - - tensor->backend = GGML_BACKEND_GPU; - - // recursively assign CUDA buffers until a compute tensor is found - if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) { - const ggml_op src0_op = tensor->src[0]->op; - if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) { - ggml_cuda_assign_buffers_impl(tensor->src[0], scratch, force_inplace, no_alloc); - } - } - if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) { - ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace, no_alloc); - } - - if (scratch && no_alloc) { - return; - } - - ggml_tensor_extra_gpu * extra; - - const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) || - tensor->op == GGML_OP_VIEW || - force_inplace; - const size_t size = 
ggml_nbytes(tensor); - - ggml_cuda_set_device(g_main_device); - if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) { - ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra; - char * src0_ddc = (char *) src0_extra->data_device[g_main_device]; - size_t offset = 0; - if (tensor->op == GGML_OP_VIEW) { - memcpy(&offset, tensor->op_params, sizeof(size_t)); - } - extra = ggml_cuda_alloc_temp_tensor_extra(); - extra->data_device[g_main_device] = src0_ddc + offset; - } else if (tensor->op == GGML_OP_CPY) { - ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu * ) tensor->src[1]->extra; - void * src1_ddv = src1_extra->data_device[g_main_device]; - extra = ggml_cuda_alloc_temp_tensor_extra(); - extra->data_device[g_main_device] = src1_ddv; - } else if (scratch) { - GGML_ASSERT(size <= g_scratch_size); - if (g_scratch_offset + size > g_scratch_size) { - g_scratch_offset = 0; - } - - char * data = (char *) g_scratch_buffer; - if (data == nullptr) { - CUDA_CHECK(cudaMalloc(&data, g_scratch_size)); - g_scratch_buffer = data; - } - extra = ggml_cuda_alloc_temp_tensor_extra(); - extra->data_device[g_main_device] = data + g_scratch_offset; - - g_scratch_offset += size; - - GGML_ASSERT(g_scratch_offset <= g_scratch_size); - } else { // allocate new buffers outside of scratch - void * data; - CUDA_CHECK(cudaMalloc(&data, size)); - CUDA_CHECK(cudaMemset(data, 0, size)); - extra = new ggml_tensor_extra_gpu; - memset(extra, 0, sizeof(*extra)); - extra->data_device[g_main_device] = data; - } - - tensor->extra = extra; -} - -void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset) { - if (g_scratch_size == 0) { - return; - } - if (g_scratch_buffer == nullptr) { - ggml_cuda_set_device(g_main_device); - CUDA_CHECK(cudaMalloc(&g_scratch_buffer, g_scratch_size)); - } - - ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra(); - - const bool inplace = tensor->view_src != nullptr; - - if (inplace && (tensor->view_src->backend == GGML_BACKEND_GPU || tensor->view_src->backend == GGML_BACKEND_GPU_SPLIT)) { - ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->view_src->extra; - char * src0_ddc = (char *) src0_extra->data_device[g_main_device]; - size_t view_offset = 0; - if (tensor->op == GGML_OP_VIEW) { - memcpy(&view_offset, tensor->op_params, sizeof(size_t)); - } - extra->data_device[g_main_device] = src0_ddc + view_offset; - } else { - extra->data_device[g_main_device] = (char *) g_scratch_buffer + offset; - } - - tensor->extra = extra; -} - -void ggml_cuda_copy_to_device(struct ggml_tensor * tensor) { - GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); - GGML_ASSERT(ggml_is_contiguous(tensor)); - - ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra; - ggml_cuda_set_device(g_main_device); - CUDA_CHECK(cudaMemcpy(extra->data_device[g_main_device], tensor->data, ggml_nbytes(tensor), cudaMemcpyHostToDevice)); -} - -void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) { - ggml_cuda_assign_buffers_impl(tensor, true, false, false); -} - -void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor) { - ggml_cuda_assign_buffers_impl(tensor, true, false, true); -} - -void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor) { - ggml_cuda_assign_buffers_impl(tensor, false, false, false); -} - -void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor) { - ggml_cuda_assign_buffers_impl(tensor, false, true, 
false); -} - -void ggml_cuda_set_main_device(const int main_device) { +static void ggml_cuda_set_main_device(const int main_device) { if (main_device >= g_device_count) { fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n", main_device, g_device_count, g_main_device); @@ -9407,28 +9163,10 @@ void ggml_cuda_set_main_device(const int main_device) { if (g_main_device != main_device && g_device_count > 1) { g_main_device = main_device; - cudaDeviceProp prop; - CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device)); - fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name); - } -} - -void ggml_cuda_set_scratch_size(const size_t scratch_size) { - // this is a hack to not completely break llama.cpp when using multiple models or contexts simultaneously - // it still won't always work as expected, but it's better than nothing - if (scratch_size > g_scratch_size) { - ggml_cuda_free_scratch(); + //cudaDeviceProp prop; + //CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device)); + //fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name); } - g_scratch_size = std::max(g_scratch_size, scratch_size); -} - -void ggml_cuda_free_scratch() { - if (g_scratch_buffer == nullptr) { - return; - } - - CUDA_CHECK(cudaFree(g_scratch_buffer)); - g_scratch_buffer = nullptr; } bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { @@ -9609,6 +9347,12 @@ void ggml_cuda_get_device_description(int device, char * description, size_t des #define UNUSED GGML_UNUSED +struct ggml_backend_context_cuda { + int device; + char name[128]; +}; + + // cuda buffer struct ggml_backend_buffer_context_cuda { @@ -9689,8 +9433,8 @@ static void ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, gg ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaDeviceSynchronize()); - CUDA_CHECK(cudaMemcpy((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaDeviceSynchronize()); } static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { @@ -9700,7 +9444,6 @@ static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, co ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaDeviceSynchronize()); - CUDA_CHECK(cudaMemcpy(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost)); } @@ -9709,8 +9452,8 @@ static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaDeviceSynchronize()); - CUDA_CHECK(cudaMemset(ctx->dev_ptr, value, buffer->size)); + CUDA_CHECK(cudaDeviceSynchronize()); } static struct ggml_backend_buffer_i cuda_backend_buffer_interface = { @@ -9734,7 +9477,11 @@ static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_bac size = std::max(size, (size_t)1); // cudaMalloc returns null for size 0 void * dev_ptr; - CUDA_CHECK(cudaMalloc(&dev_ptr, size)); + cudaError_t err = cudaMalloc(&dev_ptr, size); + if (err != cudaSuccess) { + fprintf(stderr, "%s: allocating %.2f MiB on device %d: cudaMalloc failed: %s\n", __func__, size/1024.0/1024.0, device, cudaGetErrorString(err)); + return nullptr; + } ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(device, dev_ptr); @@ -9768,9 +9515,14 @@ static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_t } static 
bool ggml_backend_cuda_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) { - return ggml_backend_is_cuda(backend); + if (!ggml_backend_is_cuda(backend)) { + return false; + } - UNUSED(buft); + int device = (int) (intptr_t) buft->context; + ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + + return device == cuda_ctx->device; } static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { @@ -9838,14 +9590,13 @@ ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() { // backend -struct ggml_backend_context_cuda { - int device; -}; - static const char * ggml_backend_cuda_name(ggml_backend_t backend) { - return GGML_CUDA_NAME; - - UNUSED(backend); + //return GGML_CUDA_NAME; + //UNUSED(backend); + ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + // TODO: on init + sprintf(cuda_ctx->name, "%s%d", "CUD", cuda_ctx->device); + return cuda_ctx->name; } static void ggml_backend_cuda_free(ggml_backend_t backend) { @@ -10125,6 +9876,21 @@ bool ggml_backend_is_cuda(ggml_backend_t backend) { return backend->iface.get_name == ggml_backend_cuda_name; } +int ggml_backend_cuda_get_device_count() { + return ggml_cuda_get_device_count(); +} + +void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size) { + ggml_cuda_get_device_description(device, description, description_size); +} + +void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total) { + ggml_cuda_set_device(device); + + CUDA_CHECK(cudaMemGetInfo(free, total)); +} + +// backend registry static ggml_backend_t ggml_backend_reg_cuda_init(const char * params, void * user_data) { ggml_backend_t cuda_backend = ggml_backend_cuda_init((int) (intptr_t) user_data); return cuda_backend; diff --git a/ggml-cuda.h b/ggml-cuda.h index cdb0c0c41618a..7b48782d64e95 100644 --- a/ggml-cuda.h +++ b/ggml-cuda.h @@ -27,22 +27,6 @@ GGML_API void * ggml_cuda_host_malloc(size_t size); GGML_API void ggml_cuda_host_free(void * ptr); GGML_API bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -GGML_API void ggml_cuda_set_tensor_split(const float * tensor_split); -GGML_API void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor); -GGML_API void ggml_cuda_free_data(struct ggml_tensor * tensor); - -GGML_API void ggml_cuda_assign_buffers(struct ggml_tensor * tensor); -GGML_API void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor); -GGML_API void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor); - -GGML_API void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor); -GGML_API void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset); -GGML_API void ggml_cuda_copy_to_device(struct ggml_tensor * tensor); - -GGML_API void ggml_cuda_set_main_device(int main_device); -GGML_API void ggml_cuda_set_mul_mat_q(bool mul_mat_q); -GGML_API void ggml_cuda_set_scratch_size(size_t scratch_size); -GGML_API void ggml_cuda_free_scratch(void); GGML_API bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor); GGML_API int ggml_cuda_get_device_count(void); @@ -52,13 +36,17 @@ GGML_API void ggml_cuda_get_device_description(int device, char * description, GGML_API ggml_backend_t ggml_backend_cuda_init(int device); GGML_API bool ggml_backend_is_cuda(ggml_backend_t backend); -GGML_API int 
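// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): enumerating CUDA devices with the
// backend helpers introduced above (ggml_backend_cuda_get_device_count,
// ggml_backend_cuda_get_device_description, ggml_backend_cuda_get_device_memory).
// Nothing outside this patch is assumed.
int n_dev = ggml_backend_cuda_get_device_count();
for (int i = 0; i < n_dev; ++i) {
    char desc[256];
    size_t free_mem, total_mem;
    ggml_backend_cuda_get_device_description(i, desc, sizeof(desc));
    ggml_backend_cuda_get_device_memory(i, &free_mem, &total_mem);
    printf("device %d: %s, %zu/%zu MiB free\n",
        i, desc, free_mem/1024/1024, total_mem/1024/1024);
}
// ---------------------------------------------------------------------------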
ggml_backend_cuda_get_device(ggml_backend_t backend); GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device); - -// pinned host buffer for use with CPU backend for faster copies between CPU and GPU +// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void); +GGML_API int ggml_backend_cuda_get_device_count(void); +GGML_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size); +GGML_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total); + + + #ifdef __cplusplus } #endif diff --git a/ggml.c b/ggml.c index 62f0f18ef3b70..05531a73d9ed4 100644 --- a/ggml.c +++ b/ggml.c @@ -2324,6 +2324,10 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { } void ggml_free(struct ggml_context * ctx) { + if (ctx == NULL) { + return; + } + // make this function thread safe ggml_critical_section_start(); diff --git a/llama.cpp b/llama.cpp index 3bb056dba2e6d..266f2b1f81f93 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1,5 +1,4 @@ #define LLAMA_API_INTERNAL -//#define LLAMA_GGML_BACKEND_CUDA_TEST // for testing only - enables ggml-cuda through ggml-backend, disables partial offloading #include "llama.h" #include "unicode.h" @@ -152,10 +151,6 @@ static bool is_float_close(float a, float b, float abs_tol) { return std::fabs(b - a) <= abs_tol; } -#ifdef GGML_USE_CPU_HBM -#include -#endif - static void zeros(std::ofstream & file, size_t n) { char zero = 0; for (size_t i = 0; i < n; ++i) { @@ -1190,12 +1185,6 @@ struct llama_mlock { #endif }; -typedef void (*offload_func_t)(struct ggml_tensor * tensor); - -static void ggml_offload_nop(struct ggml_tensor * tensor) { - (void) tensor; -} - static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) { std::vector result(8, 0); const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size()); @@ -1211,19 +1200,15 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_ return std::string(result.data(), result.size()); } -static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) { +static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer) { ggml_backend_buffer_type_t buft = nullptr; -#ifdef GGML_USE_METAL - if (n_gpu_layers > 0) { - buft = ggml_backend_metal_buffer_type(); - } -#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST) - if (n_gpu_layers > 0) { - buft = ggml_backend_cuda_buffer_type(0); +#if defined(GGML_USE_CUBLAS) + // in some cases such as the KV cache, there is no benefit to using a host buffer, + // since the data is never copied to the GPU + if (host_buffer) { + buft = ggml_backend_cuda_host_buffer_type(); } -#elif defined(GGML_USE_CUBLAS) - buft = ggml_backend_cuda_host_buffer_type(); #elif defined(GGML_USE_CPU_HBM) buft = ggml_backend_cpu_hbm_buffer_type(); #endif @@ -1231,10 +1216,42 @@ static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) { if (buft == nullptr) { buft = ggml_backend_cpu_buffer_type(); } + return buft; + + GGML_UNUSED(host_buffer); +} + +static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) { + ggml_backend_buffer_type_t buft = nullptr; + +#ifdef GGML_USE_METAL + buft = ggml_backend_metal_buffer_type(); +#elif defined(GGML_USE_CUBLAS) + buft = ggml_backend_cuda_buffer_type(gpu); +#endif + + if (buft == nullptr) { 
+ buft = llama_default_buffer_type_cpu(true); + } + return buft; + + GGML_UNUSED(gpu); +} + +static ggml_backend_buffer_type_t llama_default_buffer_type_split(int main_gpu, const float * tensor_split) { + ggml_backend_buffer_type_t buft = nullptr; + +#ifdef GGML_USE_CUBLAS + // TODO + // buft = ggml_backend_cuda_buffer_type_split(tensor_split); +#endif + if (buft == nullptr) { + buft = llama_default_buffer_type_offload(main_gpu); + } return buft; - GGML_UNUSED(n_gpu_layers); + GGML_UNUSED(tensor_split); } // @@ -1445,24 +1462,16 @@ struct llama_kv_cache { std::vector k_l; // per layer std::vector v_l; - struct ggml_context * ctx = NULL; - - ggml_backend_buffer_t buf = NULL; + std::vector ctxs; + std::vector bufs; ~llama_kv_cache() { -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - if (ggml_cublas_loaded()) { - for (size_t i = 0; i < k_l.size(); ++i) { - ggml_cuda_free_data(k_l[i]); - ggml_cuda_free_data(v_l[i]); - } - } -#endif - if (ctx) { + for (struct ggml_context * ctx : ctxs) { ggml_free(ctx); } - - ggml_backend_buffer_free(buf); + for (ggml_backend_buffer_t buf : bufs) { + ggml_backend_buffer_free(buf); + } } }; @@ -1539,16 +1548,31 @@ struct llama_model { std::vector layers; + int main_gpu; int n_gpu_layers; // gguf metadata std::unordered_map gguf_kv; - // context - struct ggml_context * ctx = NULL; + // layer -> buffer type mapping + struct layer_buft { + layer_buft() : buft_matrix(nullptr), buft(nullptr) {} + layer_buft(ggml_backend_buffer_type_t matrix) : buft_matrix(matrix), buft(matrix) {} + layer_buft(ggml_backend_buffer_type_t matrix, ggml_backend_buffer_type_t other) : buft_matrix(matrix), buft(other) {} + + ggml_backend_buffer_type_t buft_matrix; // matrices only - used by split buffers and backends that support only matrix multiplication + ggml_backend_buffer_type_t buft; // everything else + }; + + layer_buft buft_input; + layer_buft buft_output; + std::vector buft_layer; - // the model memory buffer - ggml_backend_buffer_t buf = NULL; + // contexts where the model tensors metadata is stored + std::vector ctxs; + + // the model memory buffers for the tensor data + std::vector bufs; // model memory mapped file std::unique_ptr mapping; @@ -1564,39 +1588,27 @@ struct llama_model { int64_t t_start_us = 0; ~llama_model() { -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - if (ggml_cublas_loaded()) { - for (size_t i = 0; i < tensors_by_name.size(); ++i) { - ggml_cuda_free_data(tensors_by_name[i].second); - } - ggml_cuda_free_scratch(); - } -#endif - -#if defined(GGML_USE_CLBLAST) - for (size_t i = 0; i < tensors_by_name.size(); ++i) { - ggml_cl_free_data(tensors_by_name[i].second); - } -#endif - if (ctx) { + for (struct ggml_context * ctx : ctxs) { ggml_free(ctx); } - - ggml_backend_buffer_free(buf); + for (ggml_backend_buffer_t buf : bufs) { + ggml_backend_buffer_free(buf); + } } }; struct llama_context { llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {} ~llama_context() { - ggml_allocr_free(alloc); - ggml_backend_buffer_free(buf_alloc); + ggml_backend_sched_free(sched); + // TODO: free all backends ggml_backend_free(backend); } llama_cparams cparams; ggml_backend_t backend = nullptr; + ggml_backend_t backend_cpu = nullptr; const llama_model & model; @@ -1630,8 +1642,9 @@ struct llama_context { // memory buffers used to evaluate the model std::vector buf_compute_meta; - ggml_backend_buffer_t buf_alloc = NULL; - ggml_allocr * alloc = NULL; + ggml_backend_sched_t 
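// ---------------------------------------------------------------------------
// Illustrative note (not part of the patch): the three llama_default_buffer_type_*
// helpers above form a fallback chain. _split() is still a TODO in this patch and
// falls back to _offload(), which in turn falls back to _cpu(true) when no GPU
// backend is compiled in, so callers can unconditionally ask for the most specific
// type. main_gpu and tensor_split are assumed to come from the caller, as in
// llm_load_tensors below.
ggml_backend_buffer_type_t buft_matrix = llama_default_buffer_type_split(main_gpu, tensor_split); // large matrices (row-split capable)
ggml_backend_buffer_type_t buft_other  = llama_default_buffer_type_offload(main_gpu);             // everything else on the main GPU
// ---------------------------------------------------------------------------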
sched = nullptr; + // allocator for the input tensors + ggml_tallocr * alloc = nullptr; // temporary buffer for copying data to/from the backend std::vector> buf_copy; @@ -1646,16 +1659,17 @@ struct llama_context { // static bool llama_kv_cache_init( - const struct llama_hparams & hparams, struct llama_kv_cache & cache, + const llama_model & model, ggml_type ktype, ggml_type vtype, uint32_t n_ctx, - int n_gpu_layers, bool offload) { + const struct llama_hparams & hparams = model.hparams; + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(); const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(); - const uint32_t n_layer = hparams.n_layer; + const int64_t n_layer = hparams.n_layer; cache.has_shift = false; @@ -1666,62 +1680,61 @@ static bool llama_kv_cache_init( cache.cells.clear(); cache.cells.resize(n_ctx); - struct ggml_init_params params; - params.mem_size = 2u*n_layer*ggml_tensor_overhead(); - params.mem_buffer = NULL; - params.no_alloc = true; - - cache.ctx = ggml_init(params); - - size_t vram_kv_cache = 0; + // count used buffer types + std::map buft_layer_count; + if (offload) { + for (int64_t i = 0; i < n_layer; ++i) { + buft_layer_count[model.buft_layer[i].buft]++; + } + } else { + buft_layer_count[llama_default_buffer_type_cpu(false)] = n_layer; + } - if (!cache.ctx) { - LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__); - return false; + std::map ctx_map; + for (auto & it : buft_layer_count) { + int n_layers = it.second; + struct ggml_init_params params = { + /*.mem_size =*/ 2u*n_layers*ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + ggml_context * ctx = ggml_init(params); + if (!ctx) { + LLAMA_LOG_ERROR("%s: failed to allocate context for kv cache\n", __func__); + return false; + } + ctx_map[it.first] = ctx; + cache.ctxs.push_back(ctx); } cache.k_l.reserve(n_layer); cache.v_l.reserve(n_layer); - const int i_gpu_start = (int) n_layer - n_gpu_layers; - for (int i = 0; i < (int) n_layer; i++) { - ggml_tensor * k = ggml_new_tensor_1d(cache.ctx, ktype, n_embd_k_gqa*n_ctx); - ggml_tensor * v = ggml_new_tensor_1d(cache.ctx, vtype, n_embd_v_gqa*n_ctx); + struct ggml_context * ctx = offload ? 
ctx_map[model.buft_layer[i].buft] : cache.ctxs.front(); + ggml_tensor * k = ggml_new_tensor_1d(ctx, ktype, n_embd_k_gqa*n_ctx); + ggml_tensor * v = ggml_new_tensor_1d(ctx, vtype, n_embd_v_gqa*n_ctx); ggml_format_name(k, "cache_k_l%d", i); ggml_format_name(v, "cache_v_l%d", i); cache.k_l.push_back(k); cache.v_l.push_back(v); -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - if (i >= i_gpu_start) { - if (offload) { - ggml_cuda_assign_buffers_no_scratch(k); - ggml_cuda_assign_buffers_no_scratch(v); - vram_kv_cache += ggml_nbytes(k); - vram_kv_cache += ggml_nbytes(v); - // HACK: mark tensor as allocated - k->data = v->data = (void *)(uintptr_t)1; - } - } -#endif // GGML_USE_CUBLAS } - // allocate tensors - cache.buf = ggml_backend_alloc_ctx_tensors_from_buft(cache.ctx, llama_default_buffer_type(n_gpu_layers)); - - // buf may be NULL with full offload - if (cache.buf) { - // initialize the buffer to avoid NaNs in the padding - ggml_backend_buffer_clear(cache.buf, 0); - } - - if (vram_kv_cache > 0) { - LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MB\n", __func__, vram_kv_cache / 1024.0 / 1024.0); + // allocate tensors and initialize the buffers to avoid NaNs in the padding + for (auto it : ctx_map) { + ggml_backend_buffer_type_t buft = it.first; + ggml_context * ctx = it.second; + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (!buf) { + LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__); + return false; + } + ggml_backend_buffer_clear(buf, 0); + // FIXME: buffer type name + LLAMA_LOG_INFO("%s: KV %10s buffer size: %.02f MiB\n", __func__, "???", ggml_backend_buffer_get_size(buf)/1024.0/1024.0); + cache.bufs.push_back(buf); } - GGML_UNUSED(i_gpu_start); - GGML_UNUSED(offload); - return true; } @@ -2326,9 +2339,8 @@ struct llama_model_loader { return get_tensor_meta(get_tensor_name(i)); } - struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) { + struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta) { struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta); - tensor->backend = backend; // TODO: ggml_set_backend ggml_set_name(tensor, ggml_get_name(meta)); n_created++; @@ -2336,7 +2348,7 @@ struct llama_model_loader { return tensor; } - struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector & ne, ggml_backend_type backend, bool required = true) { + struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector & ne, bool required = true) { struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str()); if (cur == NULL) { @@ -2346,11 +2358,12 @@ struct llama_model_loader { throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); } - if (backend == GGML_BACKEND_GPU_SPLIT) { - if (ne.size() == 1) { - throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str())); - } - } + // FIXME: this should be ggml_backend_cuda_split_buffer_type + //if (backend == GGML_BACKEND_GPU_SPLIT) { + // if (ne.size() == 1) { + // throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str())); + // } + //} { bool is_ok = true; @@ -2369,7 +2382,7 @@ struct llama_model_loader { } } - return create_tensor_for(ctx, cur, backend); + return create_tensor_for(ctx, cur); } void done_getting_tensors() const { @@ 
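// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the patch): the general pattern the new KV
// cache code above follows - group tensors by buffer type, create one no_alloc
// ggml context per buffer type for the tensor metadata, then allocate one
// backend buffer per context. "buft_tensor_count" (buffer type -> number of
// tensors) is a hypothetical stand-in for the per-layer counting done above.
std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
for (const auto & it : buft_tensor_count) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ it.second*ggml_tensor_overhead(), // metadata only
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,                             // data lives in the backend buffer
    };
    ctx_map[it.first] = ggml_init(params);
}
// ... create the tensors with ggml_new_tensor_* in the context matching their buffer type ...
for (const auto & it : ctx_map) {
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(it.second, it.first);
    ggml_backend_buffer_clear(buf, 0); // zero any padding to avoid NaNs
}
// ---------------------------------------------------------------------------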
-2396,7 +2409,7 @@ struct llama_model_loader { for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); - if (cur->backend == GGML_BACKEND_CPU) { + if (cur->backend == buffer_type_cpu) { size_t tensor_end = gguf_get_tensor_offset(ctx_gguf, i) + ggml_nbytes(cur); size_pref = std::max(size_pref, tensor_end); } @@ -2424,36 +2437,36 @@ struct llama_model_loader { } } - // Returns false if cancelled by progress_callback - bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) const { - size_t size_data = 0; + size_t size_done = 0; + size_t size_data = 0; + size_t mmap_first = -1; + size_t mmap_last = 0; - for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { - struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); - size_data += ggml_nbytes(cur); - } + // Returns false if cancelled by progress_callback + bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) { + // TODO: move to a better place + if (size_data == 0) { + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { + struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i)); + size_data += ggml_nbytes(cur); + } - if (use_mmap && buf_mmap) { - if (lmlock) { - lmlock->init(mapping->addr); + if (use_mmap && buf_mmap) { + // FIXME + //if (lmlock) { + // lmlock->init(mapping->addr); + //} } } -#if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST) - const bool legacy_offload = true; -#else - const bool legacy_offload = false; -#endif - std::vector> read_buf; - size_t size_done = 0; - - size_t mmap_first = -1; - size_t mmap_last = 0; for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); + if (!cur) { + continue; + } GGML_ASSERT(cur); // unused tensors should have been caught by load_data already if (progress_callback) { @@ -2464,69 +2477,48 @@ struct llama_model_loader { const size_t offs = file_offset(ggml_get_name(cur)); - if (!legacy_offload || cur->backend == GGML_BACKEND_CPU) { - if (use_mmap && mapping) { - if (buf_mmap) { - ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs); - if (lmlock) { - lmlock->grow_to(offs + ggml_nbytes(cur)); - } - mmap_first = std::min(mmap_first, offs); - mmap_last = std::max(mmap_last, offs + ggml_nbytes(cur)); - } else { - ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur)); + // FIXME + if (use_mmap && mapping) { + if (buf_mmap && cur->data == nullptr) { + ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs); + if (lmlock) { + lmlock->grow_to(offs + ggml_nbytes(cur)); } + mmap_first = std::min(mmap_first, offs); + mmap_last = std::max(mmap_last, offs + ggml_nbytes(cur)); } else { - if (ggml_backend_buffer_is_host(cur->buffer)) { - file.seek(offs, SEEK_SET); - file.read_raw(cur->data, ggml_nbytes(cur)); - } else { - read_buf.resize(ggml_nbytes(cur)); - file.seek(offs, SEEK_SET); - file.read_raw(read_buf.data(), ggml_nbytes(cur)); - ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur)); - } + ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur)); } } else { - // HACK: 
mark tensor as allocated - cur->data = (void *)(uintptr_t)1; - void * data; - if (use_mmap && mapping) { - data = (uint8_t *) mapping->addr + offs; + if (ggml_backend_buffer_is_host(cur->buffer)) { + file.seek(offs, SEEK_SET); + file.read_raw(cur->data, ggml_nbytes(cur)); } else { read_buf.resize(ggml_nbytes(cur)); file.seek(offs, SEEK_SET); file.read_raw(read_buf.data(), ggml_nbytes(cur)); - data = read_buf.data(); + ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur)); } - -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - ggml_cuda_transform_tensor(data, cur); -#elif defined(GGML_USE_CLBLAST) - GGML_ASSERT(cur->backend == GGML_BACKEND_GPU); - ggml_cl_transform_tensor(data, cur); -#else - GGML_ASSERT(!"GPU tensor without a GPU backend"); - GGML_UNUSED(data); -#endif } size_done += ggml_nbytes(cur); } - // unmap offloaded tensors and metadata - if (use_mmap && mapping) { - mapping->unmap_fragment(0, mmap_first); - mapping->unmap_fragment(mmap_last, mapping->size); - } - - if (progress_callback) { + if (progress_callback && size_done >= size_data) { // Even though the model is done loading, we still honor // cancellation since we need to free allocations. return progress_callback(1.0f, progress_callback_user_data); } return true; } + + void unmap_fragments() { + // unmap offloaded tensors and metadata + if (use_mmap && mapping) { + mapping->unmap_fragment(0, mmap_first); + mapping->unmap_fragment(mmap_last, mapping->size); + } + } }; // @@ -3148,702 +3140,560 @@ static bool llm_load_tensors( void * progress_callback_user_data) { model.t_start_us = ggml_time_us(); - auto & ctx = model.ctx; auto & hparams = model.hparams; + model.main_gpu = main_gpu; model.n_gpu_layers = n_gpu_layers; - size_t ctx_size = ggml_tensor_overhead() * ml.n_tensors; + size_t ctx_size = ggml_tensor_overhead()*ml.n_tensors; + + // TODO: user configurable + enum gpu_split_mode { + CUDA_SPLIT_NONE, // single GPU + CUDA_SPLIT_LAYER, // offload layers to different GPUs + CUDA_SPLIT_ROW // split matrix rows across GPUs + }; + + gpu_split_mode split_mode = CUDA_SPLIT_LAYER; + const int64_t n_layer = hparams.n_layer; + const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0); + - LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, ctx_size/1024.0/1024.0); + // there is very little benefit to offloading the input layers, so always keep it on the CPU + model.buft_input = llama_default_buffer_type_cpu(true); + model.buft_layer.resize(n_layer); + // cpu layers + for (int64_t i = 0; i < i_gpu_start; ++i) { + model.buft_layer[i] = llama_default_buffer_type_cpu(true); + } - // create the ggml context +#ifdef GGML_USE_CUBLAS + if (split_mode == CUDA_SPLIT_LAYER) { + int device_count = ggml_backend_cuda_get_device_count(); + float splits[GGML_CUDA_MAX_DEVICES]; + std::copy(tensor_split, tensor_split + device_count, splits); + bool all_zero = std::all_of(splits, splits + device_count, [](float x) { return x == 0.0f; }); + if (all_zero) { + // set by free memory + for (int i = 0; i < device_count; ++i) { + size_t total; + size_t free; + ggml_backend_cuda_get_device_memory(i, &total, &free); + splits[i] = free; + } + } + float split_sum = 0.0f; + for (int i = 0; i < device_count; ++i) { + split_sum += splits[i]; + splits[i] = split_sum; + } + for (int i = 0; i < device_count; ++i) { + splits[i] /= split_sum; + printf("split[%d] = %.2f\n", i, splits[i]); + } + + int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1); + + // assign layers 
proportionally, in reverse order + for (int64_t i = i_gpu_start; i < n_layer; ++i) { + int layer_gpu = std::upper_bound(splits, splits + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits; + printf("layer %d -> gpu %d\n", (int)i, layer_gpu); + model.buft_layer[i] = llama_default_buffer_type_offload(layer_gpu); + } + // output layer + if (n_gpu_layers > n_layer) { + int layer_gpu = std::upper_bound(splits, splits + device_count, float(n_layer)/act_gpu_layers) - splits; + printf("output -> gpu %d\n", layer_gpu); + model.buft_output = llama_default_buffer_type_offload(layer_gpu); + } else { + model.buft_output = llama_default_buffer_type_cpu(true); + } + } else +#endif { + // offload layers + for (int64_t i = i_gpu_start; i < n_layer; ++i) { + model.buft_layer[i] = { llama_default_buffer_type_split(main_gpu, tensor_split), llama_default_buffer_type_offload(main_gpu) }; + } + // output layer + if (n_gpu_layers > n_layer) { + model.buft_output = { llama_default_buffer_type_split(main_gpu, tensor_split), llama_default_buffer_type_offload(main_gpu) }; + } else { + model.buft_output = llama_default_buffer_type_cpu(true); + } + } + + // count used buffer types + std::map buft_layer_count; + buft_layer_count[model.buft_input.buft]++; + buft_layer_count[model.buft_input.buft_matrix]++; + buft_layer_count[model.buft_output.buft]++; + buft_layer_count[model.buft_output.buft_matrix]++; + for (int64_t i = 0; i < n_layer; ++i) { + buft_layer_count[model.buft_layer[i].buft]++; + buft_layer_count[model.buft_layer[i].buft_matrix]++; + } + + // create one context per buffer type + std::map ctx_map; + for (auto & it : buft_layer_count) { struct ggml_init_params params = { /*.mem_size =*/ ctx_size, /*.mem_buffer =*/ NULL, /*.no_alloc =*/ true, }; - - model.ctx = ggml_init(params); - if (!model.ctx) { - throw std::runtime_error(format("ggml_init() failed")); + ggml_context * ctx = ggml_init(params); + if (!ctx) { + throw std::runtime_error(format("failed to create context")); } + ctx_map[it.first] = ctx; + model.ctxs.push_back(ctx); } - (void) main_gpu; - - enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU; - enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU; - -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - if (ggml_cublas_loaded()) { - LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__); - ggml_cuda_set_main_device(main_gpu); - - llama_backend_offload = GGML_BACKEND_GPU; - llama_backend_offload_split = GGML_BACKEND_GPU_SPLIT; - } -#elif defined(GGML_USE_CLBLAST) - LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__); - llama_backend_offload = GGML_BACKEND_GPU; - llama_backend_offload_split = GGML_BACKEND_GPU; -#endif + LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0); // create tensors for the weights { const int64_t n_embd = hparams.n_embd; const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); + const int64_t n_embd_gqa = n_embd_v_gqa; const int64_t n_layer = hparams.n_layer; const int64_t n_vocab = hparams.n_vocab; + const int64_t n_ff = hparams.n_ff; + + GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); + + // FIXME: for metal, it may be better to put the input on the GPU context - however it may make no difference in practice, + // and it would increase the metal buffer size + ggml_context * ctx_input = ctx_map.at(model.buft_input.buft); + ggml_context * ctx_output = 
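// ---------------------------------------------------------------------------
// Worked example (not part of the patch) of the split logic above. Assume two
// GPUs with 16 GiB and 8 GiB free and 30 offloaded layers (i_gpu_start = 0,
// act_gpu_layers = 30): the raw splits {16, 8} become cumulative normalized
// fractions {0.667, 1.0}. Layer i goes to the first device whose cumulative
// fraction exceeds i/30, so layers 0..19 land on GPU 0 and layers 20..29 on
// GPU 1 - i.e. roughly proportional to free memory. Equivalent standalone check
// (hypothetical values, <algorithm> assumed as in llama.cpp):
float splits_ex[2] = {16.0f/24.0f, 1.0f};                                      // cumulative, normalized
int gpu_for_19 = std::upper_bound(splits_ex, splits_ex + 2, 19.0f/30.0f) - splits_ex; // -> 0
int gpu_for_20 = std::upper_bound(splits_ex, splits_ex + 2, 20.0f/30.0f) - splits_ex; // -> 1
// ---------------------------------------------------------------------------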
ctx_map.at(model.buft_output.buft); + ggml_context * ctx_output_split = ctx_map.at(model.buft_output.buft_matrix); + auto ctx_for_layer = [&](int i) { return ctx_map.at(model.buft_layer[i].buft); }; + auto ctx_for_layer_split = [&](int i) { return ctx_map.at(model.buft_layer[i].buft_matrix); }; + + model.layers.resize(n_layer); const auto tn = LLM_TN(model.arch); switch (model.arch) { case LLM_ARCH_LLAMA: case LLM_ARCH_REFACT: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? 
GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); - layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split); - layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}); + layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}); + layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); // optional bias tensors - layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, false); - layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend, false); - layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend, false); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend, false); + layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, false); + layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false); + layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false); + layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, false); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); - layer.ffn_gate_inp = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd}, backend, false); + layer.ffn_gate_inp = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd}, false); if (layer.ffn_gate_inp == nullptr) { GGML_ASSERT(hparams.n_expert == 0); GGML_ASSERT(hparams.n_expert_used == 0); - layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); } else { GGML_ASSERT(hparams.n_expert > 0); GGML_ASSERT(hparams.n_expert_used > 0); // MoE branch for (uint32_t x = 0; x < hparams.n_expert; ++x) { - layer.ffn_gate_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), {n_embd, n_ff}, backend_split); - layer.ffn_down_exp[x] = ml.create_tensor(ctx, 
tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd}, backend_split); - layer.ffn_up_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff}, backend_split); + layer.ffn_gate_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), {n_embd, n_ff}); + layer.ffn_down_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd}); + layer.ffn_up_exp[x] = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff}); } } } } break; case LLM_ARCH_BAICHUAN: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? 
GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); - layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split); - layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}); + layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}); + layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); - layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); } } break; case LLM_ARCH_FALCON: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < 
n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); + layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}); if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) { - layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend); - layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend); + layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}); + layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}); } - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); } } break; case LLM_ARCH_STARCODER: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); + model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}); // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); + model.output = 
ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); + layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}); + layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); + layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); + layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}); + layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); + layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}); } } break; case LLM_ARCH_PERSIMMON: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; 
- - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); - const int i_gpu_start = n_layer - n_gpu_layers; - model.layers.resize(n_layer); - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); - layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend); - layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend); - layer.attn_k_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}, backend); - layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend); + + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); + layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}); + + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}); + layer.bqkv = ml.create_tensor(ctx_layer, 
tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}); + + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); + layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); + + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}); + layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}); + + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); + layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}); + + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); + layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}); + + layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}); + layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}); + + layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}); + layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}); } } break; case LLM_ARCH_BLOOM: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU); - model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); + model.tok_norm = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}); + model.tok_norm_b = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}); // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? 
GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); + layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}); + layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); + layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); + layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}); + layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); + layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}); } } break; case LLM_ARCH_MPT: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); + // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output = 
ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); // AWQ ScaleActivation layer - layer.ffn_act = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, backend, false); + layer.ffn_act = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, false); } } break; case LLM_ARCH_STABLELM: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = 
n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - /* - llama_model_loader: - tensor 4: blk.0.attn_output.weight f16 [ 2560, 2560, 1, 1 ] - */ - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); + layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}); - layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split); - layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}); + layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}); + layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); + layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}); - layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); } } break; case LLM_ARCH_QWEN: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output = 
ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - } + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); - const uint32_t n_ff = hparams.n_ff / 2; - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); + // output + { + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); + } - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd * 3}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd * 3}, backend); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd * 3}); + layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd * 3}); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); - layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2}); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd}); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2}); } } break; case LLM_ARCH_PHI2: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); - model.output_b = ml.create_tensor(ctx, 
tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); + model.output_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); + layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}); + layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); + layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}); + layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); + layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}); } } break; case LLM_ARCH_PLAMO: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // output { - ggml_backend_type backend_norm; - ggml_backend_type backend_output; - - if 
(n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); - layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split); - layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); + layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}); + layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}); + layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); - layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); + layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); } } break; case LLM_ARCH_GPT2: { - model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU); - model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU); + model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); + model.pos_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}); // output { - ggml_backend_type backend_norm; - 
ggml_backend_type backend_output; - - if (n_gpu_layers > int(n_layer)) { - backend_norm = llama_backend_offload; - backend_output = llama_backend_offload_split; - } else { - backend_norm = GGML_BACKEND_CPU; - backend_output = GGML_BACKEND_CPU; - } - - model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm); - model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm); - model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output); + model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}); + model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}); + model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}); } - const uint32_t n_ff = hparams.n_ff; - const int64_t n_embd_gqa = n_embd_v_gqa; - GGML_ASSERT(n_embd_gqa == n_embd / hparams.n_gqa()); - GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - - const int i_gpu_start = n_layer - n_gpu_layers; - - model.layers.resize(n_layer); - - for (uint32_t i = 0; i < n_layer; ++i) { - const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT - const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT + for (int i = 0; i < n_layer; ++i) { + ggml_context * ctx_layer = ctx_for_layer(i); + ggml_context * ctx_split = ctx_for_layer_split(i); auto & layer = model.layers[i]; - layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend); - layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend); + layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); + layer.attn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}); - layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split); - layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}); + layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}); - layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split); - layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend); + layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); + layer.bo = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}); - layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend); - layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend); + layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); + layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}); - layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split); - layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend); + layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}); + layer.ffn_down_b = 
ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}); - layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split); - layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend); + layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}); + layer.ffn_up_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}); } } break; default: @@ -3855,76 +3705,52 @@ static bool llm_load_tensors( ml.init_mapping(); - // allocate tensors - size_t vram_weights = 0; - size_t buf_size = 0; + // TODO: move to ggml + //auto ggml_n_tensors = [](struct ggml_context * ctx) { + // int n = 0; + // for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) { + // ++n; + // } + // return n; + //}; - ggml_backend_buffer_type_t buft = llama_default_buffer_type(n_gpu_layers); + // create backend buffers - for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { - // GGML_BACKEND_GPU tensors are for CUDA and OpenCL only, which are handled separately without ggml-backend - if (t->backend == GGML_BACKEND_CPU) { - buf_size += GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), ggml_backend_buft_get_alignment(buft)); - } else { - vram_weights += ggml_nbytes(t); - } - } + std::vector> ctx_bufs; - // create backend buffer - ggml_backend_buffer_t buf_mmap = nullptr; + for (auto & it : ctx_map) { + ggml_backend_buffer_type_t buft = it.first; + ggml_context * ctx = it.second; + ggml_backend_buffer_t buf = nullptr; + // TODO: do not use whole model mapping for the buffer, only the region containing the tensors + // this is important for metal: if the entire model could be mapped, then we could use metal for all layers + if (ml.use_mmap && buft == ggml_backend_cpu_buffer_type()) { + buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size); + } #ifdef GGML_USE_METAL - if (n_gpu_layers > 0) { - if (ml.use_mmap) { + else if (ml.use_mmap && buft == ggml_backend_metal_buffer_type()) { const size_t max_size = ggml_get_max_tensor_size(ctx); - model.buf = ggml_backend_metal_buffer_from_ptr(ml.mapping->addr, ml.mapping->size, max_size); - buf_mmap = model.buf; - } else { - model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_metal_buffer_type()); + buf = ggml_backend_metal_buffer_from_ptr(ml.mapping->addr, ml.mapping->size, max_size); } - } -#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST) - // for testing only - if (n_gpu_layers > 0) { - model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cuda_buffer_type(0)); - } #endif - - if (model.buf == nullptr) { - // CPU backend, and indirectly CUDA and OpenCL - if (ml.use_mmap) { - model.buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size); - buf_mmap = model.buf; - } else { - // allocate only CPU tensors - model.buf = ggml_backend_buft_alloc_buffer(buft, buf_size); - ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(model.buf); - for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { - if (t->backend == GGML_BACKEND_CPU) { - ggml_tallocr_alloc(alloc, t); - } + else { + buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (buf != nullptr && use_mlock && ggml_backend_buffer_is_host(buf)) { + model.mlock_buf.init (ggml_backend_buffer_get_base(buf)); + 
model.mlock_buf.grow_to(ggml_backend_buffer_get_size(buf)); } - ggml_tallocr_free(alloc); } - } - - if (use_mlock && ggml_backend_buffer_is_host(model.buf)) { - model.mlock_buf.init (ggml_backend_buffer_get_base(model.buf)); - model.mlock_buf.grow_to(ggml_backend_buffer_get_size(model.buf)); + if (buf == nullptr) { + throw std::runtime_error("failed to allocate buffer"); + } + ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); + model.bufs.push_back(buf); + ctx_bufs.emplace_back(ctx, buf); } // print memory requirements { - size_t sys_mem_required = ctx_size + buf_size; - - if (sys_mem_required > 0) { - LLAMA_LOG_INFO("%s: system memory used = %7.2f MiB\n", __func__, sys_mem_required / 1024.0 / 1024.0); - } - if (vram_weights > 0) { - LLAMA_LOG_INFO("%s: VRAM used = %7.2f MiB\n", __func__, vram_weights / 1024.0 / 1024.0); - } - -#if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST) const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer)); LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu); @@ -3936,24 +3762,35 @@ static bool llm_load_tensors( const int max_offloadable_layers = hparams.n_layer + 1; LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers); -#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) - } -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - ggml_cuda_set_tensor_split(tensor_split); -#else - GGML_UNUSED(tensor_split); -#endif // GGML_USE_CUBLAS + for (ggml_backend_buffer_t buf : model.bufs) { + // FIXME: add buffer type names to ggml-backend + const char * name; + if (ggml_backend_buffer_type(buf) == ggml_backend_cpu_buffer_type()) { + name = "CPU"; + } else { + name = "???"; + } + LLAMA_LOG_INFO("%s: %10s buffer size = %7.2f MiB\n", __func__, name, ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); + } + } // populate tensors_by_name - for (int i = 0; i < ml.n_tensors; ++i) { - struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i)); - model.tensors_by_name.emplace_back(ggml_get_name(cur), cur); + for (ggml_context * ctx : model.ctxs) { + for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) { + model.tensors_by_name.emplace_back(ggml_get_name(cur), cur); + } } - if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, use_mlock ? &model.mlock_mmap : NULL)) { - return false; + // load data + for (auto & it : ctx_bufs) { + ggml_context * ctx = it.first; + ggml_backend_buffer_t buf = it.second; + if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf, use_mlock ? 
&model.mlock_mmap : NULL)) { + return false; + } } + ml.unmap_fragments(); model.mapping = std::move(ml.mapping); @@ -4438,8 +4275,6 @@ struct llm_build_context { do_rope_shift (worst_case || kv_self.has_shift), cb (cb), buf_compute_meta (lctx.buf_compute_meta) { - GGML_ASSERT(!!kv_self.ctx); - // all initializations should be done in init() } @@ -6047,199 +5882,13 @@ struct llm_build_context { } }; -// -// tensor offloading helpers -// -// TODO: will be removed with backend v2 - -enum llm_offload_func_e { - OFFLOAD_FUNC_NOP, - OFFLOAD_FUNC, - OFFLOAD_FUNC_FRC, // force offload - OFFLOAD_FUNC_KQV, - OFFLOAD_FUNC_NR, - OFFLOAD_FUNC_EMB, // embeddings - OFFLOAD_FUNC_OUT, -}; - -// TODO: will be removed with backend v2 -struct llm_offload_trie { - struct node { - ~node() { - for (int i = 0; i < 256; ++i) { - if (children[i]) { - delete children[i]; - } - } - } - - node * children[256] = { nullptr }; - llm_offload_func_e func = OFFLOAD_FUNC_NOP; - }; - - llm_offload_trie() { - root = new node; - } - - llm_offload_trie(const std::unordered_map & map) { - root = new node; - - for (const auto & kv : map) { - add(kv.first, kv.second); - } - } - - ~llm_offload_trie() { - delete root; - } - - void add(const char * name, llm_offload_func_e func) { - node * cur = root; - - for (int i = 0; ; ++i) { - const uint8_t c = name[i]; - - if (!c) { - break; - } - - if (!cur->children[c]) { - cur->children[c] = new node; - } - - cur = cur->children[c]; - } - - cur->func = func; - } - - llm_offload_func_e find(const char * name) const { - const node * cur = root; - - for (int i = 0; ; ++i) { - const uint8_t c = name[i]; - - if (!c) { - break; - } - - if (!cur->children[c]) { - return OFFLOAD_FUNC_NOP; - } - - cur = cur->children[c]; - } - - return cur->func; - } - - node * root = nullptr; -}; - -// TODO: will be removed with backend v2 -static const std::unordered_map k_offload_map = { - //{ "inp_tokens", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel - //{ "inp_embd", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel - { "pos_embd", OFFLOAD_FUNC_NR }, - - { "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. 
rope) - { "KQ_mask", OFFLOAD_FUNC_FRC }, - { "K_shift", OFFLOAD_FUNC_FRC }, - - { "K_shifted", OFFLOAD_FUNC }, - - { "inp_norm", OFFLOAD_FUNC_NR }, - { "inp_norm_w", OFFLOAD_FUNC_NR }, - { "inp_norm_wb", OFFLOAD_FUNC_NR }, - - { "norm", OFFLOAD_FUNC }, - { "norm_w", OFFLOAD_FUNC }, - { "norm_wb", OFFLOAD_FUNC }, - - { "attn_norm", OFFLOAD_FUNC }, - { "attn_norm_2", OFFLOAD_FUNC }, - - { "wqkv", OFFLOAD_FUNC_KQV }, - { "bqkv", OFFLOAD_FUNC_KQV }, - { "wqkv_clamped", OFFLOAD_FUNC_KQV }, - - { "tmpk", OFFLOAD_FUNC_KQV }, - { "tmpq", OFFLOAD_FUNC_KQV }, - { "tmpv", OFFLOAD_FUNC_KQV }, - { "Kcur", OFFLOAD_FUNC_KQV }, - { "Qcur", OFFLOAD_FUNC_KQV }, - { "Vcur", OFFLOAD_FUNC_KQV }, - - { "krot", OFFLOAD_FUNC_KQV }, - { "qrot", OFFLOAD_FUNC_KQV }, - { "kpass", OFFLOAD_FUNC_KQV }, - { "qpass", OFFLOAD_FUNC_KQV }, - { "krotated", OFFLOAD_FUNC_KQV }, - { "qrotated", OFFLOAD_FUNC_KQV }, - - { "q", OFFLOAD_FUNC_KQV }, - { "k", OFFLOAD_FUNC_KQV }, - { "kq", OFFLOAD_FUNC_KQV }, - { "kq_scaled", OFFLOAD_FUNC_KQV }, - { "kq_scaled_alibi", OFFLOAD_FUNC_KQV }, - { "kq_masked", OFFLOAD_FUNC_KQV }, - { "kq_soft_max", OFFLOAD_FUNC_KQV }, - { "kq_soft_max_ext", OFFLOAD_FUNC_KQV }, - { "v", OFFLOAD_FUNC_KQV }, - { "kqv", OFFLOAD_FUNC_KQV }, - { "kqv_merged", OFFLOAD_FUNC_KQV }, - { "kqv_merged_cont", OFFLOAD_FUNC_KQV }, - { "kqv_wo", OFFLOAD_FUNC_KQV }, - { "kqv_out", OFFLOAD_FUNC_KQV }, - - { "ffn_inp", OFFLOAD_FUNC }, - { "ffn_norm", OFFLOAD_FUNC }, - - { "ffn_up", OFFLOAD_FUNC }, - { "ffn_up_b", OFFLOAD_FUNC }, - { "ffn_gate", OFFLOAD_FUNC }, - { "ffn_gate_b", OFFLOAD_FUNC }, - { "ffn_gate_par", OFFLOAD_FUNC }, - { "ffn_act", OFFLOAD_FUNC }, - { "ffn_down", OFFLOAD_FUNC }, - { "ffn_down_b", OFFLOAD_FUNC }, - { "ffn_out", OFFLOAD_FUNC }, - - { "ffn_silu", OFFLOAD_FUNC }, - { "ffn_gelu", OFFLOAD_FUNC }, - { "ffn_relu", OFFLOAD_FUNC }, - { "ffn_sqr(relu)", OFFLOAD_FUNC }, - - { "ffn_moe_logits", OFFLOAD_FUNC }, - { "ffn_moe_probs", OFFLOAD_FUNC }, - { "ffn_moe_argsort", OFFLOAD_FUNC }, - { "ffn_moe_weights", OFFLOAD_FUNC }, - { "ffn_moe_weights_sum", OFFLOAD_FUNC }, - { "ffn_moe_weights_norm", OFFLOAD_FUNC }, - { "ffn_moe_weighted", OFFLOAD_FUNC }, - { "ffn_moe_up", OFFLOAD_FUNC }, - { "ffn_moe_gate", OFFLOAD_FUNC }, - { "ffn_moe_silu", OFFLOAD_FUNC }, - { "ffn_moe_gate_par", OFFLOAD_FUNC }, - { "ffn_moe_down", OFFLOAD_FUNC }, - { "ffn_moe_out", OFFLOAD_FUNC }, - - { "l_out", OFFLOAD_FUNC }, - - { "result_norm", OFFLOAD_FUNC_EMB }, - { "result_output_no_bias", OFFLOAD_FUNC_EMB }, - { "result_output", OFFLOAD_FUNC_OUT }, -}; - -static llm_offload_trie k_offload_func_trie(k_offload_map); - static struct ggml_cgraph * llama_build_graph( llama_context & lctx, const llama_batch & batch) { const auto & model = lctx.model; // check if we should build the worst-case graph (for memory measurement) - const bool worst_case = ggml_allocr_is_measure(lctx.alloc); + const bool worst_case = ggml_tallocr_is_measure(lctx.alloc); // keep track of the input that has already been allocated bool alloc_inp_tokens = false; @@ -6248,16 +5897,8 @@ static struct ggml_cgraph * llama_build_graph( bool alloc_inp_KQ_mask = false; bool alloc_inp_K_shift = false; -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - const bool do_offload = true; -#else - const bool do_offload = true; // TODO: set to false after finishing refactoring -#endif - - int n_non_view = 0; // number of non-view tensors that have been processed by the callback - // this callback allows us to apply custom logic to each tensor (e.g. 
ggml-alloc, offloading, etc.) - // TODO: will be removed with backend v2 + // TODO: improve handling of input and output tensors, then replace this with ggml_set_name llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) { if (il >= 0) { ggml_format_name(cur, "%s-%d", name, il); @@ -6268,12 +5909,11 @@ static struct ggml_cgraph * llama_build_graph( // // allocate input tensors and set input data // - // TODO: will be removed with backend v2 if (!alloc_inp_tokens && strcmp(name, "inp_tokens") == 0) { - ggml_allocr_alloc(lctx.alloc, cur); + ggml_tallocr_alloc(lctx.alloc, cur); - if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) { + if (!ggml_tallocr_is_measure(lctx.alloc) && batch.token) { const int64_t n_tokens = cur->ne[0]; ggml_backend_tensor_set(cur, batch.token, 0, n_tokens*ggml_element_size(cur)); @@ -6282,10 +5922,11 @@ static struct ggml_cgraph * llama_build_graph( alloc_inp_tokens = true; } - if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0) { - ggml_allocr_alloc(lctx.alloc, cur); + // FIXME: allocating this conditionally will result in issues with the measure allocator + if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0 && batch.embd) { + ggml_tallocr_alloc(lctx.alloc, cur); - if (!ggml_allocr_is_measure(lctx.alloc) && batch.embd) { + if (!ggml_tallocr_is_measure(lctx.alloc) && batch.embd) { const int64_t n_embd = cur->ne[0]; const int64_t n_tokens = cur->ne[1]; @@ -6296,9 +5937,9 @@ static struct ggml_cgraph * llama_build_graph( } if (!alloc_inp_pos && strcmp(name, "inp_pos") == 0) { - ggml_allocr_alloc(lctx.alloc, cur); + ggml_tallocr_alloc(lctx.alloc, cur); - if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) { + if (!ggml_tallocr_is_measure(lctx.alloc) && batch.pos) { const int64_t n_tokens = cur->ne[0]; static_assert(std::is_same::value, "llama_pos must be int32_t"); @@ -6309,9 +5950,9 @@ static struct ggml_cgraph * llama_build_graph( } if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) { - ggml_allocr_alloc(lctx.alloc, cur); + ggml_tallocr_alloc(lctx.alloc, cur); - if (!ggml_allocr_is_measure(lctx.alloc)) { + if (!ggml_tallocr_is_measure(lctx.alloc)) { const int64_t n_kv = cur->ne[0]; const int64_t n_tokens = cur->ne[1]; @@ -6349,9 +5990,9 @@ static struct ggml_cgraph * llama_build_graph( } if (!alloc_inp_K_shift && strcmp(name, "K_shift") == 0) { - ggml_allocr_alloc(lctx.alloc, cur); + ggml_tallocr_alloc(lctx.alloc, cur); - if (!ggml_allocr_is_measure(lctx.alloc)) { + if (!ggml_tallocr_is_measure(lctx.alloc)) { const int64_t n_ctx = cur->ne[0]; int32_t * data; @@ -6373,136 +6014,6 @@ static struct ggml_cgraph * llama_build_graph( alloc_inp_K_shift = true; } - - // view tensors are not processed further - if (cur->view_src != nullptr) { - return; - } - - if (cur->op != GGML_OP_NONE) { - n_non_view++; - } - - // - // offload layers - // - // TODO: will be removed with backend v2 - -//#define LLAMA_OFFLOAD_DEBUG - - if (!do_offload) { - return; - } - - const int n_layer = model.hparams.n_layer; - - const int n_gpu_layers = model.n_gpu_layers; - const int i_gpu_start = n_layer - n_gpu_layers; - - // should we offload the final norm? 
yes if we are not computing embeddings - const bool offload_emb = lctx.embedding.empty(); - - static const std::unordered_map> k_offload_func_name = { - { OFFLOAD_FUNC_NOP, "CPU" }, - { OFFLOAD_FUNC_OUT, "CPU" }, -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - { OFFLOAD_FUNC, "GPU (CUDA)" }, - { OFFLOAD_FUNC_FRC, "GPU (CUDA) FRC" }, - { OFFLOAD_FUNC_KQV, "GPU (CUDA) KQV" }, - { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" }, - { OFFLOAD_FUNC_EMB, "GPU (CUDA) EMB" }, -#else - { OFFLOAD_FUNC, "CPU" }, - { OFFLOAD_FUNC_FRC, "CPU" }, - { OFFLOAD_FUNC_KQV, "CPU" }, - { OFFLOAD_FUNC_NR, "CPU" }, - { OFFLOAD_FUNC_EMB, "CPU" }, -#endif // GGML_USE_CUBLAS - }; - - // check the global map for what offload function to use for this tensor - llm_offload_func_e func_e = k_offload_func_trie.find(name); - - if (func_e == OFFLOAD_FUNC_NOP) { -#ifdef LLAMA_OFFLOAD_DEBUG - // if a tensor hasn't been offloaded, we warn the user - if (worst_case) { - LLAMA_LOG_WARN("%s: %32s: not offloaded (ref: %s)\n", __func__, - cur->name, "https://github.com/ggerganov/llama.cpp/pull/3837"); - } -#endif - - return; - } - - // count the number of layers and respect the provided n_gpu_layers - switch (func_e) { - case OFFLOAD_FUNC_NOP: - case OFFLOAD_FUNC_OUT: - break; - case OFFLOAD_FUNC: - if (n_gpu_layers < n_layer) { - if (il < i_gpu_start) { - func_e = OFFLOAD_FUNC_NOP; - } - } - break; - case OFFLOAD_FUNC_FRC: - if (!lctx.cparams.offload_kqv) { - func_e = OFFLOAD_FUNC_NOP; - } break; - case OFFLOAD_FUNC_KQV: - if (!lctx.cparams.offload_kqv) { - func_e = OFFLOAD_FUNC_NOP; - } else { - if (n_gpu_layers < n_layer) { - if (il < i_gpu_start) { - func_e = OFFLOAD_FUNC_NOP; - } - } - } - break; - case OFFLOAD_FUNC_NR: - if (n_gpu_layers <= n_layer + 0) { - func_e = OFFLOAD_FUNC_NOP; - } - break; - case OFFLOAD_FUNC_EMB: - if (!offload_emb || n_gpu_layers < n_layer) { - func_e = OFFLOAD_FUNC_NOP; - } - break; - default: GGML_ASSERT(false); - } - - offload_func_t func = ggml_offload_nop; - - // this is needed for compatibility with Metal for example -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc; -#else - static offload_func_t ggml_offload_gpu = ggml_offload_nop; -#endif - - switch (func_e) { - case OFFLOAD_FUNC_NOP: - case OFFLOAD_FUNC_OUT: func = ggml_offload_nop; break; - case OFFLOAD_FUNC: - case OFFLOAD_FUNC_KQV: - case OFFLOAD_FUNC_FRC: - case OFFLOAD_FUNC_NR: - case OFFLOAD_FUNC_EMB: func = ggml_offload_gpu; break; - default: GGML_ASSERT(false); - } - - // apply offload function to the tensor - func(cur); - -#ifdef LLAMA_OFFLOAD_DEBUG - if (worst_case) { - LLAMA_LOG_INFO("%s: %32s: %s\n", __func__, cur->name, k_offload_func_name.at(func_e).c_str()); - } -#endif }; struct ggml_cgraph * result = NULL; @@ -6570,27 +6081,6 @@ static struct ggml_cgraph * llama_build_graph( llm.free(); - if (worst_case) { - int n_non_view_total = 0; - - for (int i = 0; i < result->n_nodes; ++i) { - if (result->nodes[i]->view_src == nullptr) { - n_non_view_total++; - } - } - - LLAMA_LOG_INFO("%s: non-view tensors processed: %d/%d\n", __func__, n_non_view, n_non_view_total); - - if (n_non_view != n_non_view_total) { - LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__); - LLAMA_LOG_WARN("%s: not all non-view tensors have been processed with a callback\n", __func__); - LLAMA_LOG_WARN("%s: this can indicate an inefficiency in the graph implementation\n", __func__); - LLAMA_LOG_WARN("%s: 
build with LLAMA_OFFLOAD_DEBUG for more info\n", __func__); - LLAMA_LOG_WARN("%s: ref: https://github.com/ggerganov/llama.cpp/pull/3837\n", __func__); - LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__); - } - } - return result; } @@ -6636,8 +6126,6 @@ static int llama_decode_internal( auto & kv_self = lctx.kv_self; - GGML_ASSERT(!!kv_self.ctx); - const int64_t n_embd = hparams.n_embd; const int64_t n_vocab = hparams.n_vocab; @@ -6691,12 +6179,8 @@ static int llama_decode_internal( //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head); - ggml_allocr_reset(lctx.alloc); - ggml_cgraph * gf = llama_build_graph(lctx, batch); - ggml_allocr_alloc_graph(lctx.alloc, gf); - // the output is always the last tensor in the graph struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; GGML_ASSERT(strcmp(res->name, "result_output") == 0); @@ -6708,30 +6192,6 @@ static int llama_decode_internal( GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0); } -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - char * buf_alloc_base = (char *)ggml_backend_buffer_get_base(lctx.buf_alloc); - for (int i = 0; i < gf->n_leafs; i++) { - ggml_tensor * node = gf->leafs[i]; - if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) { - ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base); - ggml_cuda_copy_to_device(node); - } - } - - for (int i = 0; i < gf->n_nodes; i++) { - ggml_tensor * node = gf->nodes[i]; - if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) { - ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base); - } - } - - // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed - if (!lctx.embedding.empty()) { - embeddings->backend = GGML_BACKEND_CPU; - } - res->backend = GGML_BACKEND_CPU; -#endif - // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); // for big prompts, if BLAS is enabled, it is better to use only one thread @@ -6759,10 +6219,10 @@ static int llama_decode_internal( } #endif - if (ggml_backend_is_cpu(lctx.backend)) { - ggml_backend_cpu_set_n_threads(lctx.backend, n_threads); + if (lctx.backend_cpu != nullptr) { + ggml_backend_cpu_set_n_threads(lctx.backend_cpu, n_threads); } - ggml_backend_graph_compute(lctx.backend, gf); + ggml_backend_sched_graph_compute(lctx.sched, gf); #ifdef GGML_USE_MPI ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer); @@ -9432,19 +8892,7 @@ static int llama_apply_lora_from_file_internal( ggml_tensor * dest_t = model_tensors[base_name]; - offload_func_t offload_func = ggml_offload_nop; - offload_func_t offload_func_force_inplace = ggml_offload_nop; - -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) { - if (dest_t->type != GGML_TYPE_F16) { - throw std::runtime_error(format( - "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. 
dest_t->type: %d", __func__, dest_t->type)); - } - offload_func = ggml_cuda_assign_buffers; - offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace; - } -#endif // GGML_USE_CUBLAS + // FIXME: ggml-backend ggml_tensor * base_t; if (ml) { @@ -9486,28 +8934,23 @@ static int llama_apply_lora_from_file_internal( // w = w + BA*s ggml_tensor * BA = ggml_mul_mat(lora_ctx.get(), loraA, loraB); - offload_func(BA); ggml_set_name(BA, "BA"); if (scaling != 1.0f) { BA = ggml_scale_inplace(lora_ctx.get(), BA, scaling); - offload_func(BA); ggml_set_name(BA, "BA_scaled"); } ggml_tensor * r; if (base_t == dest_t) { r = ggml_add_inplace(lora_ctx.get(), dest_t, BA); - offload_func_force_inplace(r); ggml_set_name(r, "r_add_inplace"); } else { r = ggml_add(lora_ctx.get(), base_t, BA); - offload_func(r); ggml_set_name(r, "r_add"); r = ggml_cpy(lora_ctx.get(), r, dest_t); - offload_func(r); ggml_set_name(r, "r_cpy"); } @@ -9743,41 +9186,40 @@ struct llama_context * llama_new_context_with_model( GGML_ASSERT(hparams.n_embd_head_k % ggml_blck_size(type_k) == 0); GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0); - // reserve memory for context buffers if (!hparams.vocab_only) { - // initialize backend + std::vector backends; + + // initialize backends + // TODO: only initialize the backends that are actually used #ifdef GGML_USE_METAL if (model->n_gpu_layers > 0) { ctx->backend = ggml_backend_metal_init(); if (ctx->backend == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__); } + backends.push_back(ctx->backend); } -#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST) - // for testing only +#elif defined(GGML_USE_CUBLAS) if (model->n_gpu_layers > 0) { - ctx->backend = ggml_backend_cuda_init(0); - if (ctx->backend == nullptr) { - LLAMA_LOG_ERROR("%s: failed to initialize CUDA backend\n", __func__); + for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) { + ctx->backend = ggml_backend_cuda_init(device); + if (ctx->backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize CUDA backend for device %d\n", __func__, device); + } + backends.push_back(ctx->backend); } } #endif - - if (ctx->backend == nullptr && ggml_backend_buffer_is_host(model->buf)) { - ctx->backend = ggml_backend_cpu_init(); - if (ctx->backend == nullptr) { - LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__); - } - } - - if (ctx->backend == nullptr) { - LLAMA_LOG_ERROR("%s: failed to initialize a backend\n", __func__); + ctx->backend_cpu = ggml_backend_cpu_init(); + if (ctx->backend_cpu == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__); delete ctx; return nullptr; } + backends.push_back(ctx->backend_cpu); - if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, type_k, type_v, - cparams.n_ctx, model->n_gpu_layers, cparams.offload_kqv)) { + if (!llama_kv_cache_init(ctx->kv_self, ctx->model, type_k, type_v, + cparams.n_ctx, cparams.offload_kqv)) { LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__); llama_free(ctx); return nullptr; @@ -9813,11 +9255,11 @@ struct llama_context * llama_new_context_with_model( } { - // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data + // buffer used to store the computation graph and the tensor meta data ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead()); - // create measure allocator - ctx->alloc = 
ggml_allocr_new_measure_from_backend(ctx->backend); + ctx->sched = ggml_backend_sched_new(backends.data(), backends.size()); + ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, ctx->backend_cpu); // build worst-case graph int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch); @@ -9825,50 +9267,18 @@ struct llama_context * llama_new_context_with_model( llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0)); - // measure memory requirements for the graph - size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf); - - LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MiB\n", __func__, (ctx->buf_compute_meta.size() + alloc_size) / 1024.0 / 1024.0); - - // create allocator again with exact memory requirements - ggml_allocr_free(ctx->alloc); - - ctx->buf_alloc = ggml_backend_alloc_buffer(ctx->backend, alloc_size); - ctx->alloc = ggml_allocr_new_from_buffer(ctx->buf_alloc); -#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST) - if (model->n_gpu_layers > 0) { - // the CPU buffer adds this padding in case the malloc buffer is not aligned, so we need to do the same for the GPU buffer, since we use the same offsets - ggml_cuda_set_scratch_size(alloc_size + 64); - LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0); + // initialize scheduler with the worst-case graph + ggml_backend_sched_init_measure(ctx->sched, gf); + int n_splits = ggml_backend_sched_get_n_splits(ctx->sched); + LLAMA_LOG_INFO("%s: graph splits: %d\n", __func__, n_splits); + ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, ctx->backend_cpu); - // calculate total VRAM usage - auto add_tensor = [](const ggml_tensor * t, size_t & size) { - if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) { - size += ggml_nbytes(t); - } - }; - size_t model_vram_size = 0; - for (const auto & kv : model->tensors_by_name) { - add_tensor(kv.second, model_vram_size); - } - - size_t kv_vram_size = 0; - for (auto & k : ctx->kv_self.k_l) { - add_tensor(k, kv_vram_size); - } - for (auto & v : ctx->kv_self.v_l) { - add_tensor(v, kv_vram_size); - } - - size_t ctx_vram_size = alloc_size + kv_vram_size; - size_t total_vram_size = model_vram_size + ctx_vram_size; - - LLAMA_LOG_INFO("%s: total VRAM used: %.2f MiB (model: %.2f MiB, context: %.2f MiB)\n", __func__, - total_vram_size / 1024.0 / 1024.0, - model_vram_size / 1024.0 / 1024.0, - ctx_vram_size / 1024.0 / 1024.0); + for (ggml_backend_t backend : backends) { + ggml_backend_buffer_t buf = ggml_backend_sched_get_buffer(ctx->sched, backend); + LLAMA_LOG_INFO("%s: %10s compute buffer size = %.2f MiB\n", __func__, + ggml_backend_name(backend), + ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); } -#endif } } @@ -9965,9 +9375,8 @@ int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int3 } int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) { - return snprintf(buf, buf_size, "%s %s%s %s", + return snprintf(buf, buf_size, "%s %s %s", llama_model_arch_name(model->arch).c_str(), - model->hparams.n_expert > 0 ? 
(std::to_string(model->hparams.n_expert) + "x").c_str() : "", llama_model_type_name(model->type), llama_model_ftype_name(model->ftype).c_str()); } @@ -9989,7 +9398,14 @@ uint64_t llama_model_n_params(const struct llama_model * model) { } struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) { - return ggml_get_tensor(model->ctx, name); + auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(), + [name](const std::pair & it) { + return it.first == name; + }); + if (it == model->tensors_by_name.end()) { + return nullptr; + } + return it->second; } uint32_t llama_model_quantize( @@ -10162,7 +9578,7 @@ size_t llama_get_state_size(const struct llama_context * ctx) { const size_t s_embedding = ctx->embedding.size() * sizeof(float); const size_t s_kv_size = sizeof(size_t); const size_t s_kv_ntok = sizeof(int); - const size_t s_kv = ggml_backend_buffer_get_size(ctx->kv_self.buf); + const size_t s_kv = ggml_backend_buffer_get_size(ctx->kv_self.bufs.at(0)); // FIXME const size_t s_total = ( + s_rng_size @@ -10291,7 +9707,7 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat const auto n_embd_v_gqa = hparams.n_embd_v_gqa(); const auto n_ctx = cparams.n_ctx; - const size_t kv_buf_size = ggml_backend_buffer_get_size(kv_self.buf); + const size_t kv_buf_size = ggml_backend_buffer_get_size(kv_self.bufs.at(0)); // FIXME const uint32_t kv_head = kv_self.head; const uint32_t kv_size = kv_self.size; const uint32_t kv_used = kv_self.used; @@ -10442,7 +9858,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { memcpy(&kv_used, inp, sizeof(kv_used)); inp += sizeof(kv_used); if (kv_buf_size) { - GGML_ASSERT(ggml_backend_buffer_get_size(kv_self.buf) == kv_buf_size); + GGML_ASSERT(ggml_backend_buffer_get_size(kv_self.bufs.at(0)) == kv_buf_size); // FIXME const size_t elt_size = ggml_element_size(kv_self.k_l[0]); From 6483328fa9f1d4ca178108f50013c2059541937f Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 5 Jan 2024 02:49:38 +0100 Subject: [PATCH 02/34] ggml-backend : add names to buffers --- ggml-backend-impl.h | 20 ++-- ggml-backend.c | 252 ++++++++++++++++++++++---------------------- ggml-backend.h | 51 +++++---- ggml-cuda.cu | 70 +++++++++--- ggml-metal.m | 20 +++- llama.cpp | 29 ++--- 6 files changed, 248 insertions(+), 194 deletions(-) diff --git a/ggml-backend-impl.h b/ggml-backend-impl.h index aa2587a196f89..ab6d96c681e94 100644 --- a/ggml-backend-impl.h +++ b/ggml-backend-impl.h @@ -16,6 +16,7 @@ extern "C" { typedef void * ggml_backend_buffer_type_context_t; struct ggml_backend_buffer_type_i { + const char * (*get_name) (ggml_backend_buffer_type_t buft); ggml_backend_buffer_t (*alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size); size_t (*get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment size_t (*get_alloc_size) (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding @@ -34,16 +35,17 @@ extern "C" { typedef void * ggml_backend_buffer_context_t; struct ggml_backend_buffer_i { - void (*free_buffer) (ggml_backend_buffer_t buffer); - //void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras - void * (*get_base) (ggml_backend_buffer_t buffer); - void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, 
size_t offset, size_t size); - void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + const char * (*get_name) (ggml_backend_buffer_t buffer); + void (*free_buffer) (ggml_backend_buffer_t buffer); + //void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras + void * (*get_base) (ggml_backend_buffer_t buffer); + void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); + void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); // (optional) copy tensor between different buffer-type, allow for single-copy tranfers - void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*clear) (ggml_backend_buffer_t buffer, uint8_t value); + void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*clear) (ggml_backend_buffer_t buffer, uint8_t value); }; struct ggml_backend_buffer { diff --git a/ggml-backend.c b/ggml-backend.c index 10c69c5711746..ee8e72560a358 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -15,6 +15,10 @@ // backend buffer type +const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) { + return buft->iface.get_name(buft); +} + ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { return buft->iface.alloc_buffer(buft, size); } @@ -64,6 +68,10 @@ ggml_backend_buffer_t ggml_backend_buffer_init( return buffer; } +const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) { + return buffer->iface.get_name(buffer); +} + void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) { if (buffer == NULL) { return; @@ -397,6 +405,12 @@ ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size) { // backend CPU +static const char * ggml_backend_cpu_buffer_name(ggml_backend_buffer_t buffer) { + return "CPU"; + + GGML_UNUSED(buffer); +} + static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) { return (void *)buffer->context; } @@ -434,6 +448,7 @@ static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t } static struct ggml_backend_buffer_i cpu_backend_buffer_i = { + /* .get_name = */ ggml_backend_cpu_buffer_name, /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer, /* .get_base = */ ggml_backend_cpu_buffer_get_base, /* .init_tensor = */ NULL, // no initialization required @@ -446,6 +461,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i = { // for buffers from ptr, free is not called static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = { + /* .get_name = */ ggml_backend_cpu_buffer_name, /* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed /* .get_base = */ ggml_backend_cpu_buffer_get_base, /* .init_tensor = */ NULL, // no initialization required @@ -458,6 +474,12 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = { static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512 
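A side note on the get_name callbacks introduced above: once every buffer and buffer type reports a name, callers can log per-buffer memory usage without special-casing backends. A minimal caller-side sketch, illustrative only and not part of the diff; it assumes nothing beyond the ggml_backend_buffer_name and ggml_backend_buffer_get_size calls that appear in this series:

#include "ggml-backend.h"
#include <cstdio>

// print the size of each buffer, labelled with the name its backend reports
static void print_buffer_sizes(ggml_backend_buffer_t * bufs, int n_bufs) {
    for (int i = 0; i < n_bufs; i++) {
        fprintf(stderr, "%10s buffer size = %8.2f MiB\n",
                ggml_backend_buffer_name(bufs[i]),
                ggml_backend_buffer_get_size(bufs[i]) / 1024.0 / 1024.0);
    }
}

This is the same pattern the llama.cpp hunks later in this patch switch to for the model, KV cache and compute buffers.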
+static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "CPU"; + + GGML_UNUSED(buft); +} + static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC? @@ -488,6 +510,7 @@ static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) { static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = { /* .iface = */ { + /* .get_name = */ ggml_backend_cpu_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes @@ -506,6 +529,18 @@ ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) { #include +static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "CPU_HBM"; + + GGML_UNUSED(buft); +} + +static const char * ggml_backend_cpu_hbm_buffer_get_name(ggml_backend_buffer_t buf) { + return "CPU_HBM"; + + GGML_UNUSED(buf); +} + static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) { hbw_free(buffer->context); } @@ -522,14 +557,16 @@ static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_ // FIXME: this is a hack to avoid having to implement a new buffer type ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); buffer->buft = buft; + buffer->iface.get_name = ggml_backend_cpu_hbm_buffer_get_name; buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer; return buffer; } -ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type() { +ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) { static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = { /* .iface = */ { + /* .get_name = */ ggml_backend_cpu_hbm_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment, /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes @@ -795,6 +832,7 @@ static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct // if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there // ie. kv cache updates // note that this doesn't allow fallback to CPU. need to add output tensors to the splits to copy the data back to the original backend. 
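The placement rule that the code below implements can be summarized as: a node whose destination is already allocated in a buffer stays on the backend that owns that buffer, and otherwise the node follows the backend that holds its largest source tensor. A simplified standalone model of the source-based choice, illustrative only; the real code below works through ggml_backend_buffer and ggml_tallocr handles rather than plain ids:

#include <cstddef>

// hypothetical flattened view of a node's sources (not a ggml type)
struct src_info {
    size_t nbytes;     // ggml_nbytes(src)
    int    backend_id; // backend currently holding the source, or -1
};

static int pick_backend_id(const src_info * srcs, int n_srcs) {
    int    best_id   = -1;
    size_t best_size = 0;
    for (int i = 0; i < n_srcs; i++) {
        if (srcs[i].nbytes >= best_size) {
            // prefer the backend that already holds the largest input, so the
            // biggest tensor is the one that never needs a cross-backend copy
            best_size = srcs[i].nbytes;
            best_id   = srcs[i].backend_id;
        }
    }
    return best_id;
}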
+ // dst ggml_backend_t cur_backend = get_buffer_backend(sched, node->buffer); if (cur_backend != NULL) { @@ -809,7 +847,6 @@ static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct } // src - int cur_prio = INT_MAX; size_t cur_size = 0; for (int i = 0; i < GGML_MAX_SRC; i++) { @@ -826,16 +863,12 @@ static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct break; } - //if (src_backend != NULL) { - int src_prio = sched_backend_prio(sched, src_backend); - size_t src_size = ggml_nbytes(src); - if (/*src_prio < cur_prio &&*/ src_size >= cur_size) { - cur_prio = src_prio; - cur_size = src_size; - cur_backend = src_backend; - SET_CAUSE(node, "1.src%d", i); - } - //} + size_t src_size = ggml_nbytes(src); + if (src_size >= cur_size) { + cur_size = src_size; + cur_backend = src_backend; + SET_CAUSE(node, "1.src%d", i); + } } return cur_backend; } @@ -946,55 +979,22 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); -#if 0 - // pass 2: assign backends to ops from current assignments - // TODO: - // - reuse sched_backend_from_cur - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - ggml_tallocr_t node_allocr = node_allocr(node); - if (node_allocr == NULL) { - int cur_prio = INT_MAX; - size_t cur_size = 0; - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * src = node->src[j]; - if (src == NULL) { - break; - } - ggml_tallocr_t src_allocr = node_allocr(src); - if (src_allocr != NULL) { - int src_prio = sched_allocr_prio(sched, src_allocr); - size_t src_size = ggml_nbytes(src); - if (src_prio < cur_prio && src_size >= cur_size) { - cur_prio = src_prio; - cur_size = src_size; - node_allocr = src_allocr; - SET_CAUSE(node, "2.src%d", j); - } - } - } - if (node_allocr != NULL) { - node_allocr(node) = node_allocr; - } - } - } -#else // pass 2: assign backends to ops from current assignments // start from the end and assign the same backend to previous ops { - ggml_tallocr_t cur_allocr = NULL; - for (int i = graph->n_nodes - 1; i >= 0; i--) { - struct ggml_tensor * node = graph->nodes[i]; - ggml_tallocr_t node_allocr = node_allocr(node); - if (node_allocr != NULL) { - cur_allocr = node_allocr; - } else { - node_allocr(node) = cur_allocr; - SET_CAUSE(node, "2.cur"); + ggml_tallocr_t cur_allocr = NULL; + for (int i = graph->n_nodes - 1; i >= 0; i--) { + struct ggml_tensor * node = graph->nodes[i]; + ggml_tallocr_t node_allocr = node_allocr(node); + if (node_allocr != NULL) { + cur_allocr = node_allocr; + } else { + node_allocr(node) = cur_allocr; + SET_CAUSE(node, "2.cur"); + } } } - } -#endif + //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); // pass 3: assign backends to remaining src from dst (should only be leafs) @@ -1015,86 +1015,85 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g //printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); // pass 4: split graph, find tensors that need to be copied - // TODO: - // - when switching from a less preferred backend to a more preferred backend, check if it is possible to move the switch to an earlier point for the same cost - // find first backend - int cur_split = 0; - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - if (node->view_src == NULL) { - sched->splits[0].tallocr = node_allocr(node); - break; - } - } - sched->splits[0].i_start = 0; 
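The pass 2 rewrite above is easier to see in isolation: unassigned ops inherit the backend of the first assigned op that consumes their result, because the walk goes backwards over the node list. A simplified standalone model, illustrative only and operating on plain ids instead of ggml_tallocr handles:

// walk the node list backwards and let every unassigned node inherit the
// backend of the nearest node after it that already has one
static void propagate_assignments(int * node_backend_id, int n_nodes) {
    int cur = -1; // -1 = unassigned
    for (int i = n_nodes - 1; i >= 0; i--) {
        if (node_backend_id[i] != -1) {
            cur = node_backend_id[i];   // assigned in pass 1, becomes the new reference
        } else {
            node_backend_id[i] = cur;   // inherit from the next assigned node
        }
    }
}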
- sched->splits[0].n_inputs = 0; - memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK - ggml_tallocr_t cur_allocr = sched->splits[0].tallocr; - size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr); - for (int i = 0; i < graph->n_nodes; i++) { - struct ggml_tensor * node = graph->nodes[i]; - - if (ggml_is_view_op(node->op)) { - continue; - } - - ggml_tallocr_t node_allocr = node_allocr(node); - - if (node_allocr != cur_allocr) { - sched->splits[cur_split].i_end = i; - cur_split++; - GGML_ASSERT(cur_split < GGML_MAX_SPLITS); - sched->splits[cur_split].tallocr = node_allocr; - sched->splits[cur_split].i_start = i; - sched->splits[cur_split].n_inputs = 0; - memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK - cur_allocr = node_allocr; - cur_backend_id = sched_allocr_prio(sched, cur_allocr); + { + int cur_split = 0; + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + if (node->view_src == NULL) { + sched->splits[0].tallocr = node_allocr(node); + break; + } } + sched->splits[0].i_start = 0; + sched->splits[0].n_inputs = 0; + memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK + ggml_tallocr_t cur_allocr = sched->splits[0].tallocr; + size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr); + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + + if (ggml_is_view_op(node->op)) { + continue; + } - // find inputs that are not on the same backend - for (int j = 0; j < GGML_MAX_SRC; j++) { - struct ggml_tensor * src = node->src[j]; - if (src == NULL) { - break; + ggml_tallocr_t node_allocr = node_allocr(node); + + if (node_allocr != cur_allocr) { + sched->splits[cur_split].i_end = i; + cur_split++; + GGML_ASSERT(cur_split < GGML_MAX_SPLITS); + sched->splits[cur_split].tallocr = node_allocr; + sched->splits[cur_split].i_start = i; + sched->splits[cur_split].n_inputs = 0; + memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK + cur_allocr = node_allocr; + cur_backend_id = sched_allocr_prio(sched, cur_allocr); } - ggml_tallocr_t src_allocr = node_allocr(src); - if (src_allocr != node_allocr) { - // check if the input is already in the split - bool found = false; - for (int k = 0; k < sched->splits[cur_split].n_inputs; k++) { - if (sched->splits[cur_split].inputs[k] == src) { - found = true; - break; - } - } - if (!found) { - int n_inputs = sched->splits[cur_split].n_inputs++; - //printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr))); - GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS); - sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src; + // find inputs that are not on the same backend + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + break; } + ggml_tallocr_t src_allocr = node_allocr(src); + if (src_allocr != node_allocr) { + // check if the input is already in the split + bool found = false; + for (int k = 0; k < sched->splits[cur_split].n_inputs; k++) { + if (sched->splits[cur_split].inputs[k] == src) { + found = true; + break; + } + } + + if (!found) { + int n_inputs = sched->splits[cur_split].n_inputs++; + //printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr))); + GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS); + sched->splits[cur_split].inputs[n_inputs] = 
(struct ggml_tensor *)src; + } - // create copies - size_t id = hash_id(src); - if (sched->node_copies[id][cur_backend_id] == NULL) { - struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); - sched->node_copies[id][cur_backend_id] = tensor_copy; - node_allocr(tensor_copy) = cur_allocr; - ggml_backend_t backend = get_allocr_backend(sched, cur_allocr); - ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name); + // create copies + size_t id = hash_id(src); + if (sched->node_copies[id][cur_backend_id] == NULL) { + struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); + sched->node_copies[id][cur_backend_id] = tensor_copy; + node_allocr(tensor_copy) = cur_allocr; + ggml_backend_t backend = get_allocr_backend(sched, cur_allocr); + ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name); + } + node->src[j] = sched->node_copies[id][cur_backend_id]; } - node->src[j] = sched->node_copies[id][cur_backend_id]; } } + sched->splits[cur_split].i_end = graph->n_nodes; + sched->n_splits = cur_split + 1; } - sched->splits[cur_split].i_end = graph->n_nodes; - sched->n_splits = cur_split + 1; //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout); -#if 1 +#ifndef NDEBUG // sanity check: all sources should have the same backend as the node for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; @@ -1269,10 +1268,13 @@ void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgr sched_reset(sched); } -void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { +void ggml_backend_sched_graph_split(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS); - sched_split_graph(sched, graph); +} + +void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { + ggml_backend_sched_graph_split(sched, graph); sched_alloc_splits(sched); sched_compute_splits(sched); sched_reset(sched); diff --git a/ggml-backend.h b/ggml-backend.h index 8649ab766f2e7..fc1e5cd0325d9 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -17,11 +17,12 @@ extern "C" { // // buffer type - GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size); - GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft); - GGML_API size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); - GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend); - GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft); + GGML_API const char * ggml_backend_buft_name (ggml_backend_buffer_type_t buft); + GGML_API ggml_backend_buffer_t ggml_backend_buft_alloc_buffer (ggml_backend_buffer_type_t buft, size_t size); + GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft); + GGML_API size_t ggml_backend_buft_get_alloc_size (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); + GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend); + GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft); // buffer enum ggml_backend_buffer_usage { @@ -29,16 +30,17 @@ extern "C" { GGML_BACKEND_BUFFER_USAGE_WEIGHTS = 1, }; - GGML_API void 
ggml_backend_buffer_free (ggml_backend_buffer_t buffer); - GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); - GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); - GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); - GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); - GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); - GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer); + GGML_API const char * ggml_backend_buffer_name (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer); + GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer); + GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer); + GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); + GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); + GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_type (ggml_backend_buffer_t buffer); // @@ -147,24 +149,27 @@ extern "C" { typedef struct ggml_backend_sched * ggml_backend_sched_t; // Initialize a backend scheduler - GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends); - - GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); - + GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends); + GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); // Initialize backend buffers from a measure graph - GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); - GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched); + GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); + // Get the number of splits of the last graph + GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched); GGML_API ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend); GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend); - GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend); + GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend); - // Allocate a graph on the backend scheduler + // Allocate and compute graph on the backend scheduler GGML_API void ggml_backend_sched_graph_compute( ggml_backend_sched_t sched, struct ggml_cgraph * graph); + // Split without 
computing - only useful to find the number of splits + GGML_API void ggml_backend_sched_graph_split( + ggml_backend_sched_t sched, + struct ggml_cgraph * graph); // // Utils diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 2af16887c4a2e..a17eeb2936c52 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -8,6 +8,7 @@ #include #include #include +#include #include @@ -77,6 +78,7 @@ #define cudaMemcpyKind hipMemcpyKind #define cudaMemset hipMemset #define cudaMemsetAsync hipMemsetAsync +#define cudaMemGetInfo hipMemGetInfo #define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize #define cudaSetDevice hipSetDevice #define cudaStreamCreateWithFlags hipStreamCreateWithFlags @@ -6885,6 +6887,8 @@ void ggml_init_cublas() { } } +// TODO: cleanup this after the split buffer type is implemented +#if 0 void ggml_cuda_set_tensor_split(const float * tensor_split) { if (tensor_split == nullptr) { return; @@ -6908,6 +6912,7 @@ void ggml_cuda_set_tensor_split(const float * tensor_split) { g_tensor_split[i] /= split_sum; } } +#endif void * ggml_cuda_host_malloc(size_t size) { if (getenv("GGML_CUDA_NO_PINNED") != nullptr) { @@ -9349,10 +9354,9 @@ void ggml_cuda_get_device_description(int device, char * description, size_t des struct ggml_backend_context_cuda { int device; - char name[128]; + std::string name; }; - // cuda buffer struct ggml_backend_buffer_context_cuda { @@ -9360,8 +9364,12 @@ struct ggml_backend_buffer_context_cuda { void * dev_ptr = nullptr; ggml_tensor_extra_gpu * temp_tensor_extras = nullptr; size_t temp_tensor_extra_index = 0; + std::string name; - ggml_backend_buffer_context_cuda(int device, void * dev_ptr) : device(device), dev_ptr(dev_ptr) {} + ggml_backend_buffer_context_cuda(int device, void * dev_ptr) : + device(device), dev_ptr(dev_ptr), + name(GGML_CUDA_NAME + std::to_string(device)) { + } ~ggml_backend_buffer_context_cuda() { delete[] temp_tensor_extras; @@ -9381,6 +9389,11 @@ struct ggml_backend_buffer_context_cuda { } }; +static const char * ggml_backend_cuda_buffer_get_name(ggml_backend_buffer_t buffer) { + ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + return ctx->name.c_str(); +} + static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; CUDA_CHECK(cudaFree(ctx->dev_ptr)); @@ -9457,6 +9470,7 @@ static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t } static struct ggml_backend_buffer_i cuda_backend_buffer_interface = { + /* .get_name = */ ggml_backend_cuda_buffer_get_name, /* .free_buffer = */ ggml_backend_cuda_buffer_free_buffer, /* .get_base = */ ggml_backend_cuda_buffer_get_base, /* .init_tensor = */ ggml_backend_cuda_buffer_init_tensor, @@ -9469,21 +9483,32 @@ static struct ggml_backend_buffer_i cuda_backend_buffer_interface = { // cuda buffer type +struct ggml_backend_cuda_buffer_type_context { + int device; + std::string name; +}; + +static const char * ggml_backend_cuda_buffer_type_name(ggml_backend_buffer_type_t buft) { + ggml_backend_cuda_buffer_type_context * ctx = (ggml_backend_cuda_buffer_type_context *)buft->context; + + return ctx->name.c_str(); +} + static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { - int device = (int) (intptr_t) buft->context; + ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context; - ggml_cuda_set_device(device); + 
ggml_cuda_set_device(buft_ctx->device); size = std::max(size, (size_t)1); // cudaMalloc returns null for size 0 void * dev_ptr; cudaError_t err = cudaMalloc(&dev_ptr, size); if (err != cudaSuccess) { - fprintf(stderr, "%s: allocating %.2f MiB on device %d: cudaMalloc failed: %s\n", __func__, size/1024.0/1024.0, device, cudaGetErrorString(err)); + fprintf(stderr, "%s: allocating %.2f MiB on device %d: cudaMalloc failed: %s\n", __func__, size/1024.0/1024.0, buft_ctx->device, cudaGetErrorString(err)); return nullptr; } - ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(device, dev_ptr); + ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(buft_ctx->device, dev_ptr); return ggml_backend_buffer_init(buft, cuda_backend_buffer_interface, ctx, size); } @@ -9519,13 +9544,14 @@ static bool ggml_backend_cuda_buffer_type_supports_backend(ggml_backend_buffer_t return false; } - int device = (int) (intptr_t) buft->context; + ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context; ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; - return device == cuda_ctx->device; + return buft_ctx->device == cuda_ctx->device; } static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { + /* .get_name = */ ggml_backend_cuda_buffer_type_name, /* .alloc_buffer = */ ggml_backend_cuda_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment, /* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size, @@ -9542,7 +9568,7 @@ ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { for (int i = 0; i < GGML_CUDA_MAX_DEVICES; i++) { ggml_backend_cuda_buffer_types[i] = { /* .iface = */ ggml_backend_cuda_buffer_type_interface, - /* .context = */ (ggml_backend_buffer_type_context_t) (intptr_t) i, + /* .context = */ new ggml_backend_cuda_buffer_type_context{i, GGML_CUDA_NAME + std::to_string(i)}, }; } ggml_backend_cuda_buffer_type_initialized = true; @@ -9553,6 +9579,18 @@ ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { // host buffer type +static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_type_t buft) { + return "CUDA_Host"; + + UNUSED(buft); +} + +static const char * ggml_backend_cuda_host_buffer_name(ggml_backend_buffer_t buffer) { + return "CUDA_Host"; + + UNUSED(buffer); +} + static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_cuda_host_free(buffer->context); } @@ -9568,6 +9606,7 @@ static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggm // FIXME: this is a hack to avoid having to implement a new buffer type ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); buffer->buft = buft; + buffer->iface.get_name = ggml_backend_cuda_host_buffer_name; buffer->iface.free_buffer = ggml_backend_cuda_host_buffer_free_buffer; return buffer; @@ -9576,6 +9615,7 @@ static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggm ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() { static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_type_host = { /* .iface = */ { + /* .get_name = */ ggml_backend_cuda_host_buffer_type_name, /* .alloc_buffer = */ ggml_backend_cuda_host_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, @@ 
-9591,12 +9631,9 @@ ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() { // backend static const char * ggml_backend_cuda_name(ggml_backend_t backend) { - //return GGML_CUDA_NAME; - //UNUSED(backend); ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; - // TODO: on init - sprintf(cuda_ctx->name, "%s%d", "CUD", cuda_ctx->device); - return cuda_ctx->name; + + return cuda_ctx->name.c_str(); } static void ggml_backend_cuda_free(ggml_backend_t backend) { @@ -9861,7 +9898,8 @@ ggml_backend_t ggml_backend_cuda_init(int device) { ggml_cuda_set_main_device(device); ggml_backend_context_cuda * ctx = new ggml_backend_context_cuda { - /* .device = */ device + /* .device = */ device, + /* .name = */ GGML_CUDA_NAME + std::to_string(device), }; ggml_backend_t cuda_backend = new ggml_backend { diff --git a/ggml-metal.m b/ggml-metal.m index fbbdcd8c46726..e3d76b9d3d332 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2442,10 +2442,10 @@ static void ggml_backend_metal_free_device(void) { } } -static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { - struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context; +static const char * ggml_backend_metal_buffer_get_name(ggml_backend_buffer_t buffer) { + return "Metal"; - return ctx->all_data; + UNUSED(buffer); } static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) { @@ -2463,6 +2463,12 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) free(ctx); } +static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { + struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context; + + return ctx->all_data; +} + static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { memcpy((char *)tensor->data + offset, data, size); @@ -2494,6 +2500,7 @@ static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_ } static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = { + /* .get_name = */ ggml_backend_metal_buffer_get_name, /* .free_buffer = */ ggml_backend_metal_buffer_free_buffer, /* .get_base = */ ggml_backend_metal_buffer_get_base, /* .init_tensor = */ NULL, @@ -2506,6 +2513,12 @@ static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_ // default buffer type +static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) { + return "Metal"; + + UNUSED(buft); +} + static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context)); @@ -2578,6 +2591,7 @@ static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t bu ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) { static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = { /* .iface = */ { + /* .get_name = */ ggml_backend_metal_buffer_type_get_name, /* .alloc_buffer = */ ggml_backend_metal_buffer_type_alloc_buffer, /* .get_alignment = */ ggml_backend_metal_buffer_type_get_alignment, /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes diff --git a/llama.cpp b/llama.cpp index 266f2b1f81f93..12ec49ebc30ef 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1731,7 +1731,7 @@ static bool llama_kv_cache_init( } 
ggml_backend_buffer_clear(buf, 0); // FIXME: buffer type name - LLAMA_LOG_INFO("%s: KV %10s buffer size: %.02f MiB\n", __func__, "???", ggml_backend_buffer_get_size(buf)/1024.0/1024.0); + LLAMA_LOG_INFO("%s: %10s KV buffer size = %7.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); cache.bufs.push_back(buf); } @@ -3169,12 +3169,13 @@ static bool llm_load_tensors( #ifdef GGML_USE_CUBLAS if (split_mode == CUDA_SPLIT_LAYER) { + // calculate the split points int device_count = ggml_backend_cuda_get_device_count(); float splits[GGML_CUDA_MAX_DEVICES]; std::copy(tensor_split, tensor_split + device_count, splits); bool all_zero = std::all_of(splits, splits + device_count, [](float x) { return x == 0.0f; }); if (all_zero) { - // set by free memory + // default split, by free memory for (int i = 0; i < device_count; ++i) { size_t total; size_t free; @@ -3189,21 +3190,17 @@ static bool llm_load_tensors( } for (int i = 0; i < device_count; ++i) { splits[i] /= split_sum; - printf("split[%d] = %.2f\n", i, splits[i]); } + // assign GPU layers according to the splits to the devices int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1); - - // assign layers proportionally, in reverse order for (int64_t i = i_gpu_start; i < n_layer; ++i) { int layer_gpu = std::upper_bound(splits, splits + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits; - printf("layer %d -> gpu %d\n", (int)i, layer_gpu); model.buft_layer[i] = llama_default_buffer_type_offload(layer_gpu); } // output layer if (n_gpu_layers > n_layer) { int layer_gpu = std::upper_bound(splits, splits + device_count, float(n_layer)/act_gpu_layers) - splits; - printf("output -> gpu %d\n", layer_gpu); model.buft_output = llama_default_buffer_type_offload(layer_gpu); } else { model.buft_output = llama_default_buffer_type_cpu(true); @@ -3250,7 +3247,7 @@ static bool llm_load_tensors( model.ctxs.push_back(ctx); } - LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0); + LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, model.ctxs.size()*ctx_size/1024.0/1024.0); // create tensors for the weights { @@ -3764,14 +3761,7 @@ static bool llm_load_tensors( LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers); for (ggml_backend_buffer_t buf : model.bufs) { - // FIXME: add buffer type names to ggml-backend - const char * name; - if (ggml_backend_buffer_type(buf) == ggml_backend_cpu_buffer_type()) { - name = "CPU"; - } else { - name = "???"; - } - LLAMA_LOG_INFO("%s: %10s buffer size = %7.2f MiB\n", __func__, name, ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); + LLAMA_LOG_INFO("%s: %10s buffer size = %7.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); } } @@ -6224,6 +6214,8 @@ static int llama_decode_internal( } ggml_backend_sched_graph_compute(lctx.sched, gf); + // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched)); + #ifdef GGML_USE_MPI ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer); #endif @@ -9269,13 +9261,14 @@ struct llama_context * llama_new_context_with_model( // initialize scheduler with the worst-case graph ggml_backend_sched_init_measure(ctx->sched, gf); + // note: the number of splits during measure is higher than during inference due to the kv shift int n_splits = ggml_backend_sched_get_n_splits(ctx->sched); - LLAMA_LOG_INFO("%s: graph splits: 
%d\n", __func__, n_splits); + LLAMA_LOG_INFO("%s: graph splits (measure): %d\n", __func__, n_splits); ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, ctx->backend_cpu); for (ggml_backend_t backend : backends) { ggml_backend_buffer_t buf = ggml_backend_sched_get_buffer(ctx->sched, backend); - LLAMA_LOG_INFO("%s: %10s compute buffer size = %.2f MiB\n", __func__, + LLAMA_LOG_INFO("%s: %10s compute buffer size = %7.2f MiB\n", __func__, ggml_backend_name(backend), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); } From a1ab35c682cb5835ae110b9f01232b766a1136ce Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 5 Jan 2024 03:14:06 +0100 Subject: [PATCH 03/34] fix unmap after loading --- llama.cpp | 84 ++++++++++++++++++++++++------------------------------- 1 file changed, 37 insertions(+), 47 deletions(-) diff --git a/llama.cpp b/llama.cpp index 12ec49ebc30ef..f7c581ca06a51 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2401,7 +2401,7 @@ struct llama_model_loader { return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx); } - void init_mapping(bool prefetch = true) { + void init_mapping(bool prefetch = true, llama_mlock * lmlock = nullptr) { /* // prefetch only CPU tensors if (use_mmap) { @@ -2421,6 +2421,18 @@ struct llama_model_loader { if (use_mmap) { mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa())); } + + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { + struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i)); + size_data += ggml_nbytes(cur); + } + + if (use_mmap && mapping) { + if (lmlock) { + lmlock->init(mapping->addr); + } + mmap_used_first = mapping->size; + } } // for backwards compatibility, does not support ggml-backend @@ -2439,29 +2451,15 @@ struct llama_model_loader { size_t size_done = 0; size_t size_data = 0; - size_t mmap_first = -1; - size_t mmap_last = 0; + size_t mmap_used_first = -1; + size_t mmap_used_last = 0; // Returns false if cancelled by progress_callback bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) { - // TODO: move to a better place - if (size_data == 0) { - for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { - struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i)); - size_data += ggml_nbytes(cur); - } - - if (use_mmap && buf_mmap) { - // FIXME - //if (lmlock) { - // lmlock->init(mapping->addr); - //} - } - } + GGML_ASSERT(size_data != 0 && "call init_mapping() first"); std::vector> read_buf; - for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); if (!cur) { @@ -2477,15 +2475,14 @@ struct llama_model_loader { const size_t offs = file_offset(ggml_get_name(cur)); - // FIXME if (use_mmap && mapping) { if (buf_mmap && cur->data == nullptr) { ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs); if (lmlock) { lmlock->grow_to(offs + ggml_nbytes(cur)); } - mmap_first = std::min(mmap_first, offs); - mmap_last = std::max(mmap_last, offs + ggml_nbytes(cur)); + mmap_used_first = std::min(mmap_used_first, offs); + mmap_used_last = std::max(mmap_used_last, offs + ggml_nbytes(cur)); } else { ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur)); } @@ -2504,20 +2501,23 @@ struct llama_model_loader { size_done += ggml_nbytes(cur); } - if (progress_callback && size_done >= size_data) { 
- // Even though the model is done loading, we still honor - // cancellation since we need to free allocations. - return progress_callback(1.0f, progress_callback_user_data); + // check if this is the last call and do final cleanup + if (size_done >= size_data) { + // unmap offloaded tensors and metadata + if (use_mmap && mapping) { + mapping->unmap_fragment(0, mmap_used_first); + if (mmap_used_last != 0) { + mapping->unmap_fragment(mmap_used_last, mapping->size); + } + } + if (progress_callback) { + // Even though the model is done loading, we still honor + // cancellation since we need to free allocations. + return progress_callback(1.0f, progress_callback_user_data); + } } - return true; - } - void unmap_fragments() { - // unmap offloaded tensors and metadata - if (use_mmap && mapping) { - mapping->unmap_fragment(0, mmap_first); - mapping->unmap_fragment(mmap_last, mapping->size); - } + return true; } }; @@ -3700,16 +3700,7 @@ static bool llm_load_tensors( ml.done_getting_tensors(); - ml.init_mapping(); - - // TODO: move to ggml - //auto ggml_n_tensors = [](struct ggml_context * ctx) { - // int n = 0; - // for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) { - // ++n; - // } - // return n; - //}; + ml.init_mapping(true, use_mlock ? &model.mlock_mmap : nullptr); // create backend buffers @@ -3720,9 +3711,9 @@ static bool llm_load_tensors( ggml_context * ctx = it.second; ggml_backend_buffer_t buf = nullptr; - // TODO: do not use whole model mapping for the buffer, only the region containing the tensors - // this is important for metal: if the entire model could be mapped, then we could use metal for all layers - if (ml.use_mmap && buft == ggml_backend_cpu_buffer_type()) { + // TODO: do not use the whole model mapping for the buffer, only the region containing the tensors + // this is important for metal: if the entire model could be mapped to a metal buffer, then we could use metal for all layers + if (ml.use_mmap && buft == llama_default_buffer_type_cpu(true)) { buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size); } #ifdef GGML_USE_METAL @@ -3780,7 +3771,6 @@ static bool llm_load_tensors( return false; } } - ml.unmap_fragments(); model.mapping = std::move(ml.mapping); From 1fa7ee2e51cd32b477aa854e766bb701525ef621 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 5 Jan 2024 10:31:15 +0200 Subject: [PATCH 04/34] batched-bench : add tensor_split param --- examples/batched-bench/batched-bench.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index 57596ed986050..7924db267401c 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -88,7 +88,10 @@ int main(int argc, char ** argv) { llama_model_params model_params = llama_model_default_params(); + const std::vector t_split (LLAMA_MAX_DEVICES, 0.0f); + model_params.n_gpu_layers = n_gpu_layers; + model_params.tensor_split = t_split.data(); llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params); From 863ef45539f59577bdd4fa4daa2d0729b704aab9 Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 5 Jan 2024 10:57:05 +0100 Subject: [PATCH 05/34] llama : check for null tensor_split --- llama.cpp | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/llama.cpp b/llama.cpp index f7c581ca06a51..d8fe1554f6f2d 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1711,7 +1711,7 @@ static bool 
llama_kv_cache_init( cache.v_l.reserve(n_layer); for (int i = 0; i < (int) n_layer; i++) { - struct ggml_context * ctx = offload ? ctx_map[model.buft_layer[i].buft] : cache.ctxs.front(); + struct ggml_context * ctx = offload ? ctx_map.at(model.buft_layer[i].buft) : cache.ctxs.front(); ggml_tensor * k = ggml_new_tensor_1d(ctx, ktype, n_embd_k_gqa*n_ctx); ggml_tensor * v = ggml_new_tensor_1d(ctx, vtype, n_embd_v_gqa*n_ctx); ggml_format_name(k, "cache_k_l%d", i); @@ -1731,7 +1731,7 @@ static bool llama_kv_cache_init( } ggml_backend_buffer_clear(buf, 0); // FIXME: buffer type name - LLAMA_LOG_INFO("%s: %10s KV buffer size = %7.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); + LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); cache.bufs.push_back(buf); } @@ -3172,8 +3172,10 @@ static bool llm_load_tensors( // calculate the split points int device_count = ggml_backend_cuda_get_device_count(); float splits[GGML_CUDA_MAX_DEVICES]; - std::copy(tensor_split, tensor_split + device_count, splits); - bool all_zero = std::all_of(splits, splits + device_count, [](float x) { return x == 0.0f; }); + if (tensor_split != nullptr) { + std::copy(tensor_split, tensor_split + device_count, splits); + } + bool all_zero = tensor_split == nullptr || std::all_of(splits, splits + device_count, [](float x) { return x == 0.0f; }); if (all_zero) { // default split, by free memory for (int i = 0; i < device_count; ++i) { @@ -3752,7 +3754,7 @@ static bool llm_load_tensors( LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers); for (ggml_backend_buffer_t buf : model.bufs) { - LLAMA_LOG_INFO("%s: %10s buffer size = %7.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); + LLAMA_LOG_INFO("%s: %10s buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); } } @@ -9258,7 +9260,7 @@ struct llama_context * llama_new_context_with_model( for (ggml_backend_t backend : backends) { ggml_backend_buffer_t buf = ggml_backend_sched_get_buffer(ctx->sched, backend); - LLAMA_LOG_INFO("%s: %10s compute buffer size = %7.2f MiB\n", __func__, + LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__, ggml_backend_name(backend), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); } From d1074593214e76f38edc460a57cee5bf6d529bb0 Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 5 Jan 2024 10:57:30 +0100 Subject: [PATCH 06/34] ggml-backend : increase GGML_MAX_BACKENDS --- ggml-backend.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index ee8e72560a358..8e791827255e0 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -727,7 +727,7 @@ static ggml_backend_t ggml_backend_reg_cpu_init(const char * params, void * user // scheduler -#define GGML_MAX_BACKENDS 4 +#define GGML_MAX_BACKENDS 16 #define GGML_MAX_SPLITS 256 #define GGML_MAX_SPLIT_INPUTS 16 @@ -903,7 +903,7 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra } ggml_tallocr_t node_allocr = node_allocr(node); ggml_backend_t node_backend = node_allocr ? 
get_allocr_backend(sched, node_allocr) : NULL; // FIXME: - fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name, + fprintf(stderr, "node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name, fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", GET_CAUSE(node)); for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; @@ -912,7 +912,7 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra } ggml_tallocr_t src_allocr = node_allocr(src); ggml_backend_t src_backend = src_allocr ? get_allocr_backend(sched, src_allocr) : NULL; - fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name, + fprintf(stderr, " %20.20s (%5.5s) [%5.5s %8.8s]", src->name, fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src)); } fprintf(stderr, "\n"); @@ -1091,7 +1091,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g sched->n_splits = cur_split + 1; } - //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout); + //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); #ifndef NDEBUG // sanity check: all sources should have the same backend as the node From ece0b0d855eb5c53ec9838f6a57b6afc6c6ba573 Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 6 Jan 2024 00:58:22 +0100 Subject: [PATCH 07/34] improve graph splitting, partial fix for --no-kv-offload --- ggml-backend.c | 132 +++++++++++++++++++++++++++++++++++++++++++------ ggml-cuda.cu | 25 +--------- llama.cpp | 11 ++++- 3 files changed, 127 insertions(+), 41 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 8e791827255e0..31516b4c55efe 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -737,9 +737,16 @@ struct ggml_backend_sched_split { int i_end; struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS]; int n_inputs; + // graph view of this split struct ggml_cgraph graph; }; +// TODO: group all the hash values into a single struct for clarity +//struct sched_hash_value { +// ggml_tallocr_t tallocr; +// struct ggml_tensor * copies[GGML_MAX_BACKENDS]; +//}; + struct ggml_backend_sched { int n_backends; ggml_backend_t backends[GGML_MAX_BACKENDS]; @@ -747,11 +754,15 @@ struct ggml_backend_sched { ggml_gallocr_t galloc; + // hash keys of the nodes in the graph struct ggml_hash_set hash_set; - ggml_tallocr_t * node_talloc; // [hash_set.size] - struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // [hash_set.size][GGML_MAX_BACKENDS] + // hash values (arrays of [hash_set.size]) + ggml_tallocr_t * node_talloc; // tallocr assigned to each node (indirectly this is the backend) + struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // copies of each node for each destination backend + // copy of the graph with modified inputs struct ggml_cgraph * graph; + struct ggml_backend_sched_split splits[GGML_MAX_SPLITS]; int n_splits; @@ -928,6 +939,12 @@ static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, co return dup; } + +//#define DEBUG_PASS1 +//#define DEBUG_PASS2 +//#define DEBUG_PASS3 +//#define DEBUG_PASS4 + // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend // TODO: merge passes static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { @@ -977,17 +994,32 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g 
node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend); } } - //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#ifdef DEBUG_PASS1 + fprintf(stderr, "PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#endif // pass 2: assign backends to ops from current assignments // start from the end and assign the same backend to previous ops + + // expand gpu backends (ie non last prio) up and down, ignoring cpu + // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops + + // pass 2.1 expand gpu up { ggml_tallocr_t cur_allocr = NULL; for (int i = graph->n_nodes - 1; i >= 0; i--) { struct ggml_tensor * node = graph->nodes[i]; + if (ggml_is_view_op(node->op)) { + continue; + } ggml_tallocr_t node_allocr = node_allocr(node); if (node_allocr != NULL) { - cur_allocr = node_allocr; + if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) { + cur_allocr = NULL; + } + else { + cur_allocr = node_allocr; + } } else { node_allocr(node) = cur_allocr; SET_CAUSE(node, "2.cur"); @@ -995,12 +1027,58 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } } - //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); + // pass 2.2 expand gpu down + { + ggml_tallocr_t cur_allocr = NULL; + for (int i = 0; i < graph->n_nodes; i++) { + struct ggml_tensor * node = graph->nodes[i]; + if (ggml_is_view_op(node->op)) { + continue; + } + ggml_tallocr_t node_allocr = node_allocr(node); + if (node_allocr != NULL) { + if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) { + cur_allocr = NULL; + } + else { + cur_allocr = node_allocr; + } + } else { + node_allocr(node) = cur_allocr; + SET_CAUSE(node, "2.cur"); + } + } + } + + // pass 2.3 expand rest up + { + ggml_tallocr_t cur_allocr = NULL; + for (int i = graph->n_nodes - 1; i >= 0; i--) { + struct ggml_tensor * node = graph->nodes[i]; + if (ggml_is_view_op(node->op)) { + continue; + } + ggml_tallocr_t node_allocr = node_allocr(node); + if (node_allocr != NULL) { + cur_allocr = node_allocr; + } else { + node_allocr(node) = cur_allocr; + SET_CAUSE(node, "2.cur"); + } + } + } +#ifdef DEBUG_PASS2 + fprintf(stderr, "PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#endif - // pass 3: assign backends to remaining src from dst (should only be leafs) + // pass 3: assign backends to remaining src from dst and view_src for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; - ggml_tallocr_t node_allocr = node_allocr(node); + ggml_tallocr_t cur_allocr = node_allocr(node); + if (ggml_is_view_op(node->op) && cur_allocr == NULL) { + cur_allocr = node_allocr(node) = node_allocr(node->view_src); + SET_CAUSE(node, "3.vsrc"); + } for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { @@ -1008,11 +1086,18 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } ggml_tallocr_t src_allocr = node_allocr(src); if (src_allocr == NULL) { - node_allocr(src) = node_allocr; + if (src->view_src != NULL) { + // views are always on the same backend as the source + node_allocr(src) = node_allocr(src->view_src); + } else { + node_allocr(src) = cur_allocr; + } } } } - //printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#ifdef DEBUG_PASS3 + fprintf(stderr, "PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#endif // pass 4: split graph, find tensors that need to be copied { @@ -1074,7 +1159,7 
@@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src; } - // create copies + // create a copy of the input in the split's backend size_t id = hash_id(src); if (sched->node_copies[id][cur_backend_id] == NULL) { struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); @@ -1090,8 +1175,9 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g sched->splits[cur_split].i_end = graph->n_nodes; sched->n_splits = cur_split + 1; } - - //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#ifdef DEBUG_PASS4 + fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); +#endif #ifndef NDEBUG // sanity check: all sources should have the same backend as the node @@ -1101,6 +1187,11 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g if (node_allocr == NULL) { fprintf(stderr, "!!!!!!! %s has no backend\n", node->name); } + if (node->view_src != NULL && node_allocr != node_allocr(node->view_src)) { + fprintf(stderr, "!!!!!!! %s has backend %s, view_src %s has backend %s\n", + node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL", + node->view_src->name, node_allocr(node->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(node->view_src))) : "NULL"); + } for (int j = 0; j < GGML_MAX_SRC; j++) { struct ggml_tensor * src = node->src[j]; if (src == NULL) { @@ -1112,8 +1203,14 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL", j, src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL"); } + if (src->view_src != NULL && src_allocr != node_allocr(src->view_src)) { + fprintf(stderr, "!!!!!!! [src] %s has backend %s, view_src %s has backend %s\n", + src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL", + src->view_src->name, node_allocr(src->view_src) ? 
ggml_backend_name(get_allocr_backend(sched, node_allocr(src->view_src))) : "NULL"); + } } } + fflush(stderr); #endif // create copies of the graph for each split @@ -1127,6 +1224,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g for (int j = 0; j < split->n_inputs; j++) { struct ggml_tensor * input = split->inputs[j]; struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)]; + // add a dependency to the input source so that it is not freed before the copy is done input_cpy->src[0] = input; graph_copy->nodes[graph_copy->n_nodes++] = input_cpy; } @@ -1163,19 +1261,20 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { struct ggml_tensor * input = split->inputs[j]; struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_backend_prio(sched, split_backend)]; if (input->buffer == NULL) { + GGML_ASSERT(false); if (input->view_src == NULL) { fprintf(stderr, "input %s has no buffer and no view_src\n", input->name); - exit(1); + GGML_ASSERT(false); } // FIXME: may need to use the sched buffer instead ggml_backend_view_init(input->view_src->buffer, input); } if (input_cpy->buffer == NULL) { fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name); - exit(1); + GGML_ASSERT(false); } - //GGML_ASSERT(input->buffer->backend != input_cpy->buffer->backend); - //GGML_ASSERT(input_cpy->buffer->backend == split_backend); + // TODO: avoid this copy if it was already copied in a previous split, and the input didn't change + // this is important to avoid copying constants such as KQ_mask and inp_pos multiple time ggml_backend_tensor_copy(input, input_cpy); } // ggml_backend_synchronize(split_backend); @@ -1301,6 +1400,7 @@ void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml } // utils + void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { GGML_ASSERT(tensor->buffer == NULL); //GGML_ASSERT(tensor->data == NULL); // views of pre-allocted tensors may have the data set, but still need to be initialized diff --git a/ggml-cuda.cu b/ggml-cuda.cu index a17eeb2936c52..52bb8902c7a6f 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9712,6 +9712,7 @@ static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE) continue; +#ifndef NDEBUG assert(node->backend == GGML_BACKEND_GPU); assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); assert(node->extra != nullptr); @@ -9723,35 +9724,13 @@ static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph assert(node->src[j]->extra != nullptr); } } +#endif bool ok = ggml_cuda_compute_forward(¶ms, node); if (!ok) { fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op)); } GGML_ASSERT(ok); - -#if 0 - if (node->type == GGML_TYPE_F32) { - cudaDeviceSynchronize(); - std::vector tmp(ggml_nelements(node), 0.0f); - cudaMemcpy(tmp.data(), node->data, ggml_nelements(node)*sizeof(float), cudaMemcpyDeviceToHost); - printf("\n%s (%s) (%s %s) (%s %s): ", node->name, ggml_op_name(node->op), - ggml_type_name(node->src[0]->type), - node->src[1] ? ggml_type_name(node->src[1]->type) : "none", - node->src[0]->name, - node->src[1] ? 
node->src[1]->name : "none"); - double sum = 0.0; - double sq_sum = 0.0; - for (int i = 0; i < ggml_nelements(node); i++) { - printf("%f ", tmp[i]); - sum += tmp[i]; - sq_sum += tmp[i]*tmp[i]; - } - printf("\n"); - printf("sum: %f, ", sum); - printf("sq_sum: %f\n", sq_sum); - } -#endif } UNUSED(backend); diff --git a/llama.cpp b/llama.cpp index d8fe1554f6f2d..5104350ca88d6 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1730,7 +1730,6 @@ static bool llama_kv_cache_init( return false; } ggml_backend_buffer_clear(buf, 0); - // FIXME: buffer type name LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); cache.bufs.push_back(buf); } @@ -2463,9 +2462,9 @@ struct llama_model_loader { for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); if (!cur) { + // some tensors may be allocated in a different context continue; } - GGML_ASSERT(cur); // unused tensors should have been caught by load_data already if (progress_callback) { if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) { @@ -3734,6 +3733,8 @@ static bool llm_load_tensors( if (buf == nullptr) { throw std::runtime_error("failed to allocate buffer"); } + // indicate that this buffer contains weights + // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are always scheduled to the backend that contains the weight ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); model.bufs.push_back(buf); ctx_bufs.emplace_back(ctx, buf); @@ -4336,6 +4337,12 @@ struct llm_build_context { cb(Vcur, "Vcur", il); } + // these nodes are added to the graph together so that they are not reordered + // by doing so, the number of splits in the graph is reduced + ggml_build_forward_expand(gf, Qcur); + ggml_build_forward_expand(gf, Kcur); + ggml_build_forward_expand(gf, Vcur); + Qcur = ggml_rope_custom( ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale, From 2f2c36799d645157c79bb07edcb718cb324d92fa Mon Sep 17 00:00:00 2001 From: slaren Date: Sat, 6 Jan 2024 23:07:43 +0100 Subject: [PATCH 08/34] cuda : add ggml-backend split buffer support --- ggml-backend.c | 10 +- ggml-cuda.cu | 351 +++++++++++++++++++++++++++++++++++++++++-------- ggml-cuda.h | 2 + llama.cpp | 29 ++-- 4 files changed, 318 insertions(+), 74 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 31516b4c55efe..56f96c3671ec1 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1015,9 +1015,9 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g ggml_tallocr_t node_allocr = node_allocr(node); if (node_allocr != NULL) { if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) { + // skip cpu cur_allocr = NULL; - } - else { + } else { cur_allocr = node_allocr; } } else { @@ -1038,9 +1038,9 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g ggml_tallocr_t node_allocr = node_allocr(node); if (node_allocr != NULL) { if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) { + // skip cpu cur_allocr = NULL; - } - else { + } else { cur_allocr = node_allocr; } } else { @@ -1274,7 +1274,7 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { GGML_ASSERT(false); } // TODO: avoid this copy if it was already copied in a previous split, and the input didn't change - // this is 
important to avoid copying constants such as KQ_mask and inp_pos multiple time + // this is important to avoid copying constants such as KQ_mask and inp_pos multiple times ggml_backend_tensor_copy(input, input_cpy); } // ggml_backend_synchronize(split_backend); diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 52bb8902c7a6f..0969c26e603fc 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -10,7 +10,11 @@ #include #include #include - +#include +#include +#include "ggml-cuda.h" +#include "ggml.h" +#include "ggml-backend-impl.h" #if defined(GGML_USE_HIPBLAS) #include @@ -114,10 +118,6 @@ #endif // defined(GGML_USE_HIPBLAS) -#include "ggml-cuda.h" -#include "ggml.h" -#include "ggml-backend-impl.h" - #define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products #define CC_VOLTA 700 #define CC_OFFSET_AMD 1000000 @@ -546,7 +546,7 @@ static void ggml_cuda_set_device(const int device) { static int g_device_count = -1; static int g_main_device = 0; -static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0}; +static std::array g_default_tensor_split = {}; struct cuda_device_capabilities { int cc; // compute capability @@ -6854,8 +6854,9 @@ void ggml_init_cublas() { CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); fprintf(stderr, " Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); - g_tensor_split[id] = total_vram; + g_default_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; + #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) g_device_caps[id].cc = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD; #else @@ -6863,7 +6864,7 @@ void ggml_init_cublas() { #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) } for (int id = 0; id < g_device_count; ++id) { - g_tensor_split[id] /= total_vram; + g_default_tensor_split[id] /= total_vram; } for (int id = 0; id < g_device_count; ++id) { @@ -6887,33 +6888,6 @@ void ggml_init_cublas() { } } -// TODO: cleanup this after the split buffer type is implemented -#if 0 -void ggml_cuda_set_tensor_split(const float * tensor_split) { - if (tensor_split == nullptr) { - return; - } - bool all_zero = true; - for (int i = 0; i < g_device_count; ++i) { - if (tensor_split[i] != 0.0f) { - all_zero = false; - break; - } - } - if (all_zero) { - return; - } - float split_sum = 0.0f; - for (int i = 0; i < g_device_count; ++i) { - g_tensor_split[i] = split_sum; - split_sum += tensor_split[i]; - } - for (int i = 0; i < g_device_count; ++i) { - g_tensor_split[i] /= split_sum; - } -} -#endif - void * ggml_cuda_host_malloc(size_t size) { if (getenv("GGML_CUDA_NO_PINNED") != nullptr) { return nullptr; @@ -7365,11 +7339,11 @@ static void ggml_cuda_op_mul_mat_q( (void) src1_ddf_i; } -static int64_t get_row_rounding(ggml_type type) { +static int64_t get_row_rounding(ggml_type type, const std::array & tensor_split) { int64_t min_compute_capability = INT_MAX; int64_t max_compute_capability = INT_MIN; for (int id = 0; id < g_device_count; ++id) { - if (g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) { + if (tensor_split[id] < (id + 1 < g_device_count ? 
tensor_split[id + 1] : 1.0f)) { if (min_compute_capability > g_device_caps[id].cc) { min_compute_capability = g_device_caps[id].cc; } @@ -7426,6 +7400,21 @@ static int64_t get_row_rounding(ggml_type type) { #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) } +static void get_row_split(int64_t * row_low, int64_t * row_high, const ggml_tensor * tensor, const std::array & tensor_split, int id) { + const int64_t nrows = ggml_nrows(tensor); + const int64_t rounding = get_row_rounding(tensor->type, tensor_split); + + *row_low = id == 0 ? 0 : nrows*tensor_split[id]; + *row_low -= *row_low % rounding; + + if (id == g_device_count - 1) { + *row_high = nrows; + } else { + *row_high = nrows*tensor_split[id + 1]; + *row_high -= *row_high % rounding; + } +} + static void ggml_cuda_op_mul_mat_vec_q( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i, const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols, @@ -8023,6 +8012,11 @@ static void ggml_cuda_set_peer_access(const int n_tokens) { peer_access_enabled = enable_peer_access; } +// FIXME: move this somewhere else +struct ggml_backend_cuda_split_buffer_type_context { + std::array tensor_split; +}; + static void ggml_cuda_op_mul_mat( const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, ggml_cuda_op_mul_mat_t op, const bool convert_src1_to_q8_1) { @@ -8074,6 +8068,14 @@ static void ggml_cuda_op_mul_mat( GGML_ASSERT(!(split && ne03 > 1)); GGML_ASSERT(!(split && ne02 < ne12)); + std::array tensor_split; + if (split) { + // TODO: check that src0->buffer->buft is a split buffer type, replace GGML_BACKEND_GPU_SPLIT check + // GGML_ASSERT(src0->buffer != nullptr && src0->buffer->buft == ...); + ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context; + tensor_split = buft_ctx->tensor_split; + } + struct dev_data { cuda_pool_alloc src0_dd_alloc; cuda_pool_alloc src1_ddf_alloc; @@ -8101,17 +8103,17 @@ static void ggml_cuda_op_mul_mat( // for multi GPU, get the row boundaries from tensor split // and round to mul_mat_q tile sizes if (split) { - const int64_t rounding = get_row_rounding(src0->type); + const int64_t rounding = get_row_rounding(src0->type, tensor_split); if (id != 0) { - dev[id].row_low = ne01*g_tensor_split[id]; + dev[id].row_low = ne01*tensor_split[id]; if (dev[id].row_low < ne01) { dev[id].row_low -= dev[id].row_low % rounding; } } if (id != g_device_count - 1) { - dev[id].row_high = ne01*g_tensor_split[id + 1]; + dev[id].row_high = ne01*tensor_split[id + 1]; if (dev[id].row_high < ne01) { dev[id].row_high -= dev[id].row_high % rounding; } @@ -8657,10 +8659,17 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT; int64_t min_compute_capability = INT_MAX; - for (int id = 0; id < g_device_count; ++id) { - if (min_compute_capability > g_device_caps[id].cc && g_tensor_split[id] < (id + 1 < g_device_count ? 
g_tensor_split[id + 1] : 1.0f)) { - min_compute_capability = g_device_caps[id].cc; + + if (split) { + ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *) src0->buffer->buft->context; + auto & tensor_split = buft_ctx->tensor_split; + for (int id = 0; id < g_device_count; ++id) { + if (min_compute_capability > g_device_caps[id].cc && tensor_split[id] < (id + 1 < g_device_count ? tensor_split[id + 1] : 1.0f)) { + min_compute_capability = g_device_caps[id].cc; + } } + } else { + min_compute_capability = g_device_caps[g_main_device].cc; } #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) @@ -9435,8 +9444,6 @@ static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, g CUDA_CHECK(cudaMemsetAsync((char *)tensor->data + original_size, 0, padded_size - original_size, g_cudaStreams[ctx->device][0])); } } - - UNUSED(buffer); } static void ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { @@ -9469,7 +9476,7 @@ static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t CUDA_CHECK(cudaDeviceSynchronize()); } -static struct ggml_backend_buffer_i cuda_backend_buffer_interface = { +static struct ggml_backend_buffer_i ggml_cuda_backend_buffer_interface = { /* .get_name = */ ggml_backend_cuda_buffer_get_name, /* .free_buffer = */ ggml_backend_cuda_buffer_free_buffer, /* .get_base = */ ggml_backend_cuda_buffer_get_base, @@ -9510,7 +9517,7 @@ static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_bac ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(buft_ctx->device, dev_ptr); - return ggml_backend_buffer_init(buft, cuda_backend_buffer_interface, ctx, size); + return ggml_backend_buffer_init(buft, ggml_cuda_backend_buffer_interface, ctx, size); } static size_t ggml_backend_cuda_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { @@ -9560,6 +9567,7 @@ static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { }; ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { + // FIXME: this is not thread safe static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES]; static bool ggml_backend_cuda_buffer_type_initialized = false; @@ -9577,16 +9585,255 @@ ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { return &ggml_backend_cuda_buffer_types[device]; } +// cuda split buffer + +struct ggml_backend_cuda_split_buffer_context { + ~ggml_backend_cuda_split_buffer_context() { + for (ggml_tensor_extra_gpu * extra : tensor_extras) { + for (int id = 0; id < g_device_count; ++id) { + for (int64_t is = 0; is < MAX_STREAMS; ++is) { + CUDA_CHECK(cudaEventDestroy(extra->events[id][is])); + } + CUDA_CHECK(cudaFree(extra->data_device[id])); + } + delete extra; + } + } + + std::vector tensor_extras; +}; + +static const char * ggml_backend_cuda_split_buffer_get_name(ggml_backend_buffer_t buffer) { + return GGML_CUDA_NAME "_Split"; + + UNUSED(buffer); +} + +static void ggml_backend_cuda_split_buffer_free_buffer(ggml_backend_buffer_t buffer) { + ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context; + delete ctx; +} + +static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buffer) { + // the pointers are stored in the tensor extras, this is just a dummy address and never dereferenced + return (void *)0x1000; + + UNUSED(buffer); +} 
+ +static void ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { + GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported + + ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context; + ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context; + + const int64_t ne0 = tensor->ne[0]; + + ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{}; + + ctx->tensor_extras.push_back(extra); + + for (int id = 0; id < g_device_count; ++id) { + int64_t row_low, row_high; + get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id); + + int64_t nrows_split = row_high - row_low; + if (nrows_split == 0) { + continue; + } + + size_t size = ggml_nbytes_split(tensor, nrows_split); + const size_t original_size = size; + + // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses + if (ne0 % MATRIX_ROW_PADDING != 0) { + size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); + } + + // FIXME: do not crash if cudaMalloc fails + // currently, init_tensor cannot fail, it needs to be fixed in ggml-backend first + ggml_cuda_set_device(id); + char * buf; + CUDA_CHECK(cudaMalloc(&buf, size)); + + // set padding to 0 to avoid possible NaN values + if (size > original_size) { + CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size)); + } + + extra->data_device[id] = buf; + + for (int64_t is = 0; is < MAX_STREAMS; ++is) { + CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id][is], cudaEventDisableTiming)); + } + } + tensor->backend = GGML_BACKEND_GPU_SPLIT; + tensor->extra = extra; +} + +static void ggml_backend_cuda_split_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + // split tensors must always be set in their entirety at once + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + + ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context; + + const int64_t ne0 = tensor->ne[0]; + const size_t nb1 = tensor->nb[1]; + ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra; + + for (int id = 0; id < g_device_count; ++id) { + int64_t row_low, row_high; + get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id); + + int64_t nrows_split = row_high - row_low; + if (nrows_split == 0) { + continue; + } + + const size_t offset_split = row_low*nb1; + size_t size = ggml_nbytes_split(tensor, nrows_split); + const size_t original_size = size; + + // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses + if (ne0 % MATRIX_ROW_PADDING != 0) { + size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); + } + + const char * buf_host = (const char *)data + offset_split; + CUDA_CHECK(cudaMemcpy(extra->data_device[id], buf_host, original_size, cudaMemcpyHostToDevice)); + } +} + +static struct ggml_backend_buffer_i ggml_cuda_backend_split_buffer_interface = { + /* .get_name = */ ggml_backend_cuda_split_buffer_get_name, + /* .free_buffer = */ ggml_backend_cuda_split_buffer_free_buffer, + /* .get_base = */ ggml_backend_cuda_split_buffer_get_base, + /* .init_tensor = */ ggml_backend_cuda_split_buffer_init_tensor, + /* .set_tensor = */ ggml_backend_cuda_split_buffer_set_tensor, + /* .get_tensor = */ NULL, + /* 
.cpy_tensor_from = */ NULL, + /* .cpy_tensor_to = */ NULL, + /* .clear = */ NULL, +}; + +// cuda split buffer type + +static const char * ggml_backend_cuda_split_buffer_type_name(ggml_backend_buffer_type_t buft) { + return GGML_CUDA_NAME "_Split"; + + UNUSED(buft); +} + +static ggml_backend_buffer_t ggml_backend_cuda_split_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + // since we don't know the exact split after rounding, we cannot allocate the device buffers at this point + // instead, we allocate them for each tensor separately in init_tensor + // however, the size still represents the maximum cumulative size of all the device buffers after the tensors are allocated, + // as returned by get_alloc_size. this limit is enforced during tensor allocation by ggml-alloc, so it must be correct. + ggml_backend_cuda_split_buffer_context * ctx = new ggml_backend_cuda_split_buffer_context(); + + return ggml_backend_buffer_init(buft, ggml_cuda_backend_split_buffer_interface, ctx, size); +} + +static size_t ggml_backend_cuda_split_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { + return 128; + + UNUSED(buft); +} + +static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, ggml_tensor * tensor) { + ggml_backend_cuda_split_buffer_type_context * ctx = (ggml_backend_cuda_split_buffer_type_context *)buft->context; + + size_t total_size = 0; + + const int64_t ne0 = tensor->ne[0]; + + for (int id = 0; id < g_device_count; ++id) { + int64_t row_low, row_high; + get_row_split(&row_low, &row_high, tensor, ctx->tensor_split, id); + + int64_t nrows_split = row_high - row_low; + if (nrows_split == 0) { + continue; + } + + total_size += ggml_nbytes_split(tensor, nrows_split); + + // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses + if (ne0 % MATRIX_ROW_PADDING != 0) { + total_size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); + } + } + + return total_size; +} + +static bool ggml_backend_cuda_split_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) { + return ggml_backend_is_cuda(backend); + + UNUSED(buft); +} + +static bool ggml_backend_cuda_split_buffer_type_is_host(ggml_backend_buffer_type_t buft) { + return false; + + UNUSED(buft); +} + +static ggml_backend_buffer_type_i ggml_backend_cuda_split_buffer_type_interface = { + /* .get_name = */ ggml_backend_cuda_split_buffer_type_name, + /* .alloc_buffer = */ ggml_backend_cuda_split_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cuda_split_buffer_type_get_alignment, + /* .get_alloc_size = */ ggml_backend_cuda_split_buffer_type_get_alloc_size, + /* .supports_backend = */ ggml_backend_cuda_split_buffer_type_supports_backend, + /* .is_host = */ ggml_backend_cuda_split_buffer_type_is_host, +}; + +ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split) { + // FIXME: this is not thread safe + static std::map, struct ggml_backend_buffer_type> buft_map; + + + std::array tensor_split_arr = {}; + + bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + GGML_CUDA_MAX_DEVICES, [](float x) { return x == 0.0f; }); + if (all_zero) { + tensor_split_arr = g_default_tensor_split; + } else { + float split_sum = 0.0f; + for (int i = 0; i < g_device_count; ++i) { + tensor_split_arr[i] = split_sum; + split_sum += tensor_split[i]; + } + for (int i = 0; i < g_device_count; ++i) { + tensor_split_arr[i] /= split_sum; + } + } + + 
auto it = buft_map.find(tensor_split_arr); + if (it != buft_map.end()) { + return &it->second; + } + + struct ggml_backend_buffer_type buft { + /* .iface = */ ggml_backend_cuda_split_buffer_type_interface, + /* .context = */ new ggml_backend_cuda_split_buffer_type_context{tensor_split_arr}, + }; + + auto result = buft_map.emplace(tensor_split_arr, buft); + return &result.first->second; +} + // host buffer type static const char * ggml_backend_cuda_host_buffer_type_name(ggml_backend_buffer_type_t buft) { - return "CUDA_Host"; + return GGML_CUDA_NAME "_Host"; UNUSED(buft); } static const char * ggml_backend_cuda_host_buffer_name(ggml_backend_buffer_t buffer) { - return "CUDA_Host"; + return GGML_CUDA_NAME "_Host"; UNUSED(buffer); } @@ -9713,14 +9960,14 @@ static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph continue; #ifndef NDEBUG - assert(node->backend == GGML_BACKEND_GPU); + assert(node->backend == GGML_BACKEND_GPU || node->backend == GGML_BACKEND_GPU_SPLIT); assert(node->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); assert(node->extra != nullptr); for (int j = 0; j < GGML_MAX_SRC; j++) { if (node->src[j] != nullptr) { - assert(node->src[j]->backend == GGML_BACKEND_GPU); - assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); + assert(node->src[j]->backend == GGML_BACKEND_GPU || node->src[j]->backend == GGML_BACKEND_GPU_SPLIT); + //assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); assert(node->src[j]->extra != nullptr); } } diff --git a/ggml-cuda.h b/ggml-cuda.h index 7b48782d64e95..81d52040e0db1 100644 --- a/ggml-cuda.h +++ b/ggml-cuda.h @@ -38,6 +38,8 @@ GGML_API ggml_backend_t ggml_backend_cuda_init(int device); GGML_API bool ggml_backend_is_cuda(ggml_backend_t backend); GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device); +// split tensor buffer that splits matrices by rows across multiple devices +GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * tensor_split); // pinned host buffer for use with the CPU backend for faster copies between CPU and GPU GGML_API ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type(void); diff --git a/llama.cpp b/llama.cpp index 5104350ca88d6..e345a86c81445 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1238,16 +1238,17 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) { GGML_UNUSED(gpu); } -static ggml_backend_buffer_type_t llama_default_buffer_type_split(int main_gpu, const float * tensor_split) { +static ggml_backend_buffer_type_t llama_default_buffer_type_split(int fallback_gpu, const float * tensor_split) { ggml_backend_buffer_type_t buft = nullptr; #ifdef GGML_USE_CUBLAS - // TODO - // buft = ggml_backend_cuda_buffer_type_split(tensor_split); + if (ggml_backend_cuda_get_device_count() > 1) { + buft = ggml_backend_cuda_split_buffer_type(tensor_split); + } #endif if (buft == nullptr) { - buft = llama_default_buffer_type_offload(main_gpu); + buft = llama_default_buffer_type_offload(fallback_gpu); } return buft; @@ -2357,13 +2358,6 @@ struct llama_model_loader { throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); } - // FIXME: this should be ggml_backend_cuda_split_buffer_type - //if (backend == GGML_BACKEND_GPU_SPLIT) { - // if (ne.size() == 1) { - // throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str())); - // } - //} - { bool is_ok = true; for 
(size_t i = 0; i < ne.size(); ++i) { @@ -3148,12 +3142,12 @@ static bool llm_load_tensors( // TODO: user configurable enum gpu_split_mode { - CUDA_SPLIT_NONE, // single GPU - CUDA_SPLIT_LAYER, // offload layers to different GPUs - CUDA_SPLIT_ROW // split matrix rows across GPUs + LLAMA_SPLIT_NONE, // single GPU + LLAMA_SPLIT_LAYER, // offload layers to different GPUs + LLAMA_SPLIT_ROW // split matrix rows across GPUs }; - gpu_split_mode split_mode = CUDA_SPLIT_LAYER; + gpu_split_mode split_mode = LLAMA_SPLIT_LAYER; const int64_t n_layer = hparams.n_layer; const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0); @@ -3167,7 +3161,7 @@ static bool llm_load_tensors( } #ifdef GGML_USE_CUBLAS - if (split_mode == CUDA_SPLIT_LAYER) { + if (split_mode == LLAMA_SPLIT_LAYER) { // calculate the split points int device_count = ggml_backend_cuda_get_device_count(); float splits[GGML_CUDA_MAX_DEVICES]; @@ -9181,7 +9175,8 @@ struct llama_context * llama_new_context_with_model( std::vector backends; // initialize backends - // TODO: only initialize the backends that are actually used + // FIXME: only initialize the backends that are actually used + // this is important for CUDA split buffers, only the main_gpu backend should be initialized #ifdef GGML_USE_METAL if (model->n_gpu_layers > 0) { ctx->backend = ggml_backend_metal_init(); From 72b74f364b4ccfe3d91e684ef6c6b787f085f208 Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 7 Jan 2024 00:33:51 +0100 Subject: [PATCH 09/34] cuda : do not create buffer types for devices that don't exist (fixes usage without CUDA devices available) --- ggml-cuda.cu | 5 ++++- ggml-cuda.h | 2 -- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 0969c26e603fc..032d7e98689e1 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9568,6 +9568,10 @@ static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { // FIXME: this is not thread safe + if (device >= ggml_backend_cuda_get_device_count()) { + return nullptr; + } + static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES]; static bool ggml_backend_cuda_buffer_type_initialized = false; @@ -9793,7 +9797,6 @@ ggml_backend_buffer_type_t ggml_backend_cuda_split_buffer_type(const float * ten // FIXME: this is not thread safe static std::map, struct ggml_backend_buffer_type> buft_map; - std::array tensor_split_arr = {}; bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + GGML_CUDA_MAX_DEVICES, [](float x) { return x == 0.0f; }); diff --git a/ggml-cuda.h b/ggml-cuda.h index 81d52040e0db1..d19cbf3fdd04b 100644 --- a/ggml-cuda.h +++ b/ggml-cuda.h @@ -47,8 +47,6 @@ GGML_API int ggml_backend_cuda_get_device_count(void); GGML_API void ggml_backend_cuda_get_device_description(int device, char * description, size_t description_size); GGML_API void ggml_backend_cuda_get_device_memory(int device, size_t * free, size_t * total); - - #ifdef __cplusplus } #endif From f77c72f3718711ce2f29787e40ec4638e2be6db8 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 7 Jan 2024 13:06:57 +0200 Subject: [PATCH 10/34] ggml : fix null backend dereference (#4807) * ggml : fix null backend dereference * ggml : also check ggml_backend_is_cpu --- ggml-backend.c | 2 +- ggml-cuda.cu | 2 +- ggml-metal.m | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 
56f96c3671ec1..8d66c5605de46 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -703,7 +703,7 @@ ggml_backend_t ggml_backend_cpu_init(void) { } bool ggml_backend_is_cpu(ggml_backend_t backend) { - return backend->iface.get_name == ggml_backend_cpu_name; + return backend && backend->iface.get_name == ggml_backend_cpu_name; } void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) { diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 032d7e98689e1..ee82f68d74e9a 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -10140,7 +10140,7 @@ ggml_backend_t ggml_backend_cuda_init(int device) { } bool ggml_backend_is_cuda(ggml_backend_t backend) { - return backend->iface.get_name == ggml_backend_cuda_name; + return backend && backend->iface.get_name == ggml_backend_cuda_name; } int ggml_backend_cuda_get_device_count() { diff --git a/ggml-metal.m b/ggml-metal.m index e3d76b9d3d332..00b377a3e1389 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2749,7 +2749,7 @@ ggml_backend_t ggml_backend_metal_init(void) { } bool ggml_backend_is_metal(ggml_backend_t backend) { - return backend->iface.get_name == ggml_backend_metal_name; + return backend && backend->iface.get_name == ggml_backend_metal_name; } void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) { From 7c16cf106d1f6410a7d75845ed29c78b662786ee Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 7 Jan 2024 13:50:02 +0100 Subject: [PATCH 11/34] test-backend-ops : check buffer allocation failures --- ggml-backend.c | 37 ++++++++++++++++++++++++++++++++++++- ggml-backend.h | 2 +- tests/test-backend-ops.cpp | 18 ++++++++++++++++-- 3 files changed, 53 insertions(+), 4 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 8d66c5605de46..c0322120019a0 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1503,6 +1503,21 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s struct ggml_context * ctx_allocated = ggml_init(params); struct ggml_context * ctx_unallocated = ggml_init(params); + if (ctx_allocated == NULL || ctx_unallocated == NULL) { + fprintf(stderr, "failed to allocate context for graph copy\n"); + free(hash_set.keys); + free(node_copies); + free(node_init); + ggml_free(ctx_allocated); + ggml_free(ctx_unallocated); + return (struct ggml_backend_graph_copy) { + /* .buffer = */ NULL, + /* .ctx_allocated = */ NULL, + /* .ctx_unallocated = */ NULL, + /* .graph = */ NULL, + }; + } + // dup nodes for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; @@ -1511,6 +1526,20 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s // allocate nodes ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend); + if (buffer == NULL) { + fprintf(stderr, "failed to allocate buffer for graph copy\n"); + free(hash_set.keys); + free(node_copies); + free(node_init); + ggml_free(ctx_allocated); + ggml_free(ctx_unallocated); + return (struct ggml_backend_graph_copy) { + /* .buffer = */ NULL, + /* .ctx_allocated = */ NULL, + /* .ctx_unallocated = */ NULL, + /* .graph = */ NULL, + }; + } //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024); @@ -1547,8 +1576,12 @@ void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) { ggml_free(copy.ctx_unallocated); } -void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) { +bool 
ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) { struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph); + if (copy.buffer == NULL) { + return false; + } + struct ggml_cgraph * g1 = graph; struct ggml_cgraph * g2 = copy.graph; @@ -1578,4 +1611,6 @@ void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t } ggml_backend_graph_copy_free(copy); + + return true; } diff --git a/ggml-backend.h b/ggml-backend.h index fc1e5cd0325d9..a50c17462459d 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -189,7 +189,7 @@ extern "C" { typedef bool (*ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data); // Compare the output of two backends - GGML_API void ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data); + GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data); // Tensor initialization GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index b79de7a7dd5cc..1f95258a84475 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -376,6 +376,11 @@ struct test_case { // allocate ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend1); + if (buf == NULL) { + printf("failed to allocate tensors [%s] ", ggml_backend_name(backend1)); + ggml_free(ctx); + return false; + } // build graph ggml_build_forward_expand(gf, out); @@ -463,9 +468,13 @@ struct test_case { GGML_UNUSED(index); }; - ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud); + bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud); + + if (!cmp_ok) { + printf("compare failed "); + } - if (ud.ok) { + if (ud.ok && cmp_ok) { printf("\033[1;32mOK\033[0m\n"); } else { printf("\033[1;31mFAIL\033[0m\n"); @@ -519,6 +528,11 @@ struct test_case { // allocate ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend); + if (buf == NULL) { + printf("failed to allocate tensors\n"); + ggml_free(ctx); + return false; + } // randomize tensors initialize_tensors(ctx); From 5e879c997799f2ab09a40df8f3729ec63c092a5f Mon Sep 17 00:00:00 2001 From: slaren Date: Sun, 7 Jan 2024 23:26:49 +0100 Subject: [PATCH 12/34] llama : add cparam (split_mode) and command line argument (--split-mode, -sm) to configure the split mode (none, layer or row) --- common/common.cpp | 35 ++++++++++++++++++++++++++++------- common/common.h | 1 + ggml-backend.c | 2 ++ llama.cpp | 30 ++++++++++++++++++------------ llama.h | 15 +++++++++++++-- 5 files changed, 62 insertions(+), 21 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index eacaee18e0907..47f9381cfa028 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -556,6 +556,25 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { #else fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. 
It is not possible to set a main GPU.\n"); #endif + } else if (arg == "--split-mode" || arg == "-sm") { + if (++i >= argc) { + invalid_param = true; + break; + } + std::string arg_next = argv[i]; + if (arg_next == "none") { + params.split_mode = LLAMA_SPLIT_NONE; + } else if (arg_next == "layer") { + params.split_mode = LLAMA_SPLIT_LAYER; + } else if (arg_next == "row") { + params.split_mode = LLAMA_SPLIT_ROW; + } else { + invalid_param = true; + break; + } +#ifndef GGML_USE_CUBLAS + fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the split mode has no effect.\n"); +#endif // GGML_USE_CUBLAS } else if (arg == "--tensor-split" || arg == "-ts") { if (++i >= argc) { invalid_param = true; @@ -895,14 +914,15 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" number of layers to store in VRAM\n"); printf(" -ngld N, --n-gpu-layers-draft N\n"); printf(" number of layers to store in VRAM for the draft model\n"); + printf(" -sm SPLIT_MODE, --split-mode SPLIT_MODE\n"); + printf(" how to split the model across multiple GPUs, one of:\n"); + printf(" - none: use one GPU only\n"); + printf(" - layer (default): split layers and KV across GPUs\n"); + printf(" - row: split rows across GPUs\n"); printf(" -ts SPLIT --tensor-split SPLIT\n"); - printf(" how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n"); - printf(" -mg i, --main-gpu i the GPU to use for scratch and small tensors\n"); -#ifdef GGML_USE_CUBLAS - printf(" -nommq, --no-mul-mat-q\n"); - printf(" use " GGML_CUBLAS_NAME " instead of custom mul_mat_q " GGML_CUDA_NAME " kernels.\n"); - printf(" Not recommended since this is both slower and uses more VRAM.\n"); -#endif // GGML_USE_CUBLAS + printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n"); + printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n"); + printf(" or for intermediate results and KV (with split-mode = row) (default: %d)\n", params.main_gpu); #endif printf(" --verbose-prompt print prompt before generation\n"); printf(" -dkvc, --dump-kv-cache\n"); @@ -1015,6 +1035,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & mparams.n_gpu_layers = params.n_gpu_layers; } mparams.main_gpu = params.main_gpu; + mparams.split_mode = params.split_mode; mparams.tensor_split = params.tensor_split; mparams.use_mmap = params.use_mmap; mparams.use_mlock = params.use_mlock; diff --git a/common/common.h b/common/common.h index 9659aa0453ff8..758722c11f6d3 100644 --- a/common/common.h +++ b/common/common.h @@ -59,6 +59,7 @@ struct gpt_params { float p_split = 0.1f; // speculative decoding split probability int32_t n_gpu_layers = -1; // number of layers to store in VRAM (-1 - use default) int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default) + llama_split_mode split_mode = LLAMA_SPLIT_LAYER; // how to split the model across GPUs int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs int32_t n_beams = 0; // if non-zero then use beam search of given width. 
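[editorial sketch, not part of the patch] A minimal example of how an application would select the new split mode through the public API added by this patch — the C++ equivalent of `-ngl 99 -sm row -ts 3,1 -mg 0`. This assumes a CUDA build with at least two devices and the llama.h additions shown below; it is illustrative only.

    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 99;                // offload all layers
    mparams.split_mode   = LLAMA_SPLIT_ROW;   // equivalent to `--split-mode row`
    mparams.main_gpu     = 0;                 // device for intermediate results when splitting by rows
    const float t_split[LLAMA_MAX_DEVICES] = {3.0f, 1.0f}; // `-ts 3,1`: 3:1 row split across two GPUs
    mparams.tensor_split = t_split;
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);

This mirrors what common.cpp does when mapping --split-mode/-sm, --tensor-split/-ts and --main-gpu/-mg into llama_model_params before calling llama_load_model_from_file.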
diff --git a/ggml-backend.c b/ggml-backend.c index c0322120019a0..cddd784b95337 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1394,6 +1394,8 @@ ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, } void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) { + // FIXME: node_allocr is cleared when splitting the graph, so all user assignments are lost + // to avoid this, we need to clear node_allocr after compute rather than before split int backend_index = sched_backend_prio(sched, backend); GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); node_allocr(node) = sched->tallocs[backend_index]; diff --git a/llama.cpp b/llama.cpp index d615252fdbf9b..82db0a11d3f4b 100644 --- a/llama.cpp +++ b/llama.cpp @@ -3130,6 +3130,7 @@ static bool llm_load_tensors( llama_model_loader & ml, llama_model & model, int n_gpu_layers, + enum llama_split_mode split_mode, int main_gpu, const float * tensor_split, bool use_mlock, @@ -3144,14 +3145,6 @@ static bool llm_load_tensors( size_t ctx_size = ggml_tensor_overhead()*ml.n_tensors; - // TODO: user configurable - enum gpu_split_mode { - LLAMA_SPLIT_NONE, // single GPU - LLAMA_SPLIT_LAYER, // offload layers to different GPUs - LLAMA_SPLIT_ROW // split matrix rows across GPUs - }; - - gpu_split_mode split_mode = LLAMA_SPLIT_LAYER; const int64_t n_layer = hparams.n_layer; const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0); @@ -3207,13 +3200,25 @@ static bool llm_load_tensors( } else #endif { - // offload layers + ggml_backend_buffer_type_t split_buft; + if (split_mode == LLAMA_SPLIT_ROW) { + split_buft = llama_default_buffer_type_split(main_gpu, tensor_split); + } else { + split_buft = llama_default_buffer_type_offload(main_gpu); + } + // repeating layers for (int64_t i = i_gpu_start; i < n_layer; ++i) { - model.buft_layer[i] = { llama_default_buffer_type_split(main_gpu, tensor_split), llama_default_buffer_type_offload(main_gpu) }; + model.buft_layer[i] = { + split_buft, + llama_default_buffer_type_offload(main_gpu) + }; } // output layer if (n_gpu_layers > n_layer) { - model.buft_output = { llama_default_buffer_type_split(main_gpu, tensor_split), llama_default_buffer_type_offload(main_gpu) }; + model.buft_output = { + split_buft, + llama_default_buffer_type_offload(main_gpu) + }; } else { model.buft_output = llama_default_buffer_type_cpu(true); } @@ -3804,7 +3809,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, cons } if (!llm_load_tensors( - ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock, + ml, model, params.n_gpu_layers, params.split_mode, params.main_gpu, params.tensor_split, params.use_mlock, params.progress_callback, params.progress_callback_user_data )) { return -2; @@ -8964,6 +8969,7 @@ static int llama_apply_lora_from_file_internal( struct llama_model_params llama_model_default_params() { struct llama_model_params result = { /*.n_gpu_layers =*/ 0, + /*.split_mode =*/ LLAMA_SPLIT_LAYER, /*.main_gpu =*/ 0, /*.tensor_split =*/ nullptr, /*.progress_callback =*/ nullptr, diff --git a/llama.h b/llama.h index 461d4604a1b54..662c2c3be6b32 100644 --- a/llama.h +++ b/llama.h @@ -115,6 +115,12 @@ extern "C" { LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN, }; + enum llama_split_mode { + LLAMA_SPLIT_NONE = 0, // single GPU + LLAMA_SPLIT_LAYER = 1, // split layers and KV to different GPUs + LLAMA_SPLIT_ROW = 2, // split rows across GPUs 
+ }; + typedef struct llama_token_data { llama_token id; // token id float logit; // log-odds of the token @@ -177,8 +183,13 @@ extern "C" { struct llama_model_params { int32_t n_gpu_layers; // number of layers to store in VRAM - int32_t main_gpu; // the GPU that is used for scratch and small tensors - const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES) + enum llama_split_mode split_mode; // how to split the model across multiple GPUs + // the GPU that is used for the model (LLAMA_SPLIT_NONE), + // for small tensors and intermediate results (LLAMA_SPLIT_ROW) + // ignored for LLAMA_SPLIT_LAYER + int32_t main_gpu; + // fraction of the model (layers or rows) to offload to each GPU, size: LLAMA_MAX_DEVICES + const float * tensor_split; // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. // If the provided progress_callback returns true, model loading continues. From ac145fd2e3ceaecb46e94da08204eb57a3d1811c Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 8 Jan 2024 03:51:15 +0100 Subject: [PATCH 13/34] ggml : fix mul_mat_id work size --- ggml.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ggml.c b/ggml.c index 05531a73d9ed4..0f7d9d40cc461 100644 --- a/ggml.c +++ b/ggml.c @@ -16646,14 +16646,15 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { } break; case GGML_OP_MUL_MAT_ID: { + cur = 0; const struct ggml_tensor * src0 = node->src[2]; const struct ggml_tensor * src1 = node->src[1]; const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type; if (src1->type != vec_dot_type) { - cur = ggml_row_size(vec_dot_type, ggml_nelements(src1)); + cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)); } const int n_as = ggml_get_op_params_i32(node, 1); - cur = GGML_PAD(cur, sizeof(int64_t)); // align + cur += GGML_PAD(cur, sizeof(int64_t)); // align cur += n_as * sizeof(int64_t); // matrix_row_counts cur += n_as * src1->ne[1] * sizeof(int64_t); // matrix_rows } break; From 444b975eddb9d29f6cd28ed432ce4ae422a9c9a4 Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 8 Jan 2024 12:56:31 +0100 Subject: [PATCH 14/34] llama : rewrite session kv load/set without graphs --- llama.cpp | 105 ++++++++++++++++-------------------------------------- 1 file changed, 30 insertions(+), 75 deletions(-) diff --git a/llama.cpp b/llama.cpp index 82db0a11d3f4b..11045defc211e 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1466,6 +1466,14 @@ struct llama_kv_cache { std::vector ctxs; std::vector bufs; + size_t total_size() const { + size_t size = 0; + for (ggml_backend_buffer_t buf : bufs) { + size += ggml_backend_buffer_get_size(buf); + } + return size; + } + ~llama_kv_cache() { for (struct ggml_context * ctx : ctxs) { ggml_free(ctx); @@ -9565,7 +9573,7 @@ size_t llama_get_state_size(const struct llama_context * ctx) { const size_t s_embedding = ctx->embedding.size() * sizeof(float); const size_t s_kv_size = sizeof(size_t); const size_t s_kv_ntok = sizeof(int); - const size_t s_kv = ggml_backend_buffer_get_size(ctx->kv_self.bufs.at(0)); // FIXME + const size_t s_kv = ctx->kv_self.total_size(); const size_t s_total = ( + s_rng_size @@ -9694,7 +9702,7 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat const auto n_embd_v_gqa = hparams.n_embd_v_gqa(); const auto n_ctx = cparams.n_ctx; - const size_t kv_buf_size = ggml_backend_buffer_get_size(kv_self.bufs.at(0)); // FIXME + const size_t kv_buf_size = kv_self.total_size(); const uint32_t kv_head = kv_self.head; const 
uint32_t kv_size = kv_self.size; const uint32_t kv_used = kv_self.used; @@ -9707,46 +9715,19 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat if (kv_buf_size) { const size_t elt_size = ggml_element_size(kv_self.k_l[0]); - ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true }); - ggml_cgraph * gf = ggml_new_graph(cpy_ctx); - - std::vector kout2d(n_layer); - std::vector vout2d(n_layer); - - for (int il = 0; il < (int) n_layer; ++il) { - kout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd_k_gqa, kv_head); - vout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd_v_gqa); - - ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il], - n_embd_k_gqa, kv_head, - elt_size*n_embd_k_gqa, 0); - - ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il], - kv_head, n_embd_v_gqa, - elt_size*n_ctx, 0); - - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k2d, kout2d[il])); - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v2d, vout2d[il])); - } - - ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend); - - ggml_backend_graph_compute(ctx->backend, gf); - std::vector tmp_buf; for (int il = 0; il < (int) n_layer; ++il) { - tmp_buf.resize(ggml_nbytes(kout2d[il])); - ggml_backend_tensor_get(kout2d[il], tmp_buf.data(), 0, tmp_buf.size()); + tmp_buf.resize(elt_size*n_embd_k_gqa*kv_head); + ggml_backend_tensor_get(kv_self.k_l[il], tmp_buf.data(), 0, tmp_buf.size()); data_ctx->write(tmp_buf.data(), tmp_buf.size()); - tmp_buf.resize(ggml_nbytes(vout2d[il])); - ggml_backend_tensor_get(vout2d[il], tmp_buf.data(), 0, tmp_buf.size()); - data_ctx->write(tmp_buf.data(), tmp_buf.size()); + // v is not contiguous, copy row by row + tmp_buf.resize(elt_size*kv_head); + for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) { + ggml_backend_tensor_get(kv_self.v_l[il], tmp_buf.data(), ir*elt_size*n_ctx, tmp_buf.size()); + data_ctx->write(tmp_buf.data(), tmp_buf.size()); + } } - - ggml_free(cpy_ctx); - - ggml_backend_buffer_free(buf); } for (uint32_t i = 0; i < kv_size; ++i) { @@ -9845,48 +9826,22 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { memcpy(&kv_used, inp, sizeof(kv_used)); inp += sizeof(kv_used); if (kv_buf_size) { - GGML_ASSERT(ggml_backend_buffer_get_size(kv_self.bufs.at(0)) == kv_buf_size); // FIXME + GGML_ASSERT(kv_self.total_size() == kv_buf_size); const size_t elt_size = ggml_element_size(kv_self.k_l[0]); - ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true }); - ggml_cgraph * gf = ggml_new_graph(cpy_ctx); - - std::vector kin2d(n_layer); - std::vector vin2d(n_layer); - - for (int il = 0; il < n_layer; ++il) { - kin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd_k_gqa, kv_head); - vin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd_v_gqa); - - ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il], - n_embd_k_gqa, kv_head, - elt_size*n_embd_k_gqa, 0); - - ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il], - kv_head, n_embd_v_gqa, - elt_size*n_ctx, 0); - - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin2d[il], k2d)); - ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin2d[il], v2d)); - } - - ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend); - - // load data into the tensors - for (int il = 0; il < n_layer; ++il) { - ggml_backend_tensor_set(kin2d[il], inp, 0, 
ggml_nbytes(kin2d[il])); - inp += ggml_nbytes(kin2d[il]); - - ggml_backend_tensor_set(vin2d[il], inp, 0, ggml_nbytes(vin2d[il])); - inp += ggml_nbytes(vin2d[il]); + for (int il = 0; il < (int) n_layer; ++il) { + size_t k_size = elt_size*n_embd_k_gqa*kv_head; + ggml_backend_tensor_set(kv_self.k_l[il], inp, 0, k_size); + inp += k_size; + + // v is not contiguous, copy row by row + size_t v_row_size = elt_size*kv_head; + for (int ir = 0; ir < (int) n_embd_v_gqa; ++ir) { + ggml_backend_tensor_set(kv_self.v_l[il], inp, ir*elt_size*n_ctx, v_row_size); + inp += v_row_size; + } } - - ggml_backend_graph_compute(ctx->backend, gf); - - ggml_free(cpy_ctx); - - ggml_backend_buffer_free(buf); } ctx->kv_self.head = kv_head; From d41cef9326cddb895b3b44edd0287602b0de774a Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 8 Jan 2024 13:42:20 +0100 Subject: [PATCH 15/34] minor --- common/common.cpp | 30 +++++++++++------------------- llama.h | 4 ++-- tests/test-backend-ops.cpp | 14 +++++++------- 3 files changed, 20 insertions(+), 28 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 47f9381cfa028..58155f7f5fe81 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -529,9 +529,8 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } -#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD params.n_gpu_layers = std::stoi(argv[i]); -#else +#ifndef LLAMA_SUPPORTS_GPU_OFFLOAD fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); #endif @@ -540,9 +539,8 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } -#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD params.n_gpu_layers_draft = std::stoi(argv[i]); -#else +#ifndef LLAMA_SUPPORTS_GPU_OFFLOAD fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers-draft option will be ignored\n"); fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); #endif @@ -551,11 +549,10 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } -#ifdef GGML_USE_CUBLAS params.main_gpu = std::stoi(argv[i]); -#else - fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n"); -#endif +#ifndef GGML_USE_CUBLAS + fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting the main GPU has no effect.\n"); +#endif // GGML_USE_CUBLAS } else if (arg == "--split-mode" || arg == "-sm") { if (++i >= argc) { invalid_param = true; @@ -580,15 +577,16 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { invalid_param = true; break; } -#ifdef GGML_USE_CUBLAS std::string arg_next = argv[i]; // split string by , and / const std::regex regex{R"([,/]+)"}; std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1}; std::vector split_arg{it, {}}; - GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES); - + if (split_arg.size() >= LLAMA_MAX_DEVICES) { + invalid_param = true; + break; + } for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) { if (i < split_arg.size()) { params.tensor_split[i] = std::stof(split_arg[i]); @@ -596,14 +594,8 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) { params.tensor_split[i] = 0.0f; } } -#else - fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. 
It is not possible to set a tensor split.\n"); -#endif // GGML_USE_CUBLAS - } else if (arg == "--no-mul-mat-q" || arg == "-nommq") { -#ifdef GGML_USE_CUBLAS - params.mul_mat_q = false; -#else - fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n"); +#ifndef GGML_USE_CUBLAS + fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Setting a tensor split has no effect.\n"); #endif // GGML_USE_CUBLAS } else if (arg == "--no-mmap") { params.use_mmap = false; diff --git a/llama.h b/llama.h index 662c2c3be6b32..605783b424ec9 100644 --- a/llama.h +++ b/llama.h @@ -117,7 +117,7 @@ extern "C" { enum llama_split_mode { LLAMA_SPLIT_NONE = 0, // single GPU - LLAMA_SPLIT_LAYER = 1, // split layers and KV to different GPUs + LLAMA_SPLIT_LAYER = 1, // split layers and KV across GPUs LLAMA_SPLIT_ROW = 2, // split rows across GPUs }; @@ -188,7 +188,7 @@ extern "C" { // for small tensors and intermediate results (LLAMA_SPLIT_ROW) // ignored for LLAMA_SPLIT_LAYER int32_t main_gpu; - // fraction of the model (layers or rows) to offload to each GPU, size: LLAMA_MAX_DEVICES + // proportion of the model (layers or rows) to offload to each GPU, size: LLAMA_MAX_DEVICES const float * tensor_split; // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 1f95258a84475..18eb4a881b47f 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -474,17 +474,17 @@ struct test_case { printf("compare failed "); } - if (ud.ok && cmp_ok) { - printf("\033[1;32mOK\033[0m\n"); - } else { - printf("\033[1;31mFAIL\033[0m\n"); - } - ggml_backend_buffer_free(buf); ggml_free(ctx); - return ud.ok; + if (ud.ok && cmp_ok) { + printf("\033[1;32mOK\033[0m\n"); + return true; + } + + printf("\033[1;31mFAIL\033[0m\n"); + return false; } bool eval_perf(ggml_backend_t backend, const char * op_name) { From 5a62db30c38cf9853bcdb00f2b0d14c17b135742 Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 8 Jan 2024 13:54:50 +0100 Subject: [PATCH 16/34] llama : only initialize used backends, free backends on context free --- llama.cpp | 53 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 20 deletions(-) diff --git a/llama.cpp b/llama.cpp index 11045defc211e..8087e9dad3c87 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1557,6 +1557,7 @@ struct llama_model { std::vector layers; + llama_split_mode split_mode; int main_gpu; int n_gpu_layers; @@ -1610,13 +1611,18 @@ struct llama_context { llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {} ~llama_context() { ggml_backend_sched_free(sched); - // TODO: free all backends - ggml_backend_free(backend); + + for (ggml_backend_t backend : backends) { + ggml_backend_free(backend); + } } llama_cparams cparams; - ggml_backend_t backend = nullptr; + std::vector backends; +#ifdef GGML_USE_METAL + ggml_backend_t backend_metal = nullptr; +#endif ggml_backend_t backend_cpu = nullptr; const llama_model & model; @@ -3148,6 +3154,7 @@ static bool llm_load_tensors( auto & hparams = model.hparams; + model.split_mode = split_mode; model.main_gpu = main_gpu; model.n_gpu_layers = n_gpu_layers; @@ -6204,8 +6211,8 @@ static int llama_decode_internal( #endif #ifdef GGML_USE_METAL - if (ggml_backend_is_metal(lctx.backend)) { - ggml_backend_metal_set_n_cb(lctx.backend, n_threads); + if (ggml_backend_is_metal(lctx.backend_metal)) { + 
ggml_backend_metal_set_n_cb(lctx.backend_metal, n_threads); } #endif @@ -9180,27 +9187,33 @@ struct llama_context * llama_new_context_with_model( GGML_ASSERT(hparams.n_embd_head_v % ggml_blck_size(type_v) == 0); if (!hparams.vocab_only) { - std::vector backends; - // initialize backends - // FIXME: only initialize the backends that are actually used - // this is important for CUDA split buffers, only the main_gpu backend should be initialized #ifdef GGML_USE_METAL if (model->n_gpu_layers > 0) { - ctx->backend = ggml_backend_metal_init(); - if (ctx->backend == nullptr) { + ctx->backend_metal = ggml_backend_metal_init(); + if (ctx->backend_metal == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__); } - backends.push_back(ctx->backend); + ctx->backends.push_back(ctx->backend_metal); } #elif defined(GGML_USE_CUBLAS) if (model->n_gpu_layers > 0) { - for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) { - ctx->backend = ggml_backend_cuda_init(device); - if (ctx->backend == nullptr) { - LLAMA_LOG_ERROR("%s: failed to initialize CUDA backend for device %d\n", __func__, device); + // with split_mode LLAMA_SPLIT_NONE or LLAMA_SPLIT_ROW, only the main GPU backend is used + if (model->split_mode == LLAMA_SPLIT_ROW || model->split_mode == LLAMA_SPLIT_NONE) { + ggml_backend_t backend = ggml_backend_cuda_init(model->main_gpu); + if (backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, model->main_gpu); + } + ctx->backends.push_back(backend); + } else { + // LLAMA_SPLIT_LAYER requires a backend for each GPU + for (int device = 0; device < ggml_backend_cuda_get_device_count(); ++device) { + ggml_backend_t backend = ggml_backend_cuda_init(device); + if (backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, device); + } + ctx->backends.push_back(backend); } - backends.push_back(ctx->backend); } } #endif @@ -9210,7 +9223,7 @@ struct llama_context * llama_new_context_with_model( delete ctx; return nullptr; } - backends.push_back(ctx->backend_cpu); + ctx->backends.push_back(ctx->backend_cpu); if (!llama_kv_cache_init(ctx->kv_self, ctx->model, type_k, type_v, cparams.n_ctx, cparams.offload_kqv)) { @@ -9252,7 +9265,7 @@ struct llama_context * llama_new_context_with_model( // buffer used to store the computation graph and the tensor meta data ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead()); - ctx->sched = ggml_backend_sched_new(backends.data(), backends.size()); + ctx->sched = ggml_backend_sched_new(ctx->backends.data(), ctx->backends.size()); ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, ctx->backend_cpu); // build worst-case graph @@ -9268,7 +9281,7 @@ struct llama_context * llama_new_context_with_model( LLAMA_LOG_INFO("%s: graph splits (measure): %d\n", __func__, n_splits); ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, ctx->backend_cpu); - for (ggml_backend_t backend : backends) { + for (ggml_backend_t backend : ctx->backends) { ggml_backend_buffer_t buf = ggml_backend_sched_get_buffer(ctx->sched, backend); LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__, ggml_backend_name(backend), From 4813e17548d49188dedb8eb8e1ad6988b875f85e Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 8 Jan 2024 14:00:34 +0100 Subject: [PATCH 17/34] llama : abort ctx if cuda backend init fails --- llama.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 
8087e9dad3c87..af5da35a69f73 100644 --- a/llama.cpp +++ b/llama.cpp @@ -9199,10 +9199,12 @@ struct llama_context * llama_new_context_with_model( #elif defined(GGML_USE_CUBLAS) if (model->n_gpu_layers > 0) { // with split_mode LLAMA_SPLIT_NONE or LLAMA_SPLIT_ROW, only the main GPU backend is used - if (model->split_mode == LLAMA_SPLIT_ROW || model->split_mode == LLAMA_SPLIT_NONE) { + if (model->split_mode == LLAMA_SPLIT_NONE || model->split_mode == LLAMA_SPLIT_ROW) { ggml_backend_t backend = ggml_backend_cuda_init(model->main_gpu); if (backend == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, model->main_gpu); + llama_free(ctx); + return nullptr; } ctx->backends.push_back(backend); } else { @@ -9211,6 +9213,8 @@ struct llama_context * llama_new_context_with_model( ggml_backend_t backend = ggml_backend_cuda_init(device); if (backend == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize CUDA%d backend\n", __func__, device); + llama_free(ctx); + return nullptr; } ctx->backends.push_back(backend); } From 11583c14624b8fe4f7d47eb7cab0d4b84d8ae1f3 Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 8 Jan 2024 17:12:12 +0100 Subject: [PATCH 18/34] llama : rewrite lora with ggml-backend and compute on CPU ggml-ci --- ggml-backend.c | 45 ++++---- ggml-backend.h | 2 +- ggml-cuda.cu | 44 +++++++- ggml-impl.h | 2 + ggml.c | 19 +++- ggml.h | 5 + llama.cpp | 284 ++++++++++++++++++++++++++++--------------------- 7 files changed, 256 insertions(+), 145 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index cddd784b95337..aa1ac5b1047a6 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -948,11 +948,7 @@ static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, co // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend // TODO: merge passes static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { - // reset state - size_t hash_size = sched->hash_set.size; - memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); - memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size); - memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size); + // reset splits sched->n_splits = 0; struct ggml_init_params params = { @@ -961,11 +957,13 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g /* .no_alloc = */ true }; - if (sched->ctx != NULL) { - ggml_free(sched->ctx); - } + ggml_free(sched->ctx); sched->ctx = ggml_init(params); + if (sched->ctx == NULL) { + fprintf(stderr, "%s: failed to initialize context\n", __func__); + GGML_ASSERT(false); + } // pass 1: assign backends to ops with allocated inputs for (int i = 0; i < graph->n_leafs; i++) { @@ -1309,13 +1307,23 @@ static void sched_reset(ggml_backend_sched_t sched) { for (int i = 0; i < sched->n_backends; i++) { ggml_tallocr_reset(sched->tallocs[i]); } + // reset state for the next run + size_t hash_size = sched->hash_set.size; + memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); + memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size); + memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size); } -ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends) { +ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends, size_t graph_size) { + GGML_ASSERT(n_backends > 0); GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS); - struct 
ggml_backend_sched * sched = malloc(sizeof(struct ggml_backend_sched)); - memset(sched, 0, sizeof(struct ggml_backend_sched)); + struct ggml_backend_sched * sched = calloc(sizeof(struct ggml_backend_sched), 1); + + // initialize hash table + sched->hash_set = ggml_hash_set_new(graph_size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS); + sched->node_talloc = calloc(sizeof(sched->node_talloc[0]) * sched->hash_set.size, 1); + sched->node_copies = calloc(sizeof(sched->node_copies[0]) * sched->hash_set.size, 1); sched->n_backends = n_backends; for (int i = 0; i < n_backends; i++) { @@ -1340,6 +1348,7 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) { ggml_tallocr_free(sched->tallocs[i]); } ggml_gallocr_free(sched->galloc); + ggml_free(sched->ctx); free(sched->hash_set.keys); free(sched->node_talloc); free(sched->node_copies); @@ -1347,12 +1356,7 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) { } void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) { - // initialize hash tables - size_t hash_size = measure_graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS; - sched->hash_set.size = hash_size; - sched->hash_set.keys = malloc(sizeof(sched->hash_set.keys[0]) * hash_size); - sched->node_talloc = malloc(sizeof(sched->node_talloc[0]) * hash_size); - sched->node_copies = malloc(sizeof(sched->node_copies[0]) * hash_size); + GGML_ASSERT(ggml_tallocr_is_measure(sched->tallocs[0])); // can only be initialized once sched_split_graph(sched, measure_graph); sched_alloc_splits(sched); @@ -1368,7 +1372,8 @@ void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgr } void ggml_backend_sched_graph_split(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { - GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS); + GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS); + sched_split_graph(sched, graph); } @@ -1385,17 +1390,17 @@ int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) { ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) { int backend_index = sched_backend_prio(sched, backend); + GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); return sched->tallocs[backend_index]; } ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) { int backend_index = sched_backend_prio(sched, backend); + GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); return ggml_tallocr_get_buffer(sched->tallocs[backend_index]); } void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) { - // FIXME: node_allocr is cleared when splitting the graph, so all user assignments are lost - // to avoid this, we need to clear node_allocr after compute rather than before split int backend_index = sched_backend_prio(sched, backend); GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends); node_allocr(node) = sched->tallocs[backend_index]; diff --git a/ggml-backend.h b/ggml-backend.h index a50c17462459d..250a9760c8338 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -149,7 +149,7 @@ extern "C" { typedef struct ggml_backend_sched * ggml_backend_sched_t; // Initialize a backend scheduler - GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends); + GGML_API ggml_backend_sched_t 
ggml_backend_sched_new(ggml_backend_t * backends, int n_backends, size_t graph_size); GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); // Initialize backend buffers from a measure graph GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 124d7a5f58973..c4ebc584b030b 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9720,16 +9720,56 @@ static void ggml_backend_cuda_split_buffer_set_tensor(ggml_backend_buffer_t buff } } +static void ggml_backend_cuda_split_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { + // split tensors must always be set in their entirety at once + GGML_ASSERT(offset == 0); + GGML_ASSERT(size == ggml_nbytes(tensor)); + + ggml_backend_cuda_split_buffer_type_context * buft_ctx = (ggml_backend_cuda_split_buffer_type_context *)buffer->buft->context; + + const int64_t ne0 = tensor->ne[0]; + const size_t nb1 = tensor->nb[1]; + ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra; + + for (int id = 0; id < g_device_count; ++id) { + int64_t row_low, row_high; + get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, id); + + int64_t nrows_split = row_high - row_low; + if (nrows_split == 0) { + continue; + } + + const size_t offset_split = row_low*nb1; + size_t size = ggml_nbytes_split(tensor, nrows_split); + const size_t original_size = size; + + // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses + if (ne0 % MATRIX_ROW_PADDING != 0) { + size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING); + } + + char * buf_host = (char *)data + offset_split; + //CUDA_CHECK(cudaMemcpy(extra->data_device[id], buf_host, original_size, cudaMemcpyHostToDevice)); + CUDA_CHECK(cudaMemcpy(buf_host, extra->data_device[id], original_size, cudaMemcpyDeviceToHost)); + } +} + +static void ggml_backend_cuda_split_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + UNUSED(buffer); + UNUSED(value); +} + static struct ggml_backend_buffer_i ggml_cuda_backend_split_buffer_interface = { /* .get_name = */ ggml_backend_cuda_split_buffer_get_name, /* .free_buffer = */ ggml_backend_cuda_split_buffer_free_buffer, /* .get_base = */ ggml_backend_cuda_split_buffer_get_base, /* .init_tensor = */ ggml_backend_cuda_split_buffer_init_tensor, /* .set_tensor = */ ggml_backend_cuda_split_buffer_set_tensor, - /* .get_tensor = */ NULL, + /* .get_tensor = */ ggml_backend_cuda_split_buffer_get_tensor, /* .cpy_tensor_from = */ NULL, /* .cpy_tensor_to = */ NULL, - /* .clear = */ NULL, + /* .clear = */ ggml_backend_cuda_split_buffer_clear, }; // cuda split buffer type diff --git a/ggml-impl.h b/ggml-impl.h index 2faced08059ed..2c58075ac7c56 100644 --- a/ggml-impl.h +++ b/ggml-impl.h @@ -228,6 +228,8 @@ inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) { #define GGML_HASHTABLE_FULL ((size_t)-1) #define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2) +struct ggml_hash_set ggml_hash_set_new(size_t size); + bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key); // returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted diff --git a/ggml.c b/ggml.c index 0f7d9d40cc461..90814b8e2dbe7 100644 --- a/ggml.c +++ b/ggml.c @@ -4343,6 +4343,23 @@ struct ggml_tensor * ggml_cpy_inplace( return ggml_cpy_impl(ctx, a, b, true); } +struct ggml_tensor * ggml_cast( + 
struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_type type) { + bool is_node = false; + + struct ggml_tensor * result = ggml_new_tensor(ctx, type, GGML_MAX_DIMS, a->ne); + ggml_format_name(result, "%s (copy)", a->name); + + result->op = GGML_OP_CPY; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; + result->src[0] = a; + result->src[1] = result; + + return result; +} + // ggml_cont static struct ggml_tensor * ggml_cont_impl( @@ -14835,7 +14852,7 @@ size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tenso return i; } -static struct ggml_hash_set ggml_hash_set_new(size_t size) { +struct ggml_hash_set ggml_hash_set_new(size_t size) { size = ggml_hash_size(size); struct ggml_hash_set result; result.size = size; diff --git a/ggml.h b/ggml.h index 64f4e45e880fa..163e2d2e5631c 100644 --- a/ggml.h +++ b/ggml.h @@ -1165,6 +1165,11 @@ extern "C" { struct ggml_tensor * a, struct ggml_tensor * b); + GGML_API struct ggml_tensor * ggml_cast( + struct ggml_context * ctx, + struct ggml_tensor * a, + enum ggml_type type); + // make contiguous GGML_API struct ggml_tensor * ggml_cont( struct ggml_context * ctx, diff --git a/llama.cpp b/llama.cpp index af5da35a69f73..8b0ba7121ea73 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2451,8 +2451,11 @@ struct llama_model_loader { const size_t offs = file_offset(ggml_get_name(cur)); if (use_mmap && mapping) { - GGML_ASSERT(cur->data == nullptr); - cur->data = (uint8_t *)mapping->addr + offs; + if (cur->data == nullptr) { + cur->data = (uint8_t *)mapping->addr + offs; + } else { + memcpy(cur->data, (uint8_t *)mapping->addr + offs, ggml_nbytes(cur)); + } } else { GGML_ASSERT(cur->data != nullptr); file.seek(offs, SEEK_SET); @@ -8769,48 +8772,23 @@ static int llama_apply_lora_from_file_internal( LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling); - // create a name -> tensor map of the model to accelerate lookups - // find the max tensor size to estimate the required temporary buffer size - size_t max_tensor_size = 0; - std::unordered_map model_tensors; - for (const auto & kv : model.tensors_by_name) { - model_tensors.insert(kv); - size_t f32_size = ggml_nelements(kv.second) * sizeof(float); - max_tensor_size = std::max(max_tensor_size, f32_size); - } - - // create a temporary ggml context to store the lora tensors - // TODO: use ggml-alloc - size_t lora_ctx_size = max_tensor_size * 3; - LLAMA_LOG_INFO("%s: allocating %.f MB for lora temporary buffer\n", __func__, lora_ctx_size / 1024.0 / 1024.0); - std::vector lora_buf(lora_ctx_size); - - struct ggml_init_params params; - params.mem_size = lora_buf.size(); - params.mem_buffer = lora_buf.data(); - params.no_alloc = false; - - using unique_context = std::unique_ptr; - - unique_context lora_ctx(nullptr, ggml_free); - lora_ctx.reset(ggml_init(params)); - std::unordered_map lora_tensors; - // load base model std::unique_ptr ml; - - if (path_base_model) { + if (path_base_model) { LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model); ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr)); - ml->init_mapping(false); // no prefetching + ml->init_mapping(/*prefetch*/ false); // no prefetching } - // read tensors and apply - bool warned = false; - int n_tensors = 0; - - std::vector work_buffer; + struct tensor_meta { + std::string name; + ggml_type type; + int32_t ne[2]; + size_t offset; + }; + std::map tensor_meta_map; + // load all tensor meta while (true) { if 
(fin.tell() == fin.size) { // eof @@ -8823,7 +8801,7 @@ static int llama_apply_lora_from_file_internal( fin.read_raw(&n_dims, sizeof(n_dims)); fin.read_raw(&name_len, sizeof(name_len)); - fin.read_raw(&ftype, sizeof(ftype)); + fin.read_raw(&ftype, sizeof(ftype)); if (n_dims != 1 && n_dims != 2) { LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims); @@ -8837,31 +8815,23 @@ static int llama_apply_lora_from_file_internal( std::string name; { - GGML_ASSERT(name_len <= 1024); - char buf[1024]; + GGML_ASSERT(name_len < GGML_MAX_NAME); + char buf[GGML_MAX_NAME]; fin.read_raw(buf, name_len); name = std::string(buf, name_len); } - // check for lora suffix and get the type of tensor - const std::string lora_suffix = ".lora"; - size_t pos = name.rfind(lora_suffix); - if (pos == std::string::npos) { - LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str()); - return 1; + // check for lora suffix + std::string lora_suffix; + if (name.length() > 6) { + lora_suffix = name.substr(name.length() - 6); } - - std::string lora_type = name.substr(pos + lora_suffix.length()); - std::string base_name = name; - base_name.erase(pos); - // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(), base_name.c_str(), lora_type.c_str()); - - if (model_tensors.find(base_name) == model_tensors.end()) { - LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data()); + if (lora_suffix != ".loraA" && lora_suffix != ".loraB") { + LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str()); return 1; } - // create ggml tensor + // tensor type ggml_type wtype; switch (ftype) { case 0: wtype = GGML_TYPE_F32; break; @@ -8873,105 +8843,177 @@ static int llama_apply_lora_from_file_internal( return false; } } - ggml_tensor * lora_tensor = ggml_new_tensor_2d(lora_ctx.get(), wtype, ne[0], ne[1]); - ggml_set_name(lora_tensor, name.c_str()); - // load tensor data + // data offset size_t offset = fin.tell(); - size_t tensor_data_size = ggml_nbytes(lora_tensor); offset = (offset + 31) & -32; - fin.seek(offset, SEEK_SET); - fin.read_raw(lora_tensor->data, tensor_data_size); - lora_tensors[name] = lora_tensor; + // skip tensor data + fin.seek(offset + ggml_row_size(wtype, ne[0]) * ne[1], SEEK_SET); - // check if we have both A and B tensors and apply - if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() && - lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) { + tensor_meta_map.emplace(name, tensor_meta{ name, wtype, { ne[0], ne[1] }, offset }); + } - ggml_tensor * dest_t = model_tensors[base_name]; + bool warned = false; + int n_tensors = 0; - // FIXME: ggml-backend + // apply + ggml_backend_t backend_cpu = ggml_backend_cpu_init(); + if (backend_cpu == nullptr) { + LLAMA_LOG_ERROR("%s: error: failed to initialize cpu backend\n", __func__); + return 1; + } + ggml_backend_cpu_set_n_threads(backend_cpu, n_threads); - ggml_tensor * base_t; - if (ml) { - struct gguf_context * ctx_gguf = ml->ctx_gguf; + std::vector> read_buf; + for (const auto & it : model.tensors_by_name) { + const std::string & base_name = it.first; + ggml_tensor * model_t = it.second; - // load from base model - if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) { - LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str()); - return 1; - } - - base_t = ml->get_tensor_meta(base_name.c_str()); - ml->load_data_for(base_t); - } else { - base_t = dest_t; - } + if (tensor_meta_map.find(base_name + 
".loraA") == tensor_meta_map.end() || + tensor_meta_map.find(base_name + ".loraB") == tensor_meta_map.end()) { + continue; + } - if (ggml_is_quantized(base_t->type)) { - if (!warned) { - LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, " - "use a f16 or f32 base model with --lora-base\n", __func__); - warned = true; - } - } + tensor_meta & metaA = tensor_meta_map.at(base_name + ".loraA"); + tensor_meta & metaB = tensor_meta_map.at(base_name + ".loraB"); - ggml_tensor * loraA = lora_tensors[base_name + ".loraA"]; - GGML_ASSERT(loraA->type == GGML_TYPE_F32); - ggml_set_name(loraA, "loraA"); + ggml_init_params lora_init_params = { + /* .mem_size */ ggml_tensor_overhead()*128 + ggml_graph_overhead(), + /* .mem_buffer */ nullptr, + /* .no_alloc */ true, + }; + ggml_context * lora_ctx = ggml_init(lora_init_params); + if (lora_ctx == nullptr) { + LLAMA_LOG_ERROR("%s: error: failed to initialize lora context\n", __func__); + ggml_backend_free(backend_cpu); + return 1; + } - ggml_tensor * loraB = lora_tensors[base_name + ".loraB"]; - GGML_ASSERT(loraB->type == GGML_TYPE_F32); - ggml_set_name(loraB, "loraB"); + // create tensors + ggml_tensor * loraA = ggml_new_tensor_2d(lora_ctx, metaA.type, metaA.ne[0], metaA.ne[1]); + ggml_tensor * loraB = ggml_new_tensor_2d(lora_ctx, metaB.type, metaB.ne[0], metaB.ne[1]); + ggml_set_name(loraA, metaA.name.c_str()); + ggml_set_name(loraB, metaB.name.c_str()); - if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) { - LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");" - " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]); + ggml_tensor * base_t; + if (ml) { + if (gguf_find_tensor(ml->ctx_gguf, base_name.c_str()) < 0) { + LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str()); return 1; } + base_t = ggml_dup_tensor(lora_ctx, ml->get_tensor_meta(base_name.c_str())); + } else { + base_t = ggml_dup_tensor(lora_ctx, model_t); + } + ggml_set_name(base_t, base_name.c_str()); + + // allocate in backend buffer + ggml_backend_buffer_t lora_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type()); + if (lora_buf == nullptr) { + LLAMA_LOG_ERROR("%s: error: failed to allocate lora tensors\n", __func__); + return 1; + } + + // load tensor data + auto load_tensor = [&read_buf, &fin](const tensor_meta & tensor_meta, ggml_tensor * tensor) { + read_buf.resize(ggml_nbytes(tensor)); + fin.seek(tensor_meta.offset, SEEK_SET); + fin.read_raw(read_buf.data(), ggml_nbytes(tensor)); + ggml_backend_tensor_set(tensor, read_buf.data(), 0, read_buf.size()); + }; + load_tensor(metaA, loraA); + load_tensor(metaB, loraB); + + // load base model tensor data + if (ml) { + ml->load_data_for(base_t); + } else { + ggml_backend_tensor_copy(model_t, base_t); + } + + if (ggml_is_quantized(base_t->type) && !warned) { + LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, " + "use a f16 or f32 base model with --lora-base\n", __func__); + warned = true; + } + + if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) { + LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");" + " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]); + ggml_free(lora_ctx); + ggml_backend_buffer_free(lora_buf); + ggml_backend_free(backend_cpu); + return 1; + } + auto build_lora_graph = 
[&]() { // w = w + BA*s - ggml_tensor * BA = ggml_mul_mat(lora_ctx.get(), loraA, loraB); + ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB); ggml_set_name(BA, "BA"); if (scaling != 1.0f) { - BA = ggml_scale_inplace(lora_ctx.get(), BA, scaling); + BA = ggml_scale(lora_ctx, BA, scaling); ggml_set_name(BA, "BA_scaled"); } ggml_tensor * r; - if (base_t == dest_t) { - r = ggml_add_inplace(lora_ctx.get(), dest_t, BA); - ggml_set_name(r, "r_add_inplace"); - } - else { - r = ggml_add(lora_ctx.get(), base_t, BA); - ggml_set_name(r, "r_add"); + r = ggml_add_inplace(lora_ctx, base_t, BA); + ggml_set_name(r, "r_add"); - r = ggml_cpy(lora_ctx.get(), r, dest_t); - ggml_set_name(r, "r_cpy"); + if (base_t->type != model_t->type) { + // convert the result to the model type + r = ggml_cast(lora_ctx, r, model_t->type); + ggml_set_name(r, "r_cast"); } - struct ggml_cgraph * gf = ggml_new_graph(lora_ctx.get()); - ggml_build_forward_expand(gf, r); + return r; + }; - ggml_graph_compute_helper(work_buffer, gf, n_threads); + ggml_cgraph * gf = ggml_new_graph(lora_ctx); + ggml_tensor * r = build_lora_graph(); + ggml_build_forward_expand(gf, r); - // the tensors in the adapter must be sorted such that loraA and loraB of the same tensor are next to each other - GGML_ASSERT(lora_tensors.size() == 2); + ggml_backend_buffer_t graph_buf = ggml_backend_alloc_ctx_tensors_from_buft(lora_ctx, ggml_backend_cpu_buffer_type()); + if (graph_buf == nullptr) { + LLAMA_LOG_ERROR("%s: error: failed to allocate graph tensors\n", __func__); + ggml_free(lora_ctx); + ggml_backend_buffer_free(lora_buf); + ggml_backend_free(backend_cpu); + return 1; + } - // we won't need these tensors again, reset the context to save memory - lora_ctx.reset(ggml_init(params)); - lora_tensors.clear(); + ggml_backend_graph_compute(backend_cpu, gf); - n_tensors++; - if (n_tensors % 4 == 0) { - LLAMA_LOG_INFO("."); - } + ggml_backend_tensor_set(model_t, r->data, 0, ggml_nbytes(r)); + +#if 0 + // TODO: use scheduler with fallback to CPU for less copies between CPU and GPU + //ggml_backend_sched_t sched = ggml_backend_sched_new(backends.data(), backends.size(), GGML_DEFAULT_GRAPH_SIZE); + + // sched compute + ggml_build_forward_expand(gf, build_graph()); + ggml_backend_sched_init_measure(sched, gf); + + // create the graph again, since the previous one was destroyed by the measure + ggml_graph_clear(gf); + ggml_build_forward_expand(gf, build_graph()); + ggml_backend_sched_graph_compute(sched, gf); + ggml_backend_sched_free(sched); +#endif + + ggml_backend_buffer_free(lora_buf); + ggml_backend_buffer_free(graph_buf); + ggml_free(lora_ctx); + + n_tensors++; + if (n_tensors % 4 == 0) { + LLAMA_LOG_INFO("."); } } + ggml_backend_free(backend_cpu); + const int64_t t_lora_us = ggml_time_us() - t_start_lora_us; LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0); @@ -9269,7 +9311,7 @@ struct llama_context * llama_new_context_with_model( // buffer used to store the computation graph and the tensor meta data ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead()); - ctx->sched = ggml_backend_sched_new(ctx->backends.data(), ctx->backends.size()); + ctx->sched = ggml_backend_sched_new(ctx->backends.data(), ctx->backends.size(), LLAMA_MAX_NODES); ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, ctx->backend_cpu); // build worst-case graph From 4ed5f621be79f8104df86c992850733a57bb3c2a Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 8 Jan 2024 18:17:18 +0100 Subject: [PATCH 19/34] llama : only map to a backend buffer 
the region of the file mapping containing the tensors used in the buffer --- ggml-cuda.cu | 1 - llama.cpp | 25 ++++++++++++++++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index c4ebc584b030b..3d9473e3fd512 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9750,7 +9750,6 @@ static void ggml_backend_cuda_split_buffer_get_tensor(ggml_backend_buffer_t buff } char * buf_host = (char *)data + offset_split; - //CUDA_CHECK(cudaMemcpy(extra->data_device[id], buf_host, original_size, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(buf_host, extra->data_device[id], original_size, cudaMemcpyDeviceToHost)); } } diff --git a/llama.cpp b/llama.cpp index 8b0ba7121ea73..35de91b6174ac 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2446,6 +2446,18 @@ struct llama_model_loader { } } + void get_mapping_range(size_t * first, size_t * last, ggml_context * ctx) const { + GGML_ASSERT(mapping); + + *first = mapping->size; + *last = 0; + for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) { + const size_t offs = file_offset(ggml_get_name(tensor)); + *first = std::min(*first, offs); + *last = std::max(*last, offs + ggml_nbytes(tensor)); + } + } + // for backwards compatibility, does not support ggml-backend void load_data_for(struct ggml_tensor * cur) const { const size_t offs = file_offset(ggml_get_name(cur)); @@ -3733,15 +3745,19 @@ static bool llm_load_tensors( ggml_context * ctx = it.second; ggml_backend_buffer_t buf = nullptr; - // TODO: do not use the whole model mapping for the buffer, only the region containing the tensors - // this is important for metal: if the entire model could be mapped to a metal buffer, then we could use metal for all layers + // only the region containing the tensors in the model is mapped to the backend buffer + // this is important for metal: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers if (ml.use_mmap && buft == llama_default_buffer_type_cpu(true)) { - buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size); + size_t first, last; + ml.get_mapping_range(&first, &last, ctx); + buf = ggml_backend_cpu_buffer_from_ptr((char *) ml.mapping->addr + first, last - first); } #ifdef GGML_USE_METAL else if (ml.use_mmap && buft == ggml_backend_metal_buffer_type()) { const size_t max_size = ggml_get_max_tensor_size(ctx); - buf = ggml_backend_metal_buffer_from_ptr(ml.mapping->addr, ml.mapping->size, max_size); + size_t first, last; + ml.get_mapping_range(&first, &last, ctx); + buf = ggml_backend_metal_buffer_from_ptr((char *) ml.mapping->addr + first, last - first, max_size); } #endif else { @@ -5922,7 +5938,6 @@ static struct ggml_cgraph * llama_build_graph( alloc_inp_tokens = true; } - // FIXME: allocating this conditionally will result in issues with the measure allocator if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0 && batch.embd) { ggml_tallocr_alloc(lctx.alloc, cur); From fa7620116eec31ebb41a3b025e50efb49db4d917 Mon Sep 17 00:00:00 2001 From: slaren Date: Tue, 9 Jan 2024 03:14:16 +0100 Subject: [PATCH 20/34] opencl : add ggml-backend buffer type --- ggml-alloc.c | 1 + ggml-backend-impl.h | 14 +- ggml-backend.c | 31 ++-- ggml-backend.h | 8 +- ggml-cuda.cu | 52 ++----- ggml-metal.m | 9 +- ggml-opencl.cpp | 335 ++++++++++++++++++++++++++++++++++++++++++-- ggml-opencl.h | 16 ++- ggml.c | 2 +- ggml.h | 4 +- llama.cpp | 79 +++++------ 11 files changed, 419 insertions(+), 132 deletions(-) diff --git a/ggml-alloc.c 
b/ggml-alloc.c index 5e730aa9a9d96..7836f064e8d89 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -229,6 +229,7 @@ void ggml_tallocr_reset(ggml_tallocr_t alloc) { alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows } else { alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset; + ggml_backend_buffer_reset(alloc->buffer); } } diff --git a/ggml-backend-impl.h b/ggml-backend-impl.h index ab6d96c681e94..859e923e26200 100644 --- a/ggml-backend-impl.h +++ b/ggml-backend-impl.h @@ -19,7 +19,7 @@ extern "C" { const char * (*get_name) (ggml_backend_buffer_type_t buft); ggml_backend_buffer_t (*alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size); size_t (*get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment - size_t (*get_alloc_size) (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding + size_t (*get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding bool (*supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend // check if tensor data is in host memory // should be equivalent to supports_backend(buft, ggml_backend_cpu_init()) @@ -37,15 +37,15 @@ extern "C" { struct ggml_backend_buffer_i { const char * (*get_name) (ggml_backend_buffer_t buffer); void (*free_buffer) (ggml_backend_buffer_t buffer); - //void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras void * (*get_base) (ggml_backend_buffer_t buffer); void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); // (optional) copy tensor between different buffer-type, allow for single-copy tranfers - void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); void (*clear) (ggml_backend_buffer_t buffer, uint8_t value); + void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras }; struct ggml_backend_buffer { @@ -82,13 +82,13 @@ extern "C" { void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); // (optional) asynchroneous tensor copy - void (*cpy_tensor_from_async)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to_async) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_from_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst); + void (*cpy_tensor_to_async) (ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst); void (*synchronize)(ggml_backend_t backend); // compute 
graph with a plan - ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph); + ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph); void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); diff --git a/ggml-backend.c b/ggml-backend.c index aa1ac5b1047a6..25d9c98d593ab 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -103,11 +103,11 @@ void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_t } size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer) { - return ggml_backend_buft_get_alignment(ggml_backend_buffer_type(buffer)); + return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer)); } size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { - return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type(buffer), tensor); + return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor); } void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { @@ -115,17 +115,23 @@ void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { } bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) { - return ggml_backend_buft_is_host(ggml_backend_buffer_type(buffer)); + return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer)); } void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) { buffer->usage = usage; } -ggml_backend_buffer_type_t ggml_backend_buffer_type(ggml_backend_buffer_t buffer) { +ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) { return buffer->buft; } +void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) { + if (buffer->iface.reset) { + buffer->iface.reset(buffer); + } +} + // backend const char * ggml_backend_name(ggml_backend_t backend) { @@ -431,13 +437,13 @@ static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, con GGML_UNUSED(buffer); } -static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) { +static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); GGML_UNUSED(buffer); } -static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) { +static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); GGML_UNUSED(buffer); @@ -457,6 +463,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i = { /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from, /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to, /* .clear = */ ggml_backend_cpu_buffer_clear, + /* .reset = */ NULL, }; // for buffers from ptr, free is not called @@ -470,6 +477,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = { /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from, /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to, /* .clear = */ ggml_backend_cpu_buffer_clear, + /* .reset = */ NULL, }; static const size_t 
TENSOR_ALIGNMENT = 64; // should be enough for AVX 512 @@ -554,7 +562,6 @@ static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_ return NULL; } - // FIXME: this is a hack to avoid having to implement a new buffer type ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); buffer->buft = buft; buffer->iface.get_name = ggml_backend_cpu_hbm_buffer_get_name; @@ -610,7 +617,7 @@ struct ggml_backend_plan_cpu { struct ggml_cgraph cgraph; }; -static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) { +static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, const struct ggml_cgraph * cgraph) { struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context; struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu)); @@ -1371,14 +1378,10 @@ void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgr sched_reset(sched); } -void ggml_backend_sched_graph_split(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { +void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS); sched_split_graph(sched, graph); -} - -void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { - ggml_backend_sched_graph_split(sched, graph); sched_alloc_splits(sched); sched_compute_splits(sched); sched_reset(sched); @@ -1410,7 +1413,7 @@ void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { GGML_ASSERT(tensor->buffer == NULL); - //GGML_ASSERT(tensor->data == NULL); // views of pre-allocted tensors may have the data set, but still need to be initialized + //GGML_ASSERT(tensor->data == NULL); // views of pre-allocated tensors may have the data set in ggml_new_tensor, but still need to be initialized by the backend GGML_ASSERT(tensor->view_src != NULL); GGML_ASSERT(tensor->view_src->buffer != NULL); GGML_ASSERT(tensor->view_src->data != NULL); diff --git a/ggml-backend.h b/ggml-backend.h index 250a9760c8338..c4eff546a3d22 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -40,7 +40,8 @@ extern "C" { GGML_API void ggml_backend_buffer_clear (ggml_backend_buffer_t buffer, uint8_t value); GGML_API bool ggml_backend_buffer_is_host (ggml_backend_buffer_t buffer); GGML_API void ggml_backend_buffer_set_usage (ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage); - GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_type (ggml_backend_buffer_t buffer); + GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer); + GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer); // @@ -166,11 +167,6 @@ extern "C" { ggml_backend_sched_t sched, struct ggml_cgraph * graph); - // Split without computing - only useful to find the number of splits - GGML_API void ggml_backend_sched_graph_split( - ggml_backend_sched_t sched, - struct ggml_cgraph * graph); - // // Utils // diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 3d9473e3fd512..61c5f67bc68bf 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9487,7 +9487,7 @@ static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t CUDA_CHECK(cudaDeviceSynchronize()); } -static struct ggml_backend_buffer_i 
ggml_cuda_backend_buffer_interface = { +static ggml_backend_buffer_i ggml_backend_cuda_buffer_interface = { /* .get_name = */ ggml_backend_cuda_buffer_get_name, /* .free_buffer = */ ggml_backend_cuda_buffer_free_buffer, /* .get_base = */ ggml_backend_cuda_buffer_get_base, @@ -9497,6 +9497,7 @@ static struct ggml_backend_buffer_i ggml_cuda_backend_buffer_interface = { /* .cpy_tensor_from = */ NULL, /* .cpy_tensor_to = */ NULL, /* .clear = */ ggml_backend_cuda_buffer_clear, + /* .reset = */ NULL, }; // cuda buffer type @@ -9528,7 +9529,7 @@ static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_bac ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(buft_ctx->device, dev_ptr); - return ggml_backend_buffer_init(buft, ggml_cuda_backend_buffer_interface, ctx, size); + return ggml_backend_buffer_init(buft, ggml_backend_cuda_buffer_interface, ctx, size); } static size_t ggml_backend_cuda_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { @@ -9537,7 +9538,7 @@ static size_t ggml_backend_cuda_buffer_type_get_alignment(ggml_backend_buffer_ty UNUSED(buft); } -static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, ggml_tensor * tensor) { +static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { int64_t row_low = 0; int64_t row_high = ggml_nrows(tensor); int64_t nrows_split = row_high - row_low; @@ -9574,7 +9575,7 @@ static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = { /* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment, /* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size, /* .supports_backend = */ ggml_backend_cuda_buffer_type_supports_backend, - /* .is_host = */ nullptr, + /* .is_host = */ NULL, }; ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { @@ -9583,7 +9584,7 @@ ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) { return nullptr; } - static struct ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES]; + static ggml_backend_buffer_type ggml_backend_cuda_buffer_types[GGML_CUDA_MAX_DEVICES]; static bool ggml_backend_cuda_buffer_type_initialized = false; @@ -9759,7 +9760,7 @@ static void ggml_backend_cuda_split_buffer_clear(ggml_backend_buffer_t buffer, u UNUSED(value); } -static struct ggml_backend_buffer_i ggml_cuda_backend_split_buffer_interface = { +static struct ggml_backend_buffer_i ggml_backend_cuda_split_buffer_interface = { /* .get_name = */ ggml_backend_cuda_split_buffer_get_name, /* .free_buffer = */ ggml_backend_cuda_split_buffer_free_buffer, /* .get_base = */ ggml_backend_cuda_split_buffer_get_base, @@ -9769,6 +9770,7 @@ static struct ggml_backend_buffer_i ggml_cuda_backend_split_buffer_interface = { /* .cpy_tensor_from = */ NULL, /* .cpy_tensor_to = */ NULL, /* .clear = */ ggml_backend_cuda_split_buffer_clear, + /* .reset = */ NULL, }; // cuda split buffer type @@ -9786,7 +9788,7 @@ static ggml_backend_buffer_t ggml_backend_cuda_split_buffer_type_alloc_buffer(gg // as returned by get_alloc_size. this limit is enforced during tensor allocation by ggml-alloc, so it must be correct. 
ggml_backend_cuda_split_buffer_context * ctx = new ggml_backend_cuda_split_buffer_context(); - return ggml_backend_buffer_init(buft, ggml_cuda_backend_split_buffer_interface, ctx, size); + return ggml_backend_buffer_init(buft, ggml_backend_cuda_split_buffer_interface, ctx, size); } static size_t ggml_backend_cuda_split_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { @@ -9795,7 +9797,7 @@ static size_t ggml_backend_cuda_split_buffer_type_get_alignment(ggml_backend_buf UNUSED(buft); } -static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, ggml_tensor * tensor) { +static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { ggml_backend_cuda_split_buffer_type_context * ctx = (ggml_backend_cuda_split_buffer_type_context *)buft->context; size_t total_size = 0; @@ -9903,7 +9905,6 @@ static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggm return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); } - // FIXME: this is a hack to avoid having to implement a new buffer type ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); buffer->buft = buft; buffer->iface.get_name = ggml_backend_cuda_host_buffer_name; @@ -9975,29 +9976,6 @@ static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { UNUSED(backend); } -static ggml_backend_graph_plan_t ggml_backend_cuda_graph_plan_create(ggml_backend_t backend, ggml_cgraph * cgraph) { - GGML_ASSERT(!"not implemented"); - - return nullptr; - - UNUSED(backend); - UNUSED(cgraph); -} - -static void ggml_backend_cuda_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { - GGML_ASSERT(!"not implemented"); - - UNUSED(backend); - UNUSED(plan); -} - -static void ggml_backend_cuda_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { - GGML_ASSERT(!"not implemented"); - - UNUSED(backend); - UNUSED(plan); -} - static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; @@ -10149,7 +10127,7 @@ static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_ten UNUSED(backend); } -static ggml_backend_i cuda_backend_i = { +static ggml_backend_i ggml_backend_cuda_interface = { /* .get_name = */ ggml_backend_cuda_name, /* .free = */ ggml_backend_cuda_free, /* .get_default_buffer_type = */ ggml_backend_cuda_get_default_buffer_type, @@ -10158,9 +10136,9 @@ static ggml_backend_i cuda_backend_i = { /* .cpy_tensor_from_async = */ NULL, /* .cpy_tensor_to_async = */ NULL, /* .synchronize = */ ggml_backend_cuda_synchronize, - /* .graph_plan_create = */ ggml_backend_cuda_graph_plan_create, - /* .graph_plan_free = */ ggml_backend_cuda_graph_plan_free, - /* .graph_plan_compute = */ ggml_backend_cuda_graph_plan_compute, + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_compute = */ NULL, /* .graph_compute = */ ggml_backend_cuda_graph_compute, /* .supports_op = */ ggml_backend_cuda_supports_op, }; @@ -10182,7 +10160,7 @@ ggml_backend_t ggml_backend_cuda_init(int device) { }; ggml_backend_t cuda_backend = new ggml_backend { - /* .interface = */ cuda_backend_i, + /* .interface = */ ggml_backend_cuda_interface, /* .context = */ ctx }; diff --git a/ggml-metal.m b/ggml-metal.m index 00b377a3e1389..5e530e280f9de 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2481,13 +2481,13 @@ static void 
ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, c UNUSED(buffer); } -static void ggml_backend_metal_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) { +static void ggml_backend_metal_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); UNUSED(buffer); } -static void ggml_backend_metal_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, struct ggml_tensor * src, struct ggml_tensor * dst) { +static void ggml_backend_metal_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); UNUSED(buffer); @@ -2509,6 +2509,7 @@ static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_ /* .cpy_tensor_from = */ ggml_backend_metal_buffer_cpy_tensor_from, /* .cpy_tensor_to = */ ggml_backend_metal_buffer_cpy_tensor_to, /* .clear = */ ggml_backend_metal_buffer_clear, + /* .reset = */ NULL, }; // default buffer type @@ -2715,7 +2716,7 @@ static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct UNUSED(backend); } -static struct ggml_backend_i metal_backend_i = { +static struct ggml_backend_i ggml_backend_metal_i = { /* .get_name = */ ggml_backend_metal_name, /* .free = */ ggml_backend_metal_free, /* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type, @@ -2741,7 +2742,7 @@ ggml_backend_t ggml_backend_metal_init(void) { ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend)); *metal_backend = (struct ggml_backend) { - /* .interface = */ metal_backend_i, + /* .interface = */ ggml_backend_metal_i, /* .context = */ ctx, }; diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp index 496f9cdca542d..cfa766eb1fbb2 100644 --- a/ggml-opencl.cpp +++ b/ggml-opencl.cpp @@ -1,5 +1,6 @@ #include "ggml.h" #include "ggml-opencl.h" +#include "ggml-backend-impl.h" #include #include @@ -10,7 +11,7 @@ #include #include -#define CL_TARGET_OPENCL_VERSION 110 +#define CL_TARGET_OPENCL_VERSION 120 #include #if defined(_MSC_VER) @@ -929,6 +930,11 @@ static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, co } void ggml_cl_init(void) { + static bool initialized = false; + if (initialized) { + return; + } + cl_int err; struct cl_device; @@ -1483,8 +1489,8 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr } else { d_X = ggml_cl_pool_malloc(sizeof(float) * x_ne, &x_size); } - cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size); - cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size); + cl_mem d_Y = src1->backend == GGML_BACKEND_GPU ? (cl_mem) src1->extra : ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size); + cl_mem d_D = dst->backend == GGML_BACKEND_GPU ? 
(cl_mem) dst->extra : ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size); size_t x_offset = 0; @@ -1501,7 +1507,9 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) { // copy src1 to device - CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL)); + if (src1->backend == GGML_BACKEND_CPU) { + CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL)); + } CL_CHECK(clFinish(queue)); @@ -1522,8 +1530,10 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr } // copy dst to host - float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL)); + if (dst->backend == GGML_BACKEND_CPU) { + float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); + CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL)); + } } } } @@ -1532,8 +1542,12 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr if (src0->backend != GGML_BACKEND_GPU) { ggml_cl_pool_free(d_X, x_size); } - ggml_cl_pool_free(d_Y, y_size); - ggml_cl_pool_free(d_D, d_size); + if (src1->backend != GGML_BACKEND_GPU) { + ggml_cl_pool_free(d_Y, y_size); + } + if (dst->backend != GGML_BACKEND_GPU) { + ggml_cl_pool_free(d_D, d_size); + } } static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t wsize) { @@ -1598,6 +1612,8 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL)); } + // FIXME: convert on device + for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) { // convert src1 to fp16 // TODO: use multiple threads @@ -1643,11 +1659,13 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr } // copy dst to host, then convert to float - CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL)); - - float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); - - ggml_fp16_to_fp32_row(tmp, d, d_ne); + if (dst->backend == GGML_BACKEND_CPU) { + CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL)); + float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3); + ggml_fp16_to_fp32_row(tmp, d, d_ne); + } else { + // FIXME: convert dst to fp32 on device + } } } } @@ -1801,7 +1819,7 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor * } -bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { +bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst) { const int64_t ne10 = src1->ne[0]; const int64_t ne0 = dst->ne[0]; @@ -1895,3 +1913,292 @@ void ggml_cl_transform_tensor(void * data, ggml_tensor * tensor) { tensor->extra = dst; GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); } + +// ggml-backend + +// buffer + +struct ggml_backend_opencl_buffer_context { + ~ggml_backend_opencl_buffer_context() { + if (buffer) { + clReleaseMemObject(buffer); + } + for (auto * sub_buffer : sub_buffers) { + clReleaseMemObject(sub_buffer); + } + } + + cl_mem buffer; + std::vector sub_buffers; +}; + +static void * const cl_ptr_base = (void *)(uintptr_t) 0x1000; + +static const char * 
ggml_backend_opencl_buffer_get_name(ggml_backend_buffer_t buffer) { + return "OpenCL"; + + GGML_UNUSED(buffer); +} + +static void ggml_backend_opencl_buffer_free_buffer(ggml_backend_buffer_t buffer) { + ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; + delete ctx; +} + +static void * ggml_backend_opencl_buffer_get_base(ggml_backend_buffer_t buffer) { + return cl_ptr_base; + + GGML_UNUSED(buffer); +} + +static void ggml_backend_opencl_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { + if (tensor->view_src != NULL && tensor->view_offs == 0) { + tensor->extra = tensor->view_src->extra; + } else { + ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; + cl_buffer_region region = {(size_t)((char *)tensor->data - (char *)cl_ptr_base), ggml_nbytes(tensor)}; + cl_int err; + cl_mem sub_buffer = clCreateSubBuffer(ctx->buffer, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, ®ion, &err); + CL_CHECK(err); + ctx->sub_buffers.push_back(sub_buffer); + tensor->extra = sub_buffer; + } + tensor->backend = GGML_BACKEND_GPU; +} + +static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + cl_mem tensor_buffer = (cl_mem) tensor->extra; + CL_CHECK(clEnqueueWriteBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL)); + CL_CHECK(clFinish(queue)); + + GGML_UNUSED(buffer); +} + +static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { + cl_mem tensor_buffer = (cl_mem) tensor->extra; + CL_CHECK(clEnqueueReadBuffer(queue, tensor_buffer, true, offset, size, data, 0, NULL, NULL)); + CL_CHECK(clFinish(queue)); + + GGML_UNUSED(buffer); +} + +static void ggml_backend_opencl_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; + CL_CHECK(clEnqueueFillBuffer(queue, ctx->buffer, &value, sizeof(value), 0, buffer->size, 0, NULL, NULL)); + CL_CHECK(clFinish(queue)); +} + +static void ggml_backend_opencl_buffer_reset(ggml_backend_buffer_t buffer) { + ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context; + for (auto * sub_buffer : ctx->sub_buffers) { + clReleaseMemObject(sub_buffer); + } + ctx->sub_buffers.clear(); +} + +static ggml_backend_buffer_i ggml_backend_opencl_buffer_interface = { + /* .get_name = */ ggml_backend_opencl_buffer_get_name, + /* .free_buffer = */ ggml_backend_opencl_buffer_free_buffer, + /* .get_base = */ ggml_backend_opencl_buffer_get_base, + /* .init_tensor = */ ggml_backend_opencl_buffer_init_tensor, + /* .set_tensor = */ ggml_backend_opencl_buffer_set_tensor, + /* .get_tensor = */ ggml_backend_opencl_buffer_get_tensor, + /* .cpy_tensor_from = */ NULL, + /* .cpy_tensor_to = */ NULL, + /* .clear = */ ggml_backend_opencl_buffer_clear, + /* .reset = */ ggml_backend_opencl_buffer_reset, +}; + +// buffer type + +static const char * ggml_backend_opencl_buffer_type_name(ggml_backend_buffer_type_t buffer_type) { + return "OpenCL"; + + GGML_UNUSED(buffer_type); +} + +static ggml_backend_buffer_t ggml_backend_opencl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buffer_type, size_t size) { + ggml_cl_init(); + + cl_int err; + cl_mem mem = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &err); + if (err != CL_SUCCESS) { + 
fprintf(stderr, "%s: failed to allocate %.2f MiB\n", __func__, size / 1024.0 / 1024.0); + return nullptr; + } + + ggml_backend_opencl_buffer_context * ctx = new ggml_backend_opencl_buffer_context{mem, {}}; + + return ggml_backend_buffer_init(buffer_type, ggml_backend_opencl_buffer_interface, ctx, size); +} + +static size_t ggml_backend_opencl_buffer_type_get_alignment(ggml_backend_buffer_type_t buffer_type) { + // FIXME: not thread safe, device may not be initialized yet + static cl_uint alignment = -1; + if (alignment == (cl_uint)-1) { + ggml_cl_init(); + clGetDeviceInfo(device, CL_DEVICE_MEM_BASE_ADDR_ALIGN, sizeof(cl_uint), &alignment, NULL); + } + return alignment; + + GGML_UNUSED(buffer_type); +} + +static bool ggml_backend_opencl_buffer_type_supports_backend(ggml_backend_buffer_type_t buffer_type, ggml_backend_t backend) { + //return ggml_backend_is_opencl(backend); // opencl must be used through the cpu backend + return ggml_backend_is_cpu(backend); + + GGML_UNUSED(buffer_type); +} + +static ggml_backend_buffer_type_i ggml_backend_opencl_buffer_type_interface = { + /* .get_name = */ ggml_backend_opencl_buffer_type_name, + /* .alloc_buffer = */ ggml_backend_opencl_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_opencl_buffer_type_get_alignment, + /* .get_alloc_size = */ NULL, + /* .supports_backend = */ ggml_backend_opencl_buffer_type_supports_backend, + /* .is_host = */ NULL, +}; + + +ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type() { + static ggml_backend_buffer_type buffer_type = { + /* .iface = */ ggml_backend_opencl_buffer_type_interface, + /* .context = */ nullptr, + }; + + return &buffer_type; +} + +#if 0 +// host buffer type + +static const char * ggml_backend_opencl_host_buffer_type_name(ggml_backend_buffer_type_t buft) { + return "CL_Host"; + + GGML_UNUSED(buft); +} + +static const char * ggml_backend_opencl_host_buffer_name(ggml_backend_buffer_t buffer) { + return "CL_Host"; + + GGML_UNUSED(buffer); +} + +static void ggml_backend_opencl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) { + ggml_cl_host_free(buffer->context); +} + +static ggml_backend_buffer_t ggml_backend_opencl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + void * ptr = ggml_cl_host_malloc(size); + + if (ptr == nullptr) { + // fallback to cpu buffer + return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size); + } + + ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size); + buffer->buft = buft; + buffer->iface.get_name = ggml_backend_opencl_host_buffer_name; + buffer->iface.free_buffer = ggml_backend_opencl_host_buffer_free_buffer; + + return buffer; +} + +ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type() { + static struct ggml_backend_buffer_type ggml_backend_opencl_buffer_type_host = { + /* .iface = */ { + /* .get_name = */ ggml_backend_opencl_host_buffer_type_name, + /* .alloc_buffer = */ ggml_backend_opencl_host_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment, + /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size, + /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend, + /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host, + }, + /* .context = */ nullptr, + }; + + return &ggml_backend_opencl_buffer_type_host; +} + +// backend + +static const char * ggml_backend_opencl_name(ggml_backend_t backend) { + return "OpenCL"; + + GGML_UNUSED(backend); +} + +static void 
ggml_backend_opencl_free(ggml_backend_t backend) { + GGML_UNUSED(backend); +} + +static ggml_backend_buffer_type_t ggml_backend_opencl_get_default_buffer_type(ggml_backend_t backend) { + return ggml_backend_opencl_buffer_type(); + + GGML_UNUSED(backend); +} + +static bool ggml_backend_opencl_graph_compute(ggml_backend_t backend, ggml_cgraph * graph) { + for (int i = 0; i < graph->n_nodes; ++i) { + ggml_tensor * node = graph->nodes[i]; + switch (node->op) { + case GGML_OP_MUL_MAT: + ggml_cl_mul_mat(node->src[0], node->src[1], node, nullptr, 0); + break; + case GGML_OP_MUL: + ggml_cl_mul(node->src[0], node->src[1], node); + break; + default: + GGML_ASSERT(false); + } + } + + return true; + + GGML_UNUSED(backend); +} + +static bool ggml_backend_opencl_supports_op(ggml_backend_t backend, const ggml_tensor * op) { + switch (op->op) { + case GGML_OP_MUL_MAT: + return ggml_cl_can_mul_mat(op->src[0], op->src[1], op); + case GGML_OP_MUL: + // return ggml_can_repeat_rows(op->src[1], op->src[0]); + return true; + default: + return false; + } + + GGML_UNUSED(backend); +} + +static ggml_backend_i opencl_backend_i = { + /* .get_name = */ ggml_backend_opencl_name, + /* .free = */ ggml_backend_opencl_free, + /* .get_default_buffer_type = */ ggml_backend_opencl_get_default_buffer_type, + /* .set_tensor_async = */ NULL, + /* .get_tensor_async = */ NULL, + /* .cpy_tensor_from_async = */ NULL, + /* .cpy_tensor_to_async = */ NULL, + /* .synchronize = */ NULL, + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_compute = */ NULL, + /* .graph_compute = */ ggml_backend_opencl_graph_compute, + /* .supports_op = */ ggml_backend_opencl_supports_op, +}; + +ggml_backend_t ggml_backend_opencl_init() { + ggml_backend_t backend = new ggml_backend { + /* .interface = */ opencl_backend_i, + /* .context = */ nullptr + }; + + return backend; +} + +bool ggml_backend_is_opencl(ggml_backend_t backend) { + return backend && backend->iface.get_name == ggml_backend_opencl_name; +} +#endif diff --git a/ggml-opencl.h b/ggml-opencl.h index 44d05bd64a3ad..919b00d63a04e 100644 --- a/ggml-opencl.h +++ b/ggml-opencl.h @@ -1,6 +1,7 @@ #pragma once #include "ggml.h" +#include "ggml-backend.h" #ifdef __cplusplus extern "C" { @@ -9,17 +10,26 @@ extern "C" { GGML_API void ggml_cl_init(void); GGML_API void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); -GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); +GGML_API bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, const struct ggml_tensor * dst); GGML_API size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst); GGML_API void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize); -GGML_API void * ggml_cl_host_malloc(size_t size); -GGML_API void ggml_cl_host_free(void * ptr); +// GGML_API void * ggml_cl_host_malloc(size_t size); +// GGML_API void ggml_cl_host_free(void * ptr); GGML_API void ggml_cl_free_data(const struct ggml_tensor* tensor); GGML_API void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor); +// backend API + +// GGML_API ggml_backend_t ggml_backend_opencl_init(void); + +// GGML_API bool ggml_backend_is_opencl(ggml_backend_t backend); + +GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_buffer_type(void); 
+// GGML_API ggml_backend_buffer_type_t ggml_backend_opencl_host_buffer_type(void); + #ifdef __cplusplus } #endif diff --git a/ggml.c b/ggml.c index 90814b8e2dbe7..7dcc70a5843b3 100644 --- a/ggml.c +++ b/ggml.c @@ -16601,7 +16601,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { return GGML_EXIT_SUCCESS; } -struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) { +struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threads) { if (n_threads <= 0) { n_threads = GGML_DEFAULT_N_THREADS; } diff --git a/ggml.h b/ggml.h index 163e2d2e5631c..98191b1a3624d 100644 --- a/ggml.h +++ b/ggml.h @@ -1852,8 +1852,8 @@ extern "C" { // ggml_graph_plan() has to be called before ggml_graph_compute() // when plan.work_size > 0, caller must allocate memory for plan.work_data - GGML_API struct ggml_cplan ggml_graph_plan (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/); - GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan); + GGML_API struct ggml_cplan ggml_graph_plan (const struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/); + GGML_API int ggml_graph_compute( struct ggml_cgraph * cgraph, struct ggml_cplan * cplan); // same as ggml_graph_compute() but the work data is allocated as a part of the context // note: the drawback of this API is that you must have ensured that the context has enough memory for the work data diff --git a/llama.cpp b/llama.cpp index 35de91b6174ac..3af2419d49dc7 100644 --- a/llama.cpp +++ b/llama.cpp @@ -1204,8 +1204,7 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer ggml_backend_buffer_type_t buft = nullptr; #if defined(GGML_USE_CUBLAS) - // in some cases such as the KV cache, there is no benefit to using a host buffer, - // since the data is never copied to the GPU + // host buffers should only be used when data is expected to be copied to/from the GPU if (host_buffer) { buft = ggml_backend_cuda_host_buffer_type(); } @@ -1228,6 +1227,8 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) { buft = ggml_backend_metal_buffer_type(); #elif defined(GGML_USE_CUBLAS) buft = ggml_backend_cuda_buffer_type(gpu); +#elif defined(GGML_USE_CLBLAST) + buft = ggml_backend_opencl_buffer_type(); #endif if (buft == nullptr) { @@ -1695,6 +1696,10 @@ static bool llama_kv_cache_init( cache.cells.clear(); cache.cells.resize(n_ctx); +#ifdef GGML_USE_CLBLAST + offload = false; +#endif + // count used buffer types std::map buft_layer_count; if (offload) { @@ -1702,9 +1707,10 @@ static bool llama_kv_cache_init( buft_layer_count[model.buft_layer[i].buft]++; } } else { - buft_layer_count[llama_default_buffer_type_cpu(false)] = n_layer; + buft_layer_count[llama_default_buffer_type_cpu(true)] = n_layer; } + // create a context for each buffer type std::map ctx_map; for (auto & it : buft_layer_count) { int n_layers = it.second; @@ -2413,26 +2419,12 @@ struct llama_model_loader { } void init_mapping(bool prefetch = true, llama_mlock * lmlock = nullptr) { - /* - // prefetch only CPU tensors - if (use_mmap) { - size_t size_pref = 0; // prefetch - - for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { - struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); - if (cur->backend == buffer_type_cpu) { - size_t tensor_end = gguf_get_tensor_offset(ctx_gguf, i) + ggml_nbytes(cur); - size_pref = std::max(size_pref, tensor_end); - } - } - mapping.reset(new llama_mmap(&file, 
gguf_get_data_offset(ctx_gguf) + size_pref, ggml_is_numa())); - } - */ // prefetch the whole file - all the data is needed anyway if (use_mmap) { mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa())); } + // compute the total size of all tensors for progress reporting for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, gguf_get_tensor_name(ctx_gguf, i)); size_data += ggml_nbytes(cur); @@ -3173,16 +3165,15 @@ static bool llm_load_tensors( model.main_gpu = main_gpu; model.n_gpu_layers = n_gpu_layers; - size_t ctx_size = ggml_tensor_overhead()*ml.n_tensors; - const int64_t n_layer = hparams.n_layer; const int64_t i_gpu_start = std::max((int64_t) hparams.n_layer - n_gpu_layers, (int64_t) 0); - - // there is very little benefit to offloading the input layers, so always keep it on the CPU + // there is very little benefit to offloading the input layer, so always keep it on the CPU model.buft_input = llama_default_buffer_type_cpu(true); + model.buft_layer.resize(n_layer); - // cpu layers + + // assign cpu layers for (int64_t i = 0; i < i_gpu_start; ++i) { model.buft_layer[i] = llama_default_buffer_type_cpu(true); } @@ -3191,11 +3182,8 @@ static bool llm_load_tensors( if (split_mode == LLAMA_SPLIT_LAYER) { // calculate the split points int device_count = ggml_backend_cuda_get_device_count(); + bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; }); float splits[GGML_CUDA_MAX_DEVICES]; - if (tensor_split != nullptr) { - std::copy(tensor_split, tensor_split + device_count, splits); - } - bool all_zero = tensor_split == nullptr || std::all_of(splits, splits + device_count, [](float x) { return x == 0.0f; }); if (all_zero) { // default split, by free memory for (int i = 0; i < device_count; ++i) { @@ -3204,7 +3192,11 @@ static bool llm_load_tensors( ggml_backend_cuda_get_device_memory(i, &total, &free); splits[i] = free; } + } else { + std::copy(tensor_split, tensor_split + device_count, splits); } + + // sum and normalize the splits to get the split points float split_sum = 0.0f; for (int i = 0; i < device_count; ++i) { split_sum += splits[i]; @@ -3214,15 +3206,15 @@ static bool llm_load_tensors( splits[i] /= split_sum; } - // assign GPU layers according to the splits to the devices + // assign the repeating layers to the devices according to the splits int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1); for (int64_t i = i_gpu_start; i < n_layer; ++i) { int layer_gpu = std::upper_bound(splits, splits + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits; model.buft_layer[i] = llama_default_buffer_type_offload(layer_gpu); } - // output layer + // assign the output layer if (n_gpu_layers > n_layer) { - int layer_gpu = std::upper_bound(splits, splits + device_count, float(n_layer)/act_gpu_layers) - splits; + int layer_gpu = std::upper_bound(splits, splits + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits; model.buft_output = llama_default_buffer_type_offload(layer_gpu); } else { model.buft_output = llama_default_buffer_type_cpu(true); @@ -3234,16 +3226,17 @@ static bool llm_load_tensors( if (split_mode == LLAMA_SPLIT_ROW) { split_buft = llama_default_buffer_type_split(main_gpu, tensor_split); } else { + // LLAMA_SPLIT_NONE or LLAMA_SPLIT_LAYER in backends where it is not supported split_buft = llama_default_buffer_type_offload(main_gpu); } - // repeating layers + // assign the repeating layers for (int64_t i = 
i_gpu_start; i < n_layer; ++i) { model.buft_layer[i] = { split_buft, llama_default_buffer_type_offload(main_gpu) }; } - // output layer + // assign the output layer if (n_gpu_layers > n_layer) { model.buft_output = { split_buft, @@ -3266,6 +3259,7 @@ static bool llm_load_tensors( } // create one context per buffer type + size_t ctx_size = ggml_tensor_overhead()*ml.n_tensors; std::map ctx_map; for (auto & it : buft_layer_count) { struct ggml_init_params params = { @@ -3289,14 +3283,11 @@ static bool llm_load_tensors( const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); const int64_t n_embd_gqa = n_embd_v_gqa; - const int64_t n_layer = hparams.n_layer; const int64_t n_vocab = hparams.n_vocab; const int64_t n_ff = hparams.n_ff; GGML_ASSERT(n_embd_gqa == n_embd_k_gqa); - // FIXME: for metal, it may be better to put the input on the GPU context - however it may make no difference in practice, - // and it would increase the metal buffer size ggml_context * ctx_input = ctx_map.at(model.buft_input.buft); ggml_context * ctx_output = ctx_map.at(model.buft_output.buft); ggml_context * ctx_output_split = ctx_map.at(model.buft_output.buft_matrix); @@ -3617,8 +3608,8 @@ static bool llm_load_tensors( layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}); - layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd * 3}); - layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd * 3}); + layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3}); + layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3}); layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}); @@ -3736,8 +3727,7 @@ static bool llm_load_tensors( ml.init_mapping(true, use_mlock ? 
&model.mlock_mmap : nullptr); - // create backend buffers - + // create the backend buffers std::vector> ctx_bufs; for (auto & it : ctx_map) { @@ -3745,8 +3735,9 @@ static bool llm_load_tensors( ggml_context * ctx = it.second; ggml_backend_buffer_t buf = nullptr; - // only the region containing the tensors in the model is mapped to the backend buffer - // this is important for metal: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers + // only the mmap region containing the tensors in the model is mapped to the backend buffer + // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers + // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size if (ml.use_mmap && buft == llama_default_buffer_type_cpu(true)) { size_t first, last; ml.get_mapping_range(&first, &last, ctx); @@ -3771,7 +3762,7 @@ static bool llm_load_tensors( throw std::runtime_error("failed to allocate buffer"); } // indicate that this buffer contains weights - // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are always scheduled to the backend that contains the weight + // this is used by ggml_backend_sched to improve op scheduling -> ops that use a weight are preferably scheduled to the backend that contains the weight ggml_backend_buffer_set_usage(buf, GGML_BACKEND_BUFFER_USAGE_WEIGHTS); model.bufs.push_back(buf); ctx_bufs.emplace_back(ctx, buf); @@ -3803,7 +3794,7 @@ static bool llm_load_tensors( } } - // load data + // load tensor data for (auto & it : ctx_bufs) { ggml_context * ctx = it.first; ggml_backend_buffer_t buf = it.second; @@ -3849,7 +3840,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, cons return -2; } } catch (const std::exception & err) { - LLAMA_LOG_ERROR("error loading model: %s\n", err.what()); + LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what()); return -1; } From 5d2dffcf4881a05d3dc57f3931467c140b202d98 Mon Sep 17 00:00:00 2001 From: slaren Date: Wed, 10 Jan 2024 01:07:56 +0100 Subject: [PATCH 21/34] cuda : only use batched_cublas with batched mat muls (fixes fp16 tg perf) --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 484718bb022d4..b5a4a7349a15b 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9201,7 +9201,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1 } else if (!split && all_on_device && !fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) { // KQV single-batch ggml_cuda_mul_mat_vec_nc(src0, src1, dst); - } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) { + } else if (!split && all_on_device && fp16_performance_good && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) { // KQ + KQV multi-batch ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst); } else if (src0->type == GGML_TYPE_F32) { From 07a1b052e5cc3420bb712840b186d90436abd43d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 10 Jan 2024 10:15:36 +0200 Subject: [PATCH 22/34] llama : on Metal, by default offload the full model ggml-ci --- llama.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) 
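For context on the new default shown in the diff below: a minimal sketch of how a caller can still keep the whole model on the CPU by overriding n_gpu_layers after llama_model_default_params(), assuming the llama.h API used throughout this series (llama_model_default_params / llama_load_model_from_file); the model path is only a placeholder.

    // on Metal builds the default params now request full offload (n_gpu_layers = 999)
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 0; // override: keep all layers on the CPU
    llama_model * model = llama_load_model_from_file("models/7B/ggml-model-q4_0.gguf", mparams);
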
diff --git a/llama.cpp b/llama.cpp index ceb70025d20eb..b2e2ca298c1c3 100644 --- a/llama.cpp +++ b/llama.cpp @@ -9069,7 +9069,8 @@ struct llama_model_params llama_model_default_params() { }; #ifdef GGML_USE_METAL - result.n_gpu_layers = 1; + // note: we usually have plenty of VRAM, so by default offload all layers to the GPU + result.n_gpu_layers = 999; #endif return result; From 3cd0cbb1b5db24899fcf4fa7dd2feeeb28e77723 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 10 Jan 2024 11:50:10 +0200 Subject: [PATCH 23/34] metal : page align the data ptr (#4854) --- ggml-metal.m | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ggml-metal.m b/ggml-metal.m index 1e7ed6bb8b756..547b618b49f62 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2656,6 +2656,14 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz ctx->n_buffers = 0; const size_t size_page = sysconf(_SC_PAGESIZE); + + // page-align the data ptr + { + const uintptr_t offs = (uintptr_t) data % size_page; + data = (void *) ((char *) data - offs); + size += offs; + } + size_t size_aligned = size; if ((size_aligned % size_page) != 0) { size_aligned += (size_page - (size_aligned % size_page)); From 74066f8c411ac727a1d43081d98a26a6f8abddf4 Mon Sep 17 00:00:00 2001 From: slaren Date: Wed, 10 Jan 2024 13:27:19 +0100 Subject: [PATCH 24/34] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Johannes Gäßler --- ggml-backend.c | 2 +- llama.h | 8 +++++--- tests/test-backend-ops.cpp | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 25d9c98d593ab..535426b9a9a82 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1006,7 +1006,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g // pass 2: assign backends to ops from current assignments // start from the end and assign the same backend to previous ops - // expand gpu backends (ie non last prio) up and down, ignoring cpu + // expand gpu backends (i.e. 
non last prio) up and down, ignoring cpu // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops // pass 2.1 expand gpu up diff --git a/llama.h b/llama.h index 5dc4c93571a58..d387ad77bf179 100644 --- a/llama.h +++ b/llama.h @@ -185,9 +185,11 @@ extern "C" { struct llama_model_params { int32_t n_gpu_layers; // number of layers to store in VRAM enum llama_split_mode split_mode; // how to split the model across multiple GPUs - // the GPU that is used for the model (LLAMA_SPLIT_NONE), - // for small tensors and intermediate results (LLAMA_SPLIT_ROW) - // ignored for LLAMA_SPLIT_LAYER + + // main_gpu interpretation depends on split_mode: + // LLAMA_SPLIT_NONE: the GPU that is used for the entire model + // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results + // LLAMA_SPLIT_LAYER: ignored int32_t main_gpu; // proportion of the model (layers or rows) to offload to each GPU, size: LLAMA_MAX_DEVICES const float * tensor_split; diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index e27f9f2844c66..d9b8b106a6033 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -468,7 +468,7 @@ struct test_case { GGML_UNUSED(index); }; - bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud); + const bool cmp_ok = ggml_backend_compare_graph_backend(backend1, backend2, gf, callback, &ud); if (!cmp_ok) { printf("compare failed "); From c522c112b3c791d08905e921048e51db5a57f17e Mon Sep 17 00:00:00 2001 From: slaren Date: Wed, 10 Jan 2024 21:31:28 +0100 Subject: [PATCH 25/34] cuda : fix split buffer free --- ggml-cuda.cu | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index b5a4a7349a15b..f83d60fa3d93a 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -10092,9 +10092,13 @@ struct ggml_backend_cuda_split_buffer_context { for (ggml_tensor_extra_gpu * extra : tensor_extras) { for (int id = 0; id < g_device_count; ++id) { for (int64_t is = 0; is < MAX_STREAMS; ++is) { - CUDA_CHECK(cudaEventDestroy(extra->events[id][is])); + if (extra->events[id][is] != nullptr) { + CUDA_CHECK(cudaEventDestroy(extra->events[id][is])); + } + } + if (extra->data_device[id] != nullptr) { + CUDA_CHECK(cudaFree(extra->data_device[id])); } - CUDA_CHECK(cudaFree(extra->data_device[id])); } delete extra; } From 9d4ba6ed0795a8d83e350ae4093eda871cdcfa33 Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 11 Jan 2024 03:13:09 +0100 Subject: [PATCH 26/34] address review comments --- ggml-backend.c | 9 +++++---- ggml-cuda.cu | 43 ++++++++++++++++++++++--------------------- llama.cpp | 4 +++- 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 535426b9a9a82..9b5f1ffba57e2 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1006,7 +1006,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g // pass 2: assign backends to ops from current assignments // start from the end and assign the same backend to previous ops - // expand gpu backends (i.e. non last prio) up and down, ignoring cpu + // expand gpu backends (i.e. 
non last prio) up and down, ignoring cpu (the lowest priority backend) // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops // pass 2.1 expand gpu up @@ -1020,7 +1020,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g ggml_tallocr_t node_allocr = node_allocr(node); if (node_allocr != NULL) { if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) { - // skip cpu + // skip cpu (lowest prio backend) cur_allocr = NULL; } else { cur_allocr = node_allocr; @@ -1043,7 +1043,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g ggml_tallocr_t node_allocr = node_allocr(node); if (node_allocr != NULL) { if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) { - // skip cpu + // skip cpu (lowest prio backend) cur_allocr = NULL; } else { cur_allocr = node_allocr; @@ -1107,9 +1107,10 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g // pass 4: split graph, find tensors that need to be copied { int cur_split = 0; + // find the backend of the first split, skipping view ops for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; - if (node->view_src == NULL) { + if (!ggml_is_view_op(node->op)) { sched->splits[0].tallocr = node_allocr(node); break; } diff --git a/ggml-cuda.cu b/ggml-cuda.cu index f83d60fa3d93a..b0435b81f8ab7 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9856,30 +9856,31 @@ void ggml_cuda_get_device_description(int device, char * description, size_t des #define UNUSED GGML_UNUSED -struct ggml_backend_context_cuda { +struct ggml_backend_cuda_context { int device; std::string name; }; // cuda buffer -struct ggml_backend_buffer_context_cuda { +struct ggml_backend_cuda_buffer_context { int device; void * dev_ptr = nullptr; ggml_tensor_extra_gpu * temp_tensor_extras = nullptr; size_t temp_tensor_extra_index = 0; std::string name; - ggml_backend_buffer_context_cuda(int device, void * dev_ptr) : + ggml_backend_cuda_buffer_context(int device, void * dev_ptr) : device(device), dev_ptr(dev_ptr), name(GGML_CUDA_NAME + std::to_string(device)) { } - ~ggml_backend_buffer_context_cuda() { + ~ggml_backend_cuda_buffer_context() { delete[] temp_tensor_extras; } ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() { + // TODO: remove GGML_CUDA_MAX_NODES, allocate dynamically and reuse in backend_buffer_reset if (temp_tensor_extras == nullptr) { temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_CUDA_MAX_NODES]; } @@ -9894,23 +9895,23 @@ struct ggml_backend_buffer_context_cuda { }; static const char * ggml_backend_cuda_buffer_get_name(ggml_backend_buffer_t buffer) { - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; return ctx->name.c_str(); } static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) { - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; CUDA_CHECK(cudaFree(ctx->dev_ptr)); delete ctx; } static void * ggml_backend_cuda_buffer_get_base(ggml_backend_buffer_t buffer) { - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; return ctx->dev_ptr; } static void 
ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; if (tensor->view_src != NULL && tensor->view_offs == 0) { assert(tensor->view_src->buffer->buft == buffer->buft); @@ -9944,7 +9945,7 @@ static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, g static void ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaDeviceSynchronize()); @@ -9955,7 +9956,7 @@ static void ggml_backend_cuda_buffer_set_tensor(ggml_backend_buffer_t buffer, gg static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaDeviceSynchronize()); @@ -9963,7 +9964,7 @@ static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, co } static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { - ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context; + ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaDeviceSynchronize()); @@ -10011,7 +10012,7 @@ static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_bac return nullptr; } - ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda(buft_ctx->device, dev_ptr); + ggml_backend_cuda_buffer_context * ctx = new ggml_backend_cuda_buffer_context(buft_ctx->device, dev_ptr); return ggml_backend_buffer_init(buft, ggml_backend_cuda_buffer_interface, ctx, size); } @@ -10048,7 +10049,7 @@ static bool ggml_backend_cuda_buffer_type_supports_backend(ggml_backend_buffer_t } ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context; - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; return buft_ctx->device == cuda_ctx->device; } @@ -10420,26 +10421,26 @@ ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() { // backend static const char * ggml_backend_cuda_name(ggml_backend_t backend) { - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; return cuda_ctx->name.c_str(); } static void ggml_backend_cuda_free(ggml_backend_t backend) { - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; delete cuda_ctx; delete backend; } static ggml_backend_buffer_type_t 
ggml_backend_cuda_get_default_buffer_type(ggml_backend_t backend) { - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; return ggml_backend_cuda_buffer_type(cuda_ctx->device); } static void ggml_backend_cuda_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type"); GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); @@ -10448,7 +10449,7 @@ static void ggml_backend_cuda_set_tensor_async(ggml_backend_t backend, ggml_tens } static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; GGML_ASSERT(tensor->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && "unsupported buffer type"); GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU); @@ -10457,7 +10458,7 @@ static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggm } static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[cuda_ctx->device][0])); @@ -10465,7 +10466,7 @@ static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { } static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { - ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context; + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; ggml_cuda_set_main_device(cuda_ctx->device); @@ -10642,7 +10643,7 @@ ggml_backend_t ggml_backend_cuda_init(int device) { // not strictly necessary, but it may reduce the overhead of the first graph_compute ggml_cuda_set_main_device(device); - ggml_backend_context_cuda * ctx = new ggml_backend_context_cuda { + ggml_backend_cuda_context * ctx = new ggml_backend_cuda_context { /* .device = */ device, /* .name = */ GGML_CUDA_NAME + std::to_string(device), }; diff --git a/llama.cpp b/llama.cpp index b2e2ca298c1c3..7a573d77bee0e 100644 --- a/llama.cpp +++ b/llama.cpp @@ -9267,6 +9267,8 @@ struct llama_context * llama_new_context_with_model( ctx->backend_metal = ggml_backend_metal_init(); if (ctx->backend_metal == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__); + llama_free(ctx); + return nullptr; } ctx->backends.push_back(ctx->backend_metal); } @@ -9298,7 +9300,7 @@ struct llama_context * llama_new_context_with_model( ctx->backend_cpu = ggml_backend_cpu_init(); if (ctx->backend_cpu == nullptr) { LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__); - delete ctx; + llama_free(ctx); return nullptr; } ctx->backends.push_back(ctx->backend_cpu); From d83c08402093c49fbff6fb4c377fb91c3a934572 Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 11 Jan 2024 03:25:36 +0100 Subject: [PATCH 27/34] llama-bench : add split-mode parameter --- 
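A usage sketch for the new flag (flag names and accepted values are taken from the parser in the diff below; the model path is only a placeholder). Multiple split modes can be benchmarked in a single run by separating them with commas:

    ./llama-bench -m models/7B/ggml-model-q4_0.gguf -ngl 99 -sm none,layer,row -mg 0
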
examples/llama-bench/llama-bench.cpp | 146 ++++++++++++++------------- 1 file changed, 78 insertions(+), 68 deletions(-) diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 7f7186cded527..97325b5bd634f 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -128,6 +128,25 @@ static std::string get_gpu_info() { // command line params enum output_formats {CSV, JSON, MARKDOWN, SQL}; +static const char * output_format_str(output_formats format) { + switch (format) { + case CSV: return "csv"; + case JSON: return "json"; + case MARKDOWN: return "md"; + case SQL: return "sql"; + default: GGML_ASSERT(!"invalid output format"); + } +} + +static const char * split_mode_str(llama_split_mode mode) { + switch (mode) { + case LLAMA_SPLIT_NONE: return "none"; + case LLAMA_SPLIT_LAYER: return "layer"; + case LLAMA_SPLIT_ROW: return "row"; + default: GGML_ASSERT(!"invalid split mode"); + } +} + struct cmd_params { std::vector model; std::vector n_prompt; @@ -137,6 +156,7 @@ struct cmd_params { std::vector type_v; std::vector n_threads; std::vector n_gpu_layers; + std::vector split_mode; std::vector main_gpu; std::vector no_kv_offload; std::vector mul_mat_q; @@ -155,6 +175,7 @@ static const cmd_params cmd_params_defaults = { /* type_v */ {GGML_TYPE_F16}, /* n_threads */ {get_num_physical_cores()}, /* n_gpu_layers */ {99}, + /* split_mode */ {LLAMA_SPLIT_LAYER}, /* main_gpu */ {0}, /* no_kv_offload */ {false}, /* mul_mat_q */ {true}, @@ -169,21 +190,22 @@ static void print_usage(int /* argc */, char ** argv) { printf("\n"); printf("options:\n"); printf(" -h, --help\n"); - printf(" -m, --model (default: %s)\n", join(cmd_params_defaults.model, ",").c_str()); - printf(" -p, --n-prompt (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str()); - printf(" -n, --n-gen (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str()); - printf(" -b, --batch-size (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str()); - printf(" -ctk , --cache-type-k (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str()); - printf(" -ctv , --cache-type-v (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str()); - printf(" -t, --threads (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str()); - printf(" -ngl, --n-gpu-layers (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str()); - printf(" -mg, --main-gpu (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str()); - printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str()); - printf(" -mmq, --mul-mat-q <0|1> (default: %s)\n", join(cmd_params_defaults.mul_mat_q, ",").c_str()); - printf(" -ts, --tensor_split \n"); - printf(" -r, --repetitions (default: %d)\n", cmd_params_defaults.reps); - printf(" -o, --output (default: %s)\n", cmd_params_defaults.output_format == CSV ? "csv" : cmd_params_defaults.output_format == JSON ? "json" : cmd_params_defaults.output_format == MARKDOWN ? "md" : "sql"); - printf(" -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? 
"1" : "0"); + printf(" -m, --model (default: %s)\n", join(cmd_params_defaults.model, ",").c_str()); + printf(" -p, --n-prompt (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str()); + printf(" -n, --n-gen (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str()); + printf(" -b, --batch-size (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str()); + printf(" -ctk , --cache-type-k (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str()); + printf(" -ctv , --cache-type-v (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str()); + printf(" -t, --threads (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str()); + printf(" -ngl, --n-gpu-layers (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str()); + printf(" -sm, --split-mode (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str()); + printf(" -mg, --main-gpu (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str()); + printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str()); + printf(" -mmq, --mul-mat-q <0|1> (default: %s)\n", join(cmd_params_defaults.mul_mat_q, ",").c_str()); + printf(" -ts, --tensor_split (default: 0)\n"); + printf(" -r, --repetitions (default: %d)\n", cmd_params_defaults.reps); + printf(" -o, --output (default: %s)\n", output_format_str(cmd_params_defaults.output_format)); + printf(" -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0"); printf("\n"); printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n"); } @@ -306,6 +328,28 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { } auto p = split(argv[i], split_delim); params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end()); + } else if (arg == "-sm" || arg == "--split-mode") { + if (++i >= argc) { + invalid_param = true; + break; + } + auto p = split(argv[i], split_delim); + std::vector modes; + for (const auto & m : p) { + llama_split_mode mode; + if (m == "none") { + mode = LLAMA_SPLIT_NONE; + } else if (m == "layer") { + mode = LLAMA_SPLIT_LAYER; + } else if (m == "row") { + mode = LLAMA_SPLIT_ROW; + } else { + invalid_param = true; + break; + } + modes.push_back(mode); + } + params.split_mode.insert(params.split_mode.end(), modes.begin(), modes.end()); } else if (arg == "-mg" || arg == "--main-gpu") { if (++i >= argc) { invalid_param = true; @@ -392,6 +436,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) { if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; } if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; } if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; } + if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; } if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; } if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; } if (params.mul_mat_q.empty()) { params.mul_mat_q = cmd_params_defaults.mul_mat_q; } @@ -410,6 +455,7 @@ struct cmd_params_instance { ggml_type type_v; int n_threads; int n_gpu_layers; + llama_split_mode split_mode; int main_gpu; bool no_kv_offload; bool mul_mat_q; @@ -419,6 +465,7 @@ struct cmd_params_instance { llama_model_params mparams = llama_model_default_params(); 
mparams.n_gpu_layers = n_gpu_layers; + mparams.split_mode = split_mode; mparams.main_gpu = main_gpu; mparams.tensor_split = tensor_split.data(); @@ -428,6 +475,7 @@ struct cmd_params_instance { bool equal_mparams(const cmd_params_instance & other) const { return model == other.model && n_gpu_layers == other.n_gpu_layers && + split_mode == other.split_mode && main_gpu == other.main_gpu && tensor_split == other.tensor_split; } @@ -446,45 +494,13 @@ struct cmd_params_instance { } }; -static std::vector get_cmd_params_instances_int(const cmd_params & params, int n_gen, int n_prompt) { - std::vector instances; - - for (const auto & m : params.model) - for (const auto & nl : params.n_gpu_layers) - for (const auto & mg : params.main_gpu) - for (const auto & ts : params.tensor_split) - for (const auto & nb : params.n_batch) - for (const auto & tk : params.type_k) - for (const auto & tv : params.type_v) - for (const auto & mmq : params.mul_mat_q) - for (const auto & nkvo : params.no_kv_offload) - for (const auto & nt : params.n_threads) { - cmd_params_instance instance = { - /* .model = */ m, - /* .n_prompt = */ n_prompt, - /* .n_gen = */ n_gen, - /* .n_batch = */ nb, - /* .type_k = */ tk, - /* .type_v = */ tv, - /* .n_threads = */ nt, - /* .n_gpu_layers = */ nl, - /* .main_gpu = */ mg, - /* .no_kv_offload= */ nkvo, - /* .mul_mat_q = */ mmq, - /* .tensor_split = */ ts, - }; - instances.push_back(instance); - } - return instances; -} - static std::vector get_cmd_params_instances(const cmd_params & params) { std::vector instances; -#if 1 // this ordering minimizes the number of times that each model needs to be reloaded for (const auto & m : params.model) for (const auto & nl : params.n_gpu_layers) + for (const auto & sm : params.split_mode) for (const auto & mg : params.main_gpu) for (const auto & ts : params.tensor_split) for (const auto & nb : params.n_batch) @@ -506,6 +522,7 @@ static std::vector get_cmd_params_instances(const cmd_param /* .type_v = */ tv, /* .n_threads = */ nt, /* .n_gpu_layers = */ nl, + /* .split_mode = */ sm, /* .main_gpu = */ mg, /* .no_kv_offload= */ nkvo, /* .mul_mat_q = */ mmq, @@ -527,6 +544,7 @@ static std::vector get_cmd_params_instances(const cmd_param /* .type_v = */ tv, /* .n_threads = */ nt, /* .n_gpu_layers = */ nl, + /* .split_mode = */ sm, /* .main_gpu = */ mg, /* .no_kv_offload= */ nkvo, /* .mul_mat_q = */ mmq, @@ -535,24 +553,6 @@ static std::vector get_cmd_params_instances(const cmd_param instances.push_back(instance); } } -#else - // this ordering separates the prompt and generation tests - for (const auto & n_prompt : params.n_prompt) { - if (n_prompt == 0) { - continue; - } - auto instances_prompt = get_cmd_params_instances_int(params, 0, n_prompt); - instances.insert(instances.end(), instances_prompt.begin(), instances_prompt.end()); - } - - for (const auto & n_gen : params.n_gen) { - if (n_gen == 0) { - continue; - } - auto instances_gen = get_cmd_params_instances_int(params, n_gen, 0); - instances.insert(instances.end(), instances_gen.begin(), instances_gen.end()); - } -#endif return instances; } @@ -576,6 +576,7 @@ struct test { ggml_type type_k; ggml_type type_v; int n_gpu_layers; + llama_split_mode split_mode; int main_gpu; bool no_kv_offload; bool mul_mat_q; @@ -597,6 +598,7 @@ struct test { type_k = inst.type_k; type_v = inst.type_v; n_gpu_layers = inst.n_gpu_layers; + split_mode = inst.split_mode; main_gpu = inst.main_gpu; no_kv_offload = inst.no_kv_offload; mul_mat_q = inst.mul_mat_q; @@ -660,7 +662,8 @@ struct test { "cpu_info", "gpu_info", 
"model_filename", "model_type", "model_size", "model_n_params", "n_batch", "n_threads", "type_k", "type_v", - "n_gpu_layers", "main_gpu", "no_kv_offload", + "n_gpu_layers", "split_mode", + "main_gpu", "no_kv_offload", "mul_mat_q", "tensor_split", "n_prompt", "n_gen", "test_time", "avg_ns", "stddev_ns", @@ -711,7 +714,8 @@ struct test { cpu_info, gpu_info, model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params), std::to_string(n_batch), std::to_string(n_threads), ggml_type_name(type_k), ggml_type_name(type_v), - std::to_string(n_gpu_layers), std::to_string(main_gpu), std::to_string(no_kv_offload), + std::to_string(n_gpu_layers), split_mode_str(split_mode), + std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(mul_mat_q), tensor_split_str, std::to_string(n_prompt), std::to_string(n_gen), test_time, std::to_string(avg_ns()), std::to_string(stdev_ns()), @@ -867,6 +871,9 @@ struct markdown_printer : public printer { if (field == "n_gpu_layers") { return "ngl"; } + if (field == "split_mode") { + return "sm"; + } if (field == "n_threads") { return "threads"; } @@ -907,6 +914,9 @@ struct markdown_printer : public printer { if (params.main_gpu.size() > 1 || params.main_gpu != cmd_params_defaults.main_gpu) { fields.push_back("main_gpu"); } + if (params.split_mode.size() > 1 || params.split_mode != cmd_params_defaults.split_mode) { + fields.push_back("split_mode"); + } if (params.mul_mat_q.size() > 1 || params.mul_mat_q != cmd_params_defaults.mul_mat_q) { fields.push_back("mul_mat_q"); } From 6dcc42bd6bf8d6fa7fc91e673a58c13489385ac5 Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 11 Jan 2024 03:28:07 +0100 Subject: [PATCH 28/34] fix whitespace --- llama.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/llama.h b/llama.h index d387ad77bf179..2d8ed371bf1da 100644 --- a/llama.h +++ b/llama.h @@ -185,12 +185,13 @@ extern "C" { struct llama_model_params { int32_t n_gpu_layers; // number of layers to store in VRAM enum llama_split_mode split_mode; // how to split the model across multiple GPUs - + // main_gpu interpretation depends on split_mode: // LLAMA_SPLIT_NONE: the GPU that is used for the entire model // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results // LLAMA_SPLIT_LAYER: ignored int32_t main_gpu; + // proportion of the model (layers or rows) to offload to each GPU, size: LLAMA_MAX_DEVICES const float * tensor_split; From 42aa835c58bc9352722d908ae3ae4591ec6b2c97 Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 11 Jan 2024 12:00:18 +0100 Subject: [PATCH 29/34] opencl : fix double initialization --- ggml-opencl.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp index cfa766eb1fbb2..732e24c9129aa 100644 --- a/ggml-opencl.cpp +++ b/ggml-opencl.cpp @@ -934,6 +934,7 @@ void ggml_cl_init(void) { if (initialized) { return; } + initialized = true; cl_int err; From c4867196b4d67a55688c9b4ffda0c29f6e8c8409 Mon Sep 17 00:00:00 2001 From: slaren Date: Thu, 11 Jan 2024 14:46:53 +0100 Subject: [PATCH 30/34] server : add --split-mode parameter --- examples/server/server.cpp | 40 +++++++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 4a0714997f17d..a379dd723daab 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2004,12 +2004,15 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD printf(" 
-ngl N, --n-gpu-layers N\n"); printf(" number of layers to store in VRAM\n"); + printf(" -sm SPLIT_MODE, --split-mode SPLIT_MODE\n"); + printf(" how to split the model across multiple GPUs, one of:\n"); + printf(" - none: use one GPU only\n"); + printf(" - layer (default): split layers and KV across GPUs\n"); + printf(" - row: split rows across GPUs\n"); printf(" -ts SPLIT --tensor-split SPLIT\n"); - printf(" how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n"); - printf(" -mg i, --main-gpu i the GPU to use for scratch and small tensors\n"); - printf(" -nommq, --no-mul-mat-q\n"); - printf(" use cuBLAS instead of custom mul_mat_q CUDA kernels.\n"); - printf(" Not recommended since this is both slower and uses more VRAM.\n"); + printf(" fraction of the model to offload to each GPU, comma-separated list of proportions, e.g. 3,1\n"); + printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n"); + printf(" or for intermediate results and KV (with split-mode = row)\n"); #endif printf(" -m FNAME, --model FNAME\n"); printf(" model path (default: %s)\n", params.model.c_str()); @@ -2230,6 +2233,33 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, "See main README.md for information on enabling GPU BLAS support", {{"n_gpu_layers", params.n_gpu_layers}}); #endif + } + else if (arg == "--split-mode" || arg == "-sm") + { + if (++i >= argc) { + invalid_param = true; + break; + } + std::string arg_next = argv[i]; + if (arg_next == "none") + { + params.split_mode = LLAMA_SPLIT_NONE; + } + else if (arg_next == "layer") + { + params.split_mode = LLAMA_SPLIT_LAYER; + } + else if (arg_next == "row") + { + params.split_mode = LLAMA_SPLIT_ROW; + } + else { + invalid_param = true; + break; + } +#ifndef GGML_USE_CUBLAS + fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. 
Setting the split mode has no effect.\n"); +#endif // GGML_USE_CUBLAS } else if (arg == "--tensor-split" || arg == "-ts") { From 23c14ef53e31c428a6e0339ac149ac6f6e595a41 Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 12 Jan 2024 00:50:00 +0100 Subject: [PATCH 31/34] use async copy and compute to improve multi-gpu performance ggml-ci --- ggml-alloc.c | 5 +- ggml-backend-impl.h | 33 +++--- ggml-backend.c | 255 +++++++++++++++++++++++--------------------- ggml-backend.h | 1 - ggml-cuda.cu | 54 ++++++++-- ggml-metal.m | 20 ++-- 6 files changed, 201 insertions(+), 167 deletions(-) diff --git a/ggml-alloc.c b/ggml-alloc.c index 7836f064e8d89..4d1ceb0477a7a 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -102,8 +102,6 @@ void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) { } } - AT_PRINTF("block %d\n", best_fit_block); - if (best_fit_block == -1) { // the last block is our last resort struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1]; @@ -117,6 +115,7 @@ void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) { return; } } + struct free_block * block = &alloc->free_blocks[best_fit_block]; void * addr = block->addr; block->addr = (char*)block->addr + size; @@ -129,6 +128,8 @@ void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) { } } + AT_PRINTF("block %d, addr %p\n", best_fit_block, addr); + tensor->data = addr; tensor->buffer = alloc->buffer; if (!alloc->measure) { diff --git a/ggml-backend-impl.h b/ggml-backend-impl.h index 859e923e26200..1db32901fe6c7 100644 --- a/ggml-backend-impl.h +++ b/ggml-backend-impl.h @@ -35,17 +35,15 @@ extern "C" { typedef void * ggml_backend_buffer_context_t; struct ggml_backend_buffer_i { - const char * (*get_name) (ggml_backend_buffer_t buffer); - void (*free_buffer) (ggml_backend_buffer_t buffer); - void * (*get_base) (ggml_backend_buffer_t buffer); - void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); - void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); - void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); - // (optional) copy tensor between different buffer-type, allow for single-copy tranfers - void (*cpy_tensor_from)(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); - void (*clear) (ggml_backend_buffer_t buffer, uint8_t value); - void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state due to tensor initialization, such as tensor extras + const char * (*get_name) (ggml_backend_buffer_t buffer); + void (*free_buffer)(ggml_backend_buffer_t buffer); + void * (*get_base) (ggml_backend_buffer_t buffer); + void (*init_tensor)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); + void (*set_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); + void (*get_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + bool (*cpy_tensor) (ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst); // dst is in the buffer, src may be in any buffer + void (*clear) (ggml_backend_buffer_t buffer, uint8_t value); + void (*reset) (ggml_backend_buffer_t buffer); // reset any internal state 
due to tensor initialization, such as tensor extras }; struct ggml_backend_buffer { @@ -62,6 +60,8 @@ extern "C" { ggml_backend_buffer_context_t context, size_t size); + // do not use directly, use ggml_backend_tensor_copy instead + bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst); // // Backend @@ -77,14 +77,12 @@ extern "C" { // buffer allocation ggml_backend_buffer_type_t (*get_default_buffer_type)(ggml_backend_t backend); - // (optional) asynchroneous tensor data access + // (optional) asynchronous tensor data access void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size); void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size); + bool (*cpy_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst); - // (optional) asynchroneous tensor copy - void (*cpy_tensor_from_async)(ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst); - void (*cpy_tensor_to_async) (ggml_backend_t backend, const struct ggml_tensor * src, struct ggml_tensor * dst); - + // (optional) complete all pending operations void (*synchronize)(ggml_backend_t backend); // compute graph with a plan @@ -92,7 +90,7 @@ extern "C" { void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan); void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan); - // compute graph without a plan + // compute graph without a plan (async) bool (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph); // check if the backend supports an operation @@ -105,7 +103,6 @@ extern "C" { ggml_backend_context_t context; }; - // // Backend registry // diff --git a/ggml-backend.c b/ggml-backend.c index 9b5f1ffba57e2..62ffd07766af6 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -132,6 +132,14 @@ void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) { } } +bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) { + ggml_backend_buffer_t dst_buf = dst->view_src ? 
dst->view_src->buffer : dst->buffer; + if (dst_buf->iface.cpy_tensor) { + return src->buffer->iface.cpy_tensor(dst_buf, src, dst); + } + return false; +} + // backend const char * ggml_backend_name(ggml_backend_t backend) { @@ -165,30 +173,42 @@ void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - backend->iface.set_tensor_async(backend, tensor, data, offset, size); + if (backend->iface.set_tensor_async == NULL) { + ggml_backend_tensor_set(tensor, data, offset, size); + } else { + backend->iface.set_tensor_async(backend, tensor, data, offset, size); + } } void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - backend->iface.get_tensor_async(backend, tensor, data, offset, size); + if (backend->iface.get_tensor_async == NULL) { + ggml_backend_tensor_get(tensor, data, offset, size); + } else { + backend->iface.get_tensor_async(backend, tensor, data, offset, size); + } } void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer; + GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); - GGML_ASSERT(tensor->buffer != NULL && "tensor buffer not set"); + GGML_ASSERT(buf != NULL && "tensor buffer not set"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds"); - tensor->buffer->iface.set_tensor(tensor->buffer, tensor, data, offset, size); + tensor->buffer->iface.set_tensor(buf, tensor, data, offset, size); } void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { + ggml_backend_buffer_t buf = tensor->view_src ? 
tensor->view_src->buffer : tensor->buffer; + GGML_ASSERT(tensor->data != NULL && "tensor not allocated"); GGML_ASSERT(tensor->buffer != NULL && "tensor buffer not set"); GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds"); - tensor->buffer->iface.get_tensor(tensor->buffer, tensor, data, offset, size); + tensor->buffer->iface.get_tensor(buf, tensor, data, offset, size); } void ggml_backend_synchronize(ggml_backend_t backend) { @@ -209,19 +229,10 @@ void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_pla void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) { backend->iface.graph_plan_compute(backend, plan); - - // TODO: optional sync - ggml_backend_synchronize(backend); } bool ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) { - if (!backend->iface.graph_compute(backend, cgraph)) { - return false; - } - - // TODO: optional sync - ggml_backend_synchronize(backend); - return true; + return backend->iface.graph_compute(backend, cgraph); } bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) { @@ -246,28 +257,20 @@ static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml } void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) { - //printf("src: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", src->name, (int)src->ne[0], (int)src->ne[1], (int)src->ne[2], (int)src->ne[3], (int)src->nb[0], (int)src->nb[1], (int)src->nb[2], (int)src->nb[3]); - //printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]); GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); - // fprintf(stderr, "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src)); - if (src == dst) { return; } - // TODO: allow backends to support copy to/from same backend - - if (dst->buffer->iface.cpy_tensor_from != NULL) { - dst->buffer->iface.cpy_tensor_from(dst->buffer, src, dst); - } else if (src->buffer->iface.cpy_tensor_to != NULL) { - src->buffer->iface.cpy_tensor_to(src->buffer, src, dst); - } else { - // shouldn't be hit when copying from/to CPU - #ifndef NDEBUG - fprintf(stderr, "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to " - "are implemented for %s and %s, falling back to get/set\n", src->name, dst->name); - #endif + if (ggml_backend_buffer_is_host(src->buffer)) { + ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); + } else if (ggml_backend_buffer_is_host(dst->buffer)) { + ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); + } else if (!ggml_backend_buffer_copy_tensor(src, dst)) { +#ifndef NDEBUG + fprintf(stderr, "%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer)); +#endif size_t nbytes = ggml_nbytes(src); void * data = malloc(nbytes); ggml_backend_tensor_get(src, data, 0, nbytes); @@ -276,6 +279,31 @@ void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst } } +void ggml_backend_tensor_copy_async(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) { + GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts"); + + if (src == dst) { + return; + } + + if 
(ggml_backend_buft_supports_backend(src->buffer->buft, backend) && ggml_backend_buft_supports_backend(dst->buffer->buft, backend)) { + if (backend->iface.cpy_tensor_async != NULL) { + if (backend->iface.cpy_tensor_async(backend, src, dst)) { + return; + } + } + } + + size_t nbytes = ggml_nbytes(src); + if (ggml_backend_buffer_is_host(src->buffer)) { + ggml_backend_tensor_set_async(backend, dst, src->data, 0, nbytes); + } + else { + ggml_backend_tensor_copy(src, dst); + } +} + + // backend registry #define GGML_MAX_BACKENDS_REG 16 @@ -437,14 +465,12 @@ static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, con GGML_UNUSED(buffer); } -static void ggml_backend_cpu_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { - ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); - - GGML_UNUSED(buffer); -} - -static void ggml_backend_cpu_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { - ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); +static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { + if (ggml_backend_buffer_is_host(src->buffer)) { + memcpy(dst->data, src->data, ggml_nbytes(src)); + return true; + } + return false; GGML_UNUSED(buffer); } @@ -460,8 +486,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i = { /* .init_tensor = */ NULL, // no initialization required /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, - /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from, - /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to, + /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, /* .clear = */ ggml_backend_cpu_buffer_clear, /* .reset = */ NULL, }; @@ -474,8 +499,7 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = { /* .init_tensor = */ NULL, // no initialization required /* .set_tensor = */ ggml_backend_cpu_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cpu_buffer_get_tensor, - /* .cpy_tensor_from = */ ggml_backend_cpu_buffer_cpy_tensor_from, - /* .cpy_tensor_to = */ ggml_backend_cpu_buffer_cpy_tensor_to, + /* .cpy_tensor = */ ggml_backend_cpu_buffer_cpy_tensor, /* .clear = */ ggml_backend_cpu_buffer_clear, /* .reset = */ NULL, }; @@ -683,8 +707,7 @@ static struct ggml_backend_i cpu_backend_i = { /* .get_default_buffer_type = */ ggml_backend_cpu_get_default_buffer_type, /* .set_tensor_async = */ NULL, /* .get_tensor_async = */ NULL, - /* .cpy_tensor_from_async = */ NULL, - /* .cpy_tensor_to_async = */ NULL, + /* .cpy_tensor_async = */ NULL, /* .synchronize = */ NULL, /* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create, /* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free, @@ -748,12 +771,6 @@ struct ggml_backend_sched_split { struct ggml_cgraph graph; }; -// TODO: group all the hash values into a single struct for clarity -//struct sched_hash_value { -// ggml_tallocr_t tallocr; -// struct ggml_tensor * copies[GGML_MAX_BACKENDS]; -//}; - struct ggml_backend_sched { int n_backends; ggml_backend_t backends[GGML_MAX_BACKENDS]; @@ -810,14 +827,22 @@ static int sched_allocr_prio(ggml_backend_sched_t sched, ggml_tallocr_t allocr) return INT_MAX; } -static ggml_backend_t get_buffer_backend(ggml_backend_sched_t sched, ggml_backend_buffer_t buffer) { +static ggml_tallocr_t sched_allocr_from_buffer(ggml_backend_sched_t sched, 
ggml_backend_buffer_t buffer) { if (buffer == NULL) { return NULL; } + + // check if this is already allocate in a allocr buffer (from user manual allocations) + for (int i = 0; i < sched->n_backends; i++) { + if (ggml_tallocr_get_buffer(sched->tallocs[i]) == buffer) { + return sched->tallocs[i]; + } + } + // find highest prio backend that supports the buffer type for (int i = 0; i < sched->n_backends; i++) { if (ggml_backend_buft_supports_backend(buffer->buft, sched->backends[i])) { - return sched->backends[i]; + return sched->tallocs[i]; } } GGML_ASSERT(false && "tensor buffer type not supported by any backend"); @@ -827,7 +852,6 @@ static ggml_backend_t get_allocr_backend(ggml_backend_sched_t sched, ggml_talloc if (allocr == NULL) { return NULL; } - // find highest prio backend that supports the buffer type for (int i = 0; i < sched->n_backends; i++) { if (sched->tallocs[i] == allocr) { return sched->backends[i]; @@ -837,7 +861,7 @@ static ggml_backend_t get_allocr_backend(ggml_backend_sched_t sched, ggml_talloc } #if 0 -static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove +static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug only #define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__) #define GET_CAUSE(node) causes[hash_id(node)] #else @@ -846,49 +870,37 @@ static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_I #endif // returns the backend that should be used for the node based on the current locations -static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) { - // if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there - // ie. kv cache updates - // note that this doesn't allow fallback to CPU. need to add output tensors to the splits to copy the data back to the original backend. 
- +static ggml_tallocr_t sched_allocr_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) { + // assign pre-allocated nodes to their backend // dst - ggml_backend_t cur_backend = get_buffer_backend(sched, node->buffer); - if (cur_backend != NULL) { + ggml_tallocr_t cur_allocr = sched_allocr_from_buffer(sched, node->buffer); + if (cur_allocr != NULL) { SET_CAUSE(node, "1.dst"); - return cur_backend; + return cur_allocr; } - // view_src - if (node->view_src != NULL && get_buffer_backend(sched, node->view_src->buffer) != NULL) { - SET_CAUSE(node, "1.vsrc"); - return get_buffer_backend(sched, node->view_src->buffer); + if (node->view_src != NULL) { + cur_allocr = sched_allocr_from_buffer(sched, node->view_src->buffer); + if (cur_allocr != NULL) { + SET_CAUSE(node, "1.vsrc"); + return cur_allocr; + } } - - // src - size_t cur_size = 0; - + // assign nodes that use weights to the backend of the weights for (int i = 0; i < GGML_MAX_SRC; i++) { const struct ggml_tensor * src = node->src[i]; if (src == NULL) { break; } - - ggml_backend_t src_backend = get_buffer_backend(sched, src->buffer); if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) { - // operations with weights are always on the same backend as the weights - cur_backend = src_backend; + ggml_tallocr_t src_allocr = sched_allocr_from_buffer(sched, src->buffer); + // operations with weights are always run on the same backend as the weights SET_CAUSE(node, "1.wgt%d", i); - break; - } - - size_t src_size = ggml_nbytes(src); - if (src_size >= cur_size) { - cur_size = src_size; - cur_backend = src_backend; - SET_CAUSE(node, "1.src%d", i); + return src_allocr; } } - return cur_backend; + + return NULL; } static char * fmt_size(size_t size) { @@ -953,7 +965,6 @@ static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, co //#define DEBUG_PASS4 // assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend -// TODO: merge passes static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { // reset splits sched->n_splits = 0; @@ -972,20 +983,14 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g GGML_ASSERT(false); } - // pass 1: assign backends to ops with allocated inputs + // pass 1: assign backends to ops with pre-allocated inputs for (int i = 0; i < graph->n_leafs; i++) { struct ggml_tensor * leaf = graph->leafs[i]; if (node_allocr(leaf) != NULL) { // do not overwrite user assignments continue; } - ggml_backend_t leaf_backend = get_buffer_backend(sched, leaf->buffer); - if (leaf_backend == NULL && leaf->view_src != NULL) { - leaf_backend = get_buffer_backend(sched, leaf->view_src->buffer); - } - if (leaf_backend != NULL) { - node_allocr(leaf) = ggml_backend_sched_get_tallocr(sched, leaf_backend); - } + node_allocr(leaf) = sched_allocr_from_cur(sched, leaf); } for (int i = 0; i < graph->n_nodes; i++) { @@ -994,18 +999,24 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g // do not overwrite user assignments continue; } - ggml_backend_t node_backend = sched_backend_from_cur(sched, node); - if (node_backend != NULL) { - node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend); + node_allocr(node) = sched_allocr_from_cur(sched, node); + // src + for (int j = 0; j < GGML_MAX_SRC; j++) { + struct ggml_tensor * src = node->src[j]; + if (src == NULL) { + break; + } + if (node_allocr(src) == NULL) { + node_allocr(src) = 
sched_allocr_from_cur(sched, src); + } } } #ifdef DEBUG_PASS1 fprintf(stderr, "PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); #endif - // pass 2: assign backends to ops from current assignments - // start from the end and assign the same backend to previous ops - + // pass 2: expand current backend assignments + // assign the same backend to adjacent nodes // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend) // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops @@ -1027,7 +1038,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } } else { node_allocr(node) = cur_allocr; - SET_CAUSE(node, "2.cur"); + SET_CAUSE(node, "2.1"); } } } @@ -1050,7 +1061,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g } } else { node_allocr(node) = cur_allocr; - SET_CAUSE(node, "2.cur"); + SET_CAUSE(node, "2.2"); } } } @@ -1068,7 +1079,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g cur_allocr = node_allocr; } else { node_allocr(node) = cur_allocr; - SET_CAUSE(node, "2.cur"); + SET_CAUSE(node, "2.3"); } } } @@ -1080,7 +1091,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g for (int i = 0; i < graph->n_nodes; i++) { struct ggml_tensor * node = graph->nodes[i]; ggml_tallocr_t cur_allocr = node_allocr(node); - if (ggml_is_view_op(node->op) && cur_allocr == NULL) { + if (node->view_src != NULL && cur_allocr == NULL) { cur_allocr = node_allocr(node) = node_allocr(node->view_src); SET_CAUSE(node, "3.vsrc"); } @@ -1094,8 +1105,10 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g if (src->view_src != NULL) { // views are always on the same backend as the source node_allocr(src) = node_allocr(src->view_src); + SET_CAUSE(src, "3.vsrc"); } else { node_allocr(src) = cur_allocr; + SET_CAUSE(src, "3.cur"); } } } @@ -1136,7 +1149,6 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g sched->splits[cur_split].tallocr = node_allocr; sched->splits[cur_split].i_start = i; sched->splits[cur_split].n_inputs = 0; - memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK cur_allocr = node_allocr; cur_backend_id = sched_allocr_prio(sched, cur_allocr); } @@ -1148,6 +1160,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g break; } ggml_tallocr_t src_allocr = node_allocr(src); + GGML_ASSERT(src_allocr != NULL); // all inputs should be assigned by now if (src_allocr != node_allocr) { // check if the input is already in the split bool found = false; @@ -1162,17 +1175,19 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g int n_inputs = sched->splits[cur_split].n_inputs++; //printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr))); GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS); - sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src; + sched->splits[cur_split].inputs[n_inputs] = src; } // create a copy of the input in the split's backend size_t id = hash_id(src); if (sched->node_copies[id][cur_backend_id] == NULL) { + ggml_backend_t backend = get_allocr_backend(sched, cur_allocr); struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src); + ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name); + 
sched->node_copies[id][cur_backend_id] = tensor_copy; node_allocr(tensor_copy) = cur_allocr; - ggml_backend_t backend = get_allocr_backend(sched, cur_allocr); - ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name); + SET_CAUSE(tensor_copy, "4.cpy"); } node->src[j] = sched->node_copies[id][cur_backend_id]; } @@ -1231,6 +1246,7 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g struct ggml_tensor * input = split->inputs[j]; struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)]; // add a dependency to the input source so that it is not freed before the copy is done + GGML_ASSERT(input_cpy->src[0] == NULL || input_cpy->src[0] == input); input_cpy->src[0] = input; graph_copy->nodes[graph_copy->n_nodes++] = input_cpy; } @@ -1265,25 +1281,16 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { uint64_t copy_start_us = ggml_time_us(); for (int j = 0; j < split->n_inputs; j++) { struct ggml_tensor * input = split->inputs[j]; - struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_backend_prio(sched, split_backend)]; - if (input->buffer == NULL) { - GGML_ASSERT(false); - if (input->view_src == NULL) { - fprintf(stderr, "input %s has no buffer and no view_src\n", input->name); - GGML_ASSERT(false); - } - // FIXME: may need to use the sched buffer instead - ggml_backend_view_init(input->view_src->buffer, input); - } - if (input_cpy->buffer == NULL) { - fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name); - GGML_ASSERT(false); - } + struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][split_backend_id]; + + GGML_ASSERT(input->buffer != NULL); + GGML_ASSERT(input_cpy->buffer != NULL); + // TODO: avoid this copy if it was already copied in a previous split, and the input didn't change // this is important to avoid copying constants such as KQ_mask and inp_pos multiple times - ggml_backend_tensor_copy(input, input_cpy); + ggml_backend_tensor_copy_async(split_backend, input, input_cpy); } - // ggml_backend_synchronize(split_backend); + //ggml_backend_synchronize(split_backend); // necessary to measure copy time int64_t copy_end_us = ggml_time_us(); copy_us[split_backend_id] += copy_end_us - copy_start_us; @@ -1295,7 +1302,7 @@ static void sched_compute_splits(ggml_backend_sched_t sched) { uint64_t compute_start_us = ggml_time_us(); ggml_backend_graph_compute(split_backend, &split->graph); - // ggml_backend_synchronize(split_backend); + //ggml_backend_synchronize(split_backend); // necessary to measure compute time uint64_t compute_end_us = ggml_time_us(); compute_us[split_backend_id] += compute_end_us - compute_start_us; } diff --git a/ggml-backend.h b/ggml-backend.h index c4eff546a3d22..51f98adf8da46 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -43,7 +43,6 @@ extern "C" { GGML_API ggml_backend_buffer_type_t ggml_backend_buffer_get_type (ggml_backend_buffer_t buffer); GGML_API void ggml_backend_buffer_reset (ggml_backend_buffer_t buffer); - // // Backend // diff --git a/ggml-cuda.cu b/ggml-cuda.cu index b0435b81f8ab7..95548bead771c 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -9899,6 +9899,10 @@ static const char * ggml_backend_cuda_buffer_get_name(ggml_backend_buffer_t buff return ctx->name.c_str(); } +static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer) { + return buffer->iface.get_name == ggml_backend_cuda_buffer_get_name; +} + static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) 
{ ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context; CUDA_CHECK(cudaFree(ctx->dev_ptr)); @@ -9961,6 +9965,24 @@ static void ggml_backend_cuda_buffer_get_tensor(ggml_backend_buffer_t buffer, co ggml_cuda_set_device(ctx->device); CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaMemcpy(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost)); + CUDA_CHECK(cudaDeviceSynchronize()); +} + +static bool ggml_backend_cuda_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { + if (ggml_backend_buffer_is_cuda(src->buffer)) { + ggml_backend_cuda_buffer_context * src_ctx = (ggml_backend_cuda_buffer_context *)src->buffer->context; + ggml_backend_cuda_buffer_context * dst_ctx = (ggml_backend_cuda_buffer_context *)buffer->context; + + ggml_cuda_set_device(src_ctx->device); + CUDA_CHECK(cudaDeviceSynchronize()); + ggml_cuda_set_device(dst_ctx->device); + CUDA_CHECK(cudaDeviceSynchronize()); + CUDA_CHECK(cudaMemcpy((char *)dst->data, (const char *)src->data, ggml_nbytes(src), cudaMemcpyDeviceToDevice)); + CUDA_CHECK(cudaDeviceSynchronize()); + + return true; + } + return false; } static void ggml_backend_cuda_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { @@ -9979,8 +10001,7 @@ static ggml_backend_buffer_i ggml_backend_cuda_buffer_interface = { /* .init_tensor = */ ggml_backend_cuda_buffer_init_tensor, /* .set_tensor = */ ggml_backend_cuda_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cuda_buffer_get_tensor, - /* .cpy_tensor_from = */ NULL, - /* .cpy_tensor_to = */ NULL, + /* .cpy_tensor = */ ggml_backend_cuda_buffer_cpy_tensor, /* .clear = */ ggml_backend_cuda_buffer_clear, /* .reset = */ NULL, }; @@ -10114,6 +10135,11 @@ static const char * ggml_backend_cuda_split_buffer_get_name(ggml_backend_buffer_ UNUSED(buffer); } +// unused at the moment +//static bool ggml_backend_buffer_is_cuda_split(ggml_backend_buffer_t buffer) { +// return buffer->iface.get_name == ggml_backend_cuda_split_buffer_get_name; +//} + static void ggml_backend_cuda_split_buffer_free_buffer(ggml_backend_buffer_t buffer) { ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context; delete ctx; @@ -10256,8 +10282,7 @@ static struct ggml_backend_buffer_i ggml_backend_cuda_split_buffer_interface = { /* .init_tensor = */ ggml_backend_cuda_split_buffer_init_tensor, /* .set_tensor = */ ggml_backend_cuda_split_buffer_set_tensor, /* .get_tensor = */ ggml_backend_cuda_split_buffer_get_tensor, - /* .cpy_tensor_from = */ NULL, - /* .cpy_tensor_to = */ NULL, + /* .cpy_tensor = */ NULL, /* .clear = */ ggml_backend_cuda_split_buffer_clear, /* .reset = */ NULL, }; @@ -10457,6 +10482,17 @@ static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggm CUDA_CHECK(cudaMemcpyAsync(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost, g_cudaStreams[cuda_ctx->device][0])); } +static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend, const ggml_tensor * src, ggml_tensor * dst) { + ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; + + if (dst->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device) && ggml_backend_buffer_is_cuda(src->buffer)) { + CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, g_cudaStreams[cuda_ctx->device][0])); + return true; + } + + return false; +} + static void ggml_backend_cuda_synchronize(ggml_backend_t backend) { 
ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context; @@ -10476,8 +10512,9 @@ static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph for (int i = 0; i < cgraph->n_nodes; i++) { ggml_tensor * node = cgraph->nodes[i]; - if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE) + if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) { continue; + } #ifndef NDEBUG assert(node->backend == GGML_BACKEND_GPU || node->backend == GGML_BACKEND_GPU_SPLIT); @@ -10487,7 +10524,7 @@ static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph for (int j = 0; j < GGML_MAX_SRC; j++) { if (node->src[j] != nullptr) { assert(node->src[j]->backend == GGML_BACKEND_GPU || node->src[j]->backend == GGML_BACKEND_GPU_SPLIT); - //assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); + assert(node->src[j]->buffer->buft == ggml_backend_cuda_buffer_type(cuda_ctx->device)); assert(node->src[j]->extra != nullptr); } } @@ -10500,8 +10537,6 @@ static bool ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph GGML_ASSERT(ok); } - UNUSED(backend); - return true; } @@ -10622,8 +10657,7 @@ static ggml_backend_i ggml_backend_cuda_interface = { /* .get_default_buffer_type = */ ggml_backend_cuda_get_default_buffer_type, /* .set_tensor_async = */ ggml_backend_cuda_set_tensor_async, /* .get_tensor_async = */ ggml_backend_cuda_get_tensor_async, - /* .cpy_tensor_from_async = */ NULL, - /* .cpy_tensor_to_async = */ NULL, + /* .cpy_tensor_async = */ ggml_backend_cuda_cpy_tensor_async, /* .synchronize = */ ggml_backend_cuda_synchronize, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, diff --git a/ggml-metal.m b/ggml-metal.m index 27e38a3be8da7..28c02a4902a2d 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2525,14 +2525,12 @@ static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, c UNUSED(buffer); } -static void ggml_backend_metal_buffer_cpy_tensor_from(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { - ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src)); - - UNUSED(buffer); -} - -static void ggml_backend_metal_buffer_cpy_tensor_to(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { - ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src)); +static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { + if (ggml_backend_buffer_is_host(src->buffer)) { + memcpy(dst->data, src->data, ggml_nbytes(src)); + return true; + } + return false; UNUSED(buffer); } @@ -2550,8 +2548,7 @@ static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_ /* .init_tensor = */ NULL, /* .set_tensor = */ ggml_backend_metal_buffer_set_tensor, /* .get_tensor = */ ggml_backend_metal_buffer_get_tensor, - /* .cpy_tensor_from = */ ggml_backend_metal_buffer_cpy_tensor_from, - /* .cpy_tensor_to = */ ggml_backend_metal_buffer_cpy_tensor_to, + /* .cpy_tensor = */ ggml_backend_metal_buffer_cpy_tensor, /* .clear = */ ggml_backend_metal_buffer_clear, /* .reset = */ NULL, }; @@ -2774,8 +2771,7 @@ static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct /* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type, /* 
.set_tensor_async = */ NULL, /* .get_tensor_async = */ NULL, - /* .cpy_tensor_from_async = */ NULL, - /* .cpy_tensor_to_async = */ NULL, + /* .cpy_tensor_async = */ NULL, /* .synchronize = */ NULL, /* .graph_plan_create = */ NULL, /* .graph_plan_free = */ NULL, From e73009ea514ec35189fa98270f096ea8d81093ea Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 12 Jan 2024 03:53:39 +0100 Subject: [PATCH 32/34] use async memcpys to copy the graph outputs to the CPU --- ggml-backend.c | 22 ++++++++++++++++++++++ ggml-backend.h | 8 +++++--- llama.cpp | 15 +++++++++++---- 3 files changed, 38 insertions(+), 7 deletions(-) diff --git a/ggml-backend.c b/ggml-backend.c index 62ffd07766af6..564b27c6c9669 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -772,6 +772,8 @@ struct ggml_backend_sched_split { }; struct ggml_backend_sched { + bool is_reset; // true if the scheduler has been reset since the last graph split + int n_backends; ggml_backend_t backends[GGML_MAX_BACKENDS]; ggml_tallocr_t tallocs[GGML_MAX_BACKENDS]; @@ -968,6 +970,7 @@ static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, co static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { // reset splits sched->n_splits = 0; + sched->is_reset = false; struct ggml_init_params params = { /* .mem_size = */ sizeof(sched->context_buffer), @@ -1327,6 +1330,8 @@ static void sched_reset(ggml_backend_sched_t sched) { memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size); memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size); + + sched->is_reset = true; } ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends, size_t graph_size) { @@ -1352,6 +1357,8 @@ ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_bac sched->tallocs[i] = ggml_tallocr_new_measure_from_backend(backends[i]); } + sched_reset(sched); + return sched; } @@ -1389,9 +1396,16 @@ void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgr void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) { GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS); + if (!sched->is_reset) { + sched_reset(sched); + } + sched_split_graph(sched, graph); sched_alloc_splits(sched); sched_compute_splits(sched); +} + +void ggml_backend_sched_reset(ggml_backend_sched_t sched) { sched_reset(sched); } @@ -1417,6 +1431,14 @@ void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml node_allocr(node) = sched->tallocs[backend_index]; } +ggml_backend_t ggml_backend_sched_get_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) { + ggml_tallocr_t allocr = node_allocr(node); + if (allocr == NULL) { + return NULL; + } + return get_allocr_backend(sched, allocr); +} + // utils void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) { diff --git a/ggml-backend.h b/ggml-backend.h index 51f98adf8da46..67c63caaf7fca 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -160,11 +160,13 @@ extern "C" { GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend); GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend); + GGML_API ggml_backend_t ggml_backend_sched_get_node_backend(ggml_backend_sched_t sched, struct 
ggml_tensor * node); // Allocate and compute graph on the backend scheduler - GGML_API void ggml_backend_sched_graph_compute( - ggml_backend_sched_t sched, - struct ggml_cgraph * graph); + GGML_API void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph); + + // Reset all assignments and allocators - must be called before using the sched allocators to allocate inputs + GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched); // // Utils diff --git a/llama.cpp b/llama.cpp index e3979865c029f..1478854197913 100644 --- a/llama.cpp +++ b/llama.cpp @@ -6218,6 +6218,8 @@ static int llama_decode_internal( //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head); + ggml_backend_sched_reset(lctx.sched); + ggml_cgraph * gf = llama_build_graph(lctx, batch); // the output is always the last tensor in the graph @@ -6311,30 +6313,33 @@ static int llama_decode_internal( logits_out.clear(); #endif + ggml_backend_t res_backend = ggml_backend_sched_get_node_backend(lctx.sched, res); + GGML_ASSERT(res_backend != nullptr); if (batch.logits) { logits_out.resize(n_vocab * n_tokens); for (uint32_t i = 0; i < n_tokens; i++) { if (batch.logits[i] == 0) { continue; } - ggml_backend_tensor_get(res, logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float)); + ggml_backend_tensor_get_async(res_backend, res, logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float)); #ifndef NDEBUG logits_valid[i] = true; #endif } } else if (lctx.logits_all) { logits_out.resize(n_vocab * n_tokens); - ggml_backend_tensor_get(res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float)); + ggml_backend_tensor_get_async(res_backend, res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float)); #ifndef NDEBUG std::fill(logits_valid.begin(), logits_valid.end(), true); #endif } else { logits_out.resize(n_vocab); - ggml_backend_tensor_get(res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float)); + ggml_backend_tensor_get_async(res_backend, res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float)); #ifndef NDEBUG logits_valid[0] = true; #endif } + ggml_backend_synchronize(res_backend); } // extract embeddings @@ -6342,7 +6347,9 @@ static int llama_decode_internal( auto & embedding_out = lctx.embedding; embedding_out.resize(n_embd); - ggml_backend_tensor_get(embeddings, embedding_out.data(), (n_embd*(n_tokens - 1))*sizeof(float), n_embd*sizeof(float)); + ggml_backend_t embeddings_backend = ggml_backend_sched_get_node_backend(lctx.sched, embeddings); + ggml_backend_tensor_get_async(embeddings_backend, embeddings, embedding_out.data(), (n_embd*(n_tokens - 1))*sizeof(float), n_embd*sizeof(float)); + ggml_backend_synchronize(embeddings_backend); } // measure the performance only for the single-token evals From 1e7694eee7ff39b9fef7cd19fad9ad4892725357 Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 12 Jan 2024 04:16:15 +0100 Subject: [PATCH 33/34] fix opencl --- ggml-opencl.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp index 732e24c9129aa..2bb93638f1c7c 100644 --- a/ggml-opencl.cpp +++ b/ggml-opencl.cpp @@ -2004,8 +2004,7 @@ static ggml_backend_buffer_i ggml_backend_opencl_buffer_interface = { /* .init_tensor = */ ggml_backend_opencl_buffer_init_tensor, /* .set_tensor = */ ggml_backend_opencl_buffer_set_tensor, /* .get_tensor = */ ggml_backend_opencl_buffer_get_tensor, - /* .cpy_tensor_from 
= */ NULL, - /* .cpy_tensor_to = */ NULL, + /* .cpy_tensor = */ NULL, /* .clear = */ ggml_backend_opencl_buffer_clear, /* .reset = */ ggml_backend_opencl_buffer_reset, }; From 53ae0dd86259f1df66ad4994cae364b7d92c23d1 Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 12 Jan 2024 17:40:55 +0100 Subject: [PATCH 34/34] use a host buffer for the cpu compute buffer for faster copies to the gpu --- ggml-alloc.c | 17 +++++++++++++---- ggml-alloc.h | 4 +++- ggml-backend.c | 8 +++++--- ggml-backend.h | 2 +- llama.cpp | 15 +++++++++++++-- 5 files changed, 35 insertions(+), 11 deletions(-) diff --git a/ggml-alloc.c b/ggml-alloc.c index 4d1ceb0477a7a..89b85d34870d7 100644 --- a/ggml-alloc.c +++ b/ggml-alloc.c @@ -265,9 +265,9 @@ ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment) { return alloc; } -ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend) { +ggml_tallocr_t ggml_tallocr_new_measure_from_buft(struct ggml_backend_buffer_type * buft) { // create a backend buffer to get the correct tensor allocation sizes - ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, 1); + ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, 1); // TODO: move alloc initialization to a common ggml_tallocr_new_impl function ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer); @@ -277,13 +277,22 @@ ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backe return alloc; } -ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size) { - ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, size); +ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend) { + return ggml_tallocr_new_measure_from_buft(ggml_backend_get_default_buffer_type(backend)); +} + +ggml_tallocr_t ggml_tallocr_new_from_buft(struct ggml_backend_buffer_type * buft, size_t size) { + // create a backend buffer to get the correct tensor allocation sizes + ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size); ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer); alloc->buffer_owned = true; return alloc; } +ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size) { + return ggml_tallocr_new_from_buft(ggml_backend_get_default_buffer_type(backend), size); +} + ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer) { ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr)); diff --git a/ggml-alloc.h b/ggml-alloc.h index 64a412468915b..4e59975213406 100644 --- a/ggml-alloc.h +++ b/ggml-alloc.h @@ -52,8 +52,10 @@ typedef struct ggml_tallocr * ggml_tallocr_t; GGML_API ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment); GGML_API ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment); -GGML_API ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer); +GGML_API ggml_tallocr_t ggml_tallocr_new_from_buft(struct ggml_backend_buffer_type * buft, size_t size); GGML_API ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer +GGML_API ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer); +GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_buft(struct ggml_backend_buffer_type * buft); GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend); GGML_API struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t talloc); diff --git 
a/ggml-backend.c b/ggml-backend.c index 564b27c6c9669..4c2d8b0b26f18 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -776,6 +776,7 @@ struct ggml_backend_sched { int n_backends; ggml_backend_t backends[GGML_MAX_BACKENDS]; + ggml_backend_buffer_type_t bufts[GGML_MAX_BACKENDS]; ggml_tallocr_t tallocs[GGML_MAX_BACKENDS]; ggml_gallocr_t galloc; @@ -1334,7 +1335,7 @@ static void sched_reset(ggml_backend_sched_t sched) { sched->is_reset = true; } -ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends, size_t graph_size) { +ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size) { GGML_ASSERT(n_backends > 0); GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS); @@ -1348,13 +1349,14 @@ ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_bac sched->n_backends = n_backends; for (int i = 0; i < n_backends; i++) { sched->backends[i] = backends[i]; + sched->bufts[i] = bufts ? bufts[i] : ggml_backend_get_default_buffer_type(backends[i]); } sched->galloc = ggml_gallocr_new(); // init measure allocs for each backend for (int i = 0; i < n_backends; i++) { - sched->tallocs[i] = ggml_tallocr_new_measure_from_backend(backends[i]); + sched->tallocs[i] = ggml_tallocr_new_measure_from_buft(sched->bufts[i]); } sched_reset(sched); @@ -1387,7 +1389,7 @@ void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgr for (int i = 0; i < sched->n_backends; i++) { size_t size = ggml_tallocr_max_size(sched->tallocs[i]); ggml_tallocr_free(sched->tallocs[i]); - sched->tallocs[i] = ggml_tallocr_new_from_backend(sched->backends[i], size); + sched->tallocs[i] = ggml_tallocr_new_from_buft(sched->bufts[i], size); } sched_reset(sched); diff --git a/ggml-backend.h b/ggml-backend.h index 67c63caaf7fca..4eb244af1d3e7 100644 --- a/ggml-backend.h +++ b/ggml-backend.h @@ -149,7 +149,7 @@ extern "C" { typedef struct ggml_backend_sched * ggml_backend_sched_t; // Initialize a backend scheduler - GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends, size_t graph_size); + GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size); GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched); // Initialize backend buffers from a measure graph GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph); diff --git a/llama.cpp b/llama.cpp index d4a2d3c265411..9afb1b9765264 100644 --- a/llama.cpp +++ b/llama.cpp @@ -9368,10 +9368,21 @@ struct llama_context * llama_new_context_with_model( } { + // buffer types used for the compute buffer of each backend + std::vector backend_buft; + for (auto * backend : ctx->backends) { + if (ggml_backend_is_cpu(backend)) { + // use host buffers for the CPU backend compute buffer + backend_buft.push_back(llama_default_buffer_type_cpu(true)); + } else { + backend_buft.push_back(ggml_backend_get_default_buffer_type(backend)); + } + } + // buffer used to store the computation graph and the tensor meta data ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead()); - ctx->sched = ggml_backend_sched_new(ctx->backends.data(), ctx->backends.size(), LLAMA_MAX_NODES); + ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), LLAMA_MAX_NODES); ctx->alloc = ggml_backend_sched_get_tallocr(ctx->sched, 
ctx->backend_cpu); // build worst-case graph @@ -9390,7 +9401,7 @@ struct llama_context * llama_new_context_with_model( for (ggml_backend_t backend : ctx->backends) { ggml_backend_buffer_t buf = ggml_backend_sched_get_buffer(ctx->sched, backend); LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__, - ggml_backend_name(backend), + ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0); } }
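
Taken together, the last few patches change how a client drives the backend scheduler: per-backend compute buffers are described by explicit buffer types, assignments are reset before each evaluation, and graph outputs are read back asynchronously from whichever backend produced them. The fragment below is an illustrative sketch only and is not part of any patch above; it assumes the caller has already initialized `backends` (for example CUDA plus CPU), built a worst-case `measure_graph` and the per-evaluation graph `gf` with ggml, and sized `dst` to hold the data of the last graph node. Error handling is omitted, and the hypothetical helper name `compute_and_read_output` is chosen here for illustration.

#include "ggml.h"
#include "ggml-backend.h"

static void compute_and_read_output(ggml_backend_t * backends, int n_backends,
                                    struct ggml_cgraph * measure_graph,
                                    struct ggml_cgraph * gf,
                                    void * dst, size_t graph_size) {
    // passing NULL for the buffer types falls back to each backend's default buffer
    // type; llama.cpp instead passes a pinned host buffer type for the CPU backend
    // to speed up copies to the GPU (see the last patch)
    ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, n_backends, graph_size);

    // reserve the per-backend compute buffers from a worst-case graph
    ggml_backend_sched_init_measure(sched, measure_graph);

    // per evaluation: clear the previous assignments, then split, allocate and
    // compute the graph across the backends
    ggml_backend_sched_reset(sched);
    ggml_backend_sched_graph_compute(sched, gf);

    // the output is the last node in the graph; read it back asynchronously from
    // the backend that actually produced it, then synchronize only that backend
    struct ggml_tensor * out = gf->nodes[gf->n_nodes - 1];
    ggml_backend_t out_backend = ggml_backend_sched_get_node_backend(sched, out);
    ggml_backend_tensor_get_async(out_backend, out, dst, 0, ggml_nbytes(out));
    ggml_backend_synchronize(out_backend);

    ggml_backend_sched_free(sched);
}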