imatrix : offload to GPU support #4957

Merged · 10 commits · Jan 17, 2024
61 changes: 48 additions & 13 deletions examples/imatrix/imatrix.cpp
@@ -33,19 +33,43 @@ class IMatrixCollector {
public:
IMatrixCollector() = default;
void set_parameters(StatParams&& params) { m_params = std::move(params); }
void collect_imatrix(const struct ggml_tensor * src0, const struct ggml_tensor * src1);
bool collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data);
void save_imatrix() const;
private:
std::unordered_map<std::string, Stats> m_stats;
StatParams m_params;
std::mutex m_mutex;
int m_last_call = 0;
std::vector<float> m_src1_data;
};

void IMatrixCollector::collect_imatrix(const struct ggml_tensor * src0, const struct ggml_tensor * src1) {
if (src1->ne[1] < 16 || src1->type != GGML_TYPE_F32) return;
if (!(strncmp(src0->name, "blk.", 4) == 0 || (m_params.collect_output_weight && strcmp(src0->name, "output.weight") == 0))) return;
bool IMatrixCollector::collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
GGML_UNUSED(user_data);

const struct ggml_tensor * src0 = t->src[0];
const struct ggml_tensor * src1 = t->src[1];

// when ask is true, the scheduler wants to know if we are interested in data from this tensor
// if we return true, a follow-up call will be made with ask=false in which we can do the actual collection
if (ask) {
if (t->op != GGML_OP_MUL_MAT) return false;
if (src1->ne[1] < 16 || src1->type != GGML_TYPE_F32) return false;
if (!(strncmp(src0->name, "blk.", 4) == 0 || (m_params.collect_output_weight && strcmp(src0->name, "output.weight") == 0))) return false;
return true;
}

std::lock_guard<std::mutex> lock(m_mutex);

// copy the data from the GPU memory if needed
const bool is_host = ggml_backend_buffer_is_host(src1->buffer);

if (!is_host || !ggml_is_contiguous(src1)) {
m_src1_data.resize(ggml_nelements(src1));
ggml_backend_tensor_get(src1, m_src1_data.data(), 0, ggml_nbytes(src1));
}

const float * data = is_host ? (const float *) src1->data : m_src1_data.data();

auto& e = m_stats[src0->name];
if (e.values.empty()) {
e.values.resize(src1->ne[0], 0);
@@ -59,7 +83,7 @@ void IMatrixCollector::collect_imatrix(const struct ggml_tensor * src0, const st
printf("%s[%d]: %s, %d x %d, %d\n",__func__,m_last_call,src0->name,(int)src1->ne[0],(int)src1->ne[1],(int)src1->type);
}
for (int row = 0; row < (int)src1->ne[1]; ++row) {
const float * x = (const float *)src1->data + row * src1->ne[0];
const float * x = data + row * src1->ne[0];
for (int j = 0; j < (int)src1->ne[0]; ++j) {
e.values[j] += x[j]*x[j];
}
@@ -70,6 +94,8 @@ void IMatrixCollector::collect_imatrix(const struct ggml_tensor * src0, const st
save_imatrix();
}
}

return true;
}
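For intuition, here is a small standalone sketch (not part of the PR; the activation values are made up) of the accumulation the loop above performs: each column j of src1 accumulates the sum of its squared activations across rows.

#include <cstdio>
#include <vector>

int main() {
    // hypothetical activations: src1 with ne[0] = 3 columns and ne[1] = 2 rows
    const float x[2][3] = { {1.0f, 2.0f, 0.5f},
                            {3.0f, 1.0f, 0.5f} };
    std::vector<float> values(3, 0.0f);
    for (int row = 0; row < 2; ++row) {
        for (int j = 0; j < 3; ++j) {
            values[j] += x[row][j]*x[row][j]; // same update as e.values[j] above
        }
    }
    // prints 10.00 5.00 0.50 - columns with larger activations matter more
    printf("%.2f %.2f %.2f\n", values[0], values[1], values[2]);
    return 0;
}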

void IMatrixCollector::save_imatrix() const {
@@ -93,8 +119,8 @@ void IMatrixCollector::save_imatrix() const {

static IMatrixCollector g_collector;

static void ik_collect_imatrix(const struct ggml_tensor * src0, const struct ggml_tensor * src1) {
g_collector.collect_imatrix(src0, src1);
static bool ik_collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data) {
return g_collector.collect_imatrix(t, ask, user_data);
}


@@ -320,8 +346,6 @@ int main(int argc, char ** argv) {

g_collector.set_parameters(std::move(sparams));

ggml_set_imatrix_collection(ik_collect_imatrix);

params.logits_all = true;
params.n_batch = std::min(params.n_batch, params.n_ctx);

@@ -340,16 +364,27 @@ int main(int argc, char ** argv) {

llama_backend_init(params.numa);

llama_model * model;
llama_context * ctx;
llama_model_params mparams = llama_model_params_from_gpt_params(params);

// load the model and apply lora adapter, if any
std::tie(model, ctx) = llama_init_from_gpt_params(params);
llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams);
if (model == NULL) {
fprintf(stderr, "%s: error: unable to load model\n", __func__);
return 1;
}

llama_context_params cparams = llama_context_params_from_gpt_params(params);

// pass the callback to the backend scheduler
// it will be executed for each node during the graph computation
cparams.cb_eval = ik_collect_imatrix;
cparams.cb_eval_user_data = NULL;

llama_context * ctx = llama_new_context_with_model(model, cparams);
if (ctx == NULL) {
fprintf(stderr, "%s: error: unable to create context\n", __func__);
return 1;
}

const int n_ctx_train = llama_n_ctx_train(model);
if (params.n_ctx > n_ctx_train) {
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
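With this change the statistics are collected through the scheduler callback rather than a CPU-only hook, so the tool can be run with layers offloaded to the GPU (for example with the common -ngl option); activations that live in device buffers are copied back with ggml_backend_tensor_get before being accumulated.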
49 changes: 47 additions & 2 deletions examples/simple/simple.cpp
@@ -6,11 +6,49 @@
#include <string>
#include <vector>

// a function that can be called for every computed node during graph evaluation
// the user can choose whether to observe the node's data, depending on the tensor parameters
static bool observe_compute(struct ggml_tensor * t, bool ask, void * user_data) {
GGML_UNUSED(user_data);

// the scheduler is asking us if we want to observe this node
if (ask) {
// check if name contains soft_max (customize to your needs)
return strstr(t->name, "soft_max") != 0;
}

// print the node info
printf("%s: t->name = %32s, t->op = %12s, [%5d, %5d, %5d, %5d]\n",
__func__, t->name, ggml_op_name(t->op), (int) t->ne[0], (int) t->ne[1], (int) t->ne[2], (int) t->ne[3]);

// this will copy the data to host memory (if needed)
static std::vector<float> t_data;

const bool is_host = ggml_backend_buffer_is_host(t->buffer);

if (!is_host || !ggml_is_contiguous(t)) {
t_data.resize(ggml_nelements(t));
ggml_backend_tensor_get(t, t_data.data(), 0, ggml_nbytes(t));
}

const float * data = is_host ? (const float *) t->data : t_data.data();

// print first row
for (int i = 0; i < t->ne[0]; i++) {
printf("%8.4f ", data[i]);
}
printf("\n");

return true;
}

int main(int argc, char ** argv) {
gpt_params params;

bool observe = false;

if (argc == 1 || argv[1][0] == '-') {
printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]);
printf("usage: %s MODEL_PATH [PROMPT] [OBSERV]\n" , argv[0]);
return 1 ;
}

@@ -22,6 +60,10 @@ int main(int argc, char ** argv) {
params.prompt = argv[2];
}

if (argc >= 4) {
observe = atoi(argv[3]);
}

if (params.prompt.empty()) {
params.prompt = "Hello my name is";
}
@@ -37,7 +79,7 @@

llama_model_params model_params = llama_model_default_params();

// model_params.n_gpu_layers = 99; // offload all layers to the GPU
model_params.n_gpu_layers = 99; // offload all layers to the GPU

llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

@@ -55,6 +97,9 @@ int main(int argc, char ** argv) {
ctx_params.n_threads = params.n_threads;
ctx_params.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;

ctx_params.cb_eval = observe ? observe_compute : NULL;
ctx_params.cb_eval_user_data = NULL;

llama_context * ctx = llama_new_context_with_model(model, ctx_params);

if (ctx == NULL) {
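A usage note: the new third positional argument enables the observer, e.g. simple <model.gguf> "Hello my name is" 1 (the model path is a placeholder). When enabled, every node whose name contains soft_max is reported along with the first row of its data; passing 0 or omitting the argument keeps the previous behaviour.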
48 changes: 46 additions & 2 deletions ggml-backend.c
@@ -802,6 +802,9 @@ struct ggml_backend_sched {
__attribute__((aligned(GGML_MEM_ALIGN)))
#endif
char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + sizeof(struct ggml_cgraph)];

ggml_backend_sched_eval_callback callback_eval;
void * callback_eval_user_data;
};

#define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node)
@@ -1324,9 +1327,40 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {
ggml_graph_dump_dot(split->graph, NULL, split_filename);
#endif


uint64_t compute_start_us = ggml_time_us();
ggml_backend_graph_compute(split_backend, &split->graph);
//ggml_backend_synchronize(split_backend); // necessary to measure compute time
if (!sched->callback_eval) {
ggml_backend_graph_compute(split_backend, &split->graph);
//ggml_backend_synchronize(split_backend); // necessary to measure compute time
} else {
// similar to ggml_backend_compare_graph_backend
for (int j0 = 0; j0 < split->graph.n_nodes; j0++) {
struct ggml_tensor * t = split->graph.nodes[j0];

int j1 = j0;

// determine the range [j0, j1] of nodes that can be computed together
while (j1 < split->graph.n_nodes - 1) {
// check if the user needs data from this node
if (sched->callback_eval(t, true, sched->callback_eval_user_data)) {
break;
}

t = split->graph.nodes[++j1];
}

struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);

ggml_backend_graph_compute(split_backend, &gv);

if (sched->callback_eval(t, true, sched->callback_eval_user_data) && // ask
!sched->callback_eval(t, false, sched->callback_eval_user_data)) { // eval
break;
}

j0 = j1;
}
}
uint64_t compute_end_us = ggml_time_us();
compute_us[split_backend_id] += compute_end_us - compute_start_us;
}
@@ -1352,6 +1386,10 @@ static void sched_reset(ggml_backend_sched_t sched) {
memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);

// TODO: should we clear the callbacks?
//sched->callback_eval = NULL;
//sched->callback_eval_user_data = NULL;

sched->is_reset = true;
}

@@ -1431,6 +1469,12 @@ void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
sched_reset(sched);
}


void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
sched->callback_eval = callback;
sched->callback_eval_user_data = user_data;
}

int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
return sched->n_splits;
}
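To make the batching loop in sched_compute_splits concrete, here is an illustrative trace (a hypothetical five-node split n0..n4 where the callback only asks to observe n3):

// j0 = 0: ask(n0), ask(n1), ask(n2) return false; ask(n3) returns true,
//         so the inner while loop stops with j1 = 3
//         -> one ggml_backend_graph_compute call evaluates the view [n0 .. n3]
//         -> ask(n3) is true, so the callback is invoked again with ask == false
//            to let the user read n3; returning false there aborts the compute
// j0 = 4: last node, the while loop does not run; the view [n4 .. n4] is
//         computed, and ask(n4) returns false, so no observation call is made
//
// with no callback set, the whole split is computed in a single call, as before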
11 changes: 11 additions & 0 deletions ggml-backend.h
@@ -148,6 +148,14 @@ extern "C" {
struct ggml_backend_sched;
typedef struct ggml_backend_sched * ggml_backend_sched_t;

// when ask == true, the scheduler wants to know if the user wants to observe this node
// this allows the scheduler to batch nodes together in order to evaluate them in a single call
//
// when ask == false, the scheduler is passing the node tensor to the user for observation
// if the user returns false, the scheduler will cancel the graph compute
//
typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);

// Initialize a backend scheduler
GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size);
GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
@@ -168,6 +176,9 @@ extern "C" {
// Reset all assignments and allocators - must be called before using the sched allocators to allocate inputs
GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);

// Set a callback to be called for each resulting node during graph compute
GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data);

//
// Utils
//
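A minimal sketch of the new API from the application side (the callback, counter, and install helper are illustrative assumptions; only functions declared in this header and in ggml.h are used):

#include "ggml.h"
#include "ggml-backend.h"

// counts how many mul_mat results the scheduler hands back
static bool count_mul_mats(struct ggml_tensor * t, bool ask, void * user_data) {
    int * n_seen = (int *) user_data;
    if (ask) {
        return t->op == GGML_OP_MUL_MAT; // only observe matrix multiplications
    }
    (*n_seen)++;  // t now contains the computed result, ready to inspect
    return true;  // returning false would cancel the rest of the graph compute
}

static int g_n_seen = 0;

static void install_observer(ggml_backend_sched_t sched) {
    ggml_backend_sched_set_eval_callback(sched, count_mul_mats, &g_n_seen);
}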
14 changes: 0 additions & 14 deletions ggml.c
@@ -394,12 +394,6 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);

ggml_collect_imatrix_t g_imatrix_collect = NULL;

void ggml_set_imatrix_collection(ggml_collect_imatrix_t imatrix_collect) {
g_imatrix_collect = imatrix_collect;
}

static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
[GGML_TYPE_I8] = {
.type_name = "i8",
@@ -9790,10 +9784,6 @@ static void ggml_compute_forward_mul_mat(
const int ith = params->ith;
const int nth = params->nth;

if (ith == 1 && g_imatrix_collect) {
g_imatrix_collect(src0, src1);
}

const enum ggml_type type = src0->type;

const bool src1_cont = ggml_is_contiguous(src1);
@@ -10097,10 +10087,6 @@ static void ggml_compute_forward_mul_mat_id(

const struct ggml_tensor * src0_cur = dst->src[cur_a + 2];

if (ith == 1 && g_imatrix_collect) {
g_imatrix_collect(src0_cur, src1);
}

const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
const size_t row_size = ggml_row_size(vec_dot_type, ne10);

6 changes: 0 additions & 6 deletions ggml.h
@@ -2075,12 +2075,6 @@ extern "C" {
GGML_API void ggml_init_iq2_quantization(enum ggml_type type);
GGML_API void ggml_deinit_iq2_quantization(enum ggml_type type);

//
// Importance matrix
//
typedef void(*ggml_collect_imatrix_t)(const struct ggml_tensor * src0, const struct ggml_tensor * src1);
GGML_API void ggml_set_imatrix_collection(ggml_collect_imatrix_t imatrix_collect);

//
// gguf
//
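Together with the ggml.c hunks above, this removes the old process-wide hook (ggml_set_imatrix_collection), which only fired from the CPU mul_mat paths; the scheduler eval callback introduced in ggml-backend.h subsumes it and works no matter which backend computed the node.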
9 changes: 9 additions & 0 deletions llama.cpp
@@ -1393,6 +1393,9 @@ struct llama_cparams {

bool mul_mat_q;
bool offload_kqv;

ggml_backend_sched_eval_callback cb_eval;
void * cb_eval_user_data;
};

struct llama_layer {
@@ -6254,6 +6257,7 @@ static int llama_decode_internal(
//printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);

ggml_backend_sched_reset(lctx.sched);
ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);

ggml_cgraph * gf = llama_build_graph(lctx, batch);

@@ -9267,6 +9271,8 @@ struct llama_context_params llama_context_default_params() {
/*.logits_all =*/ false,
/*.embedding =*/ false,
/*.offload_kqv =*/ true,
/*.cb_eval =*/ nullptr,
/*.cb_eval_user_data =*/ nullptr,
};

return result;
@@ -9401,6 +9407,9 @@ struct llama_context * llama_new_context_with_model(
hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
hparams.n_ctx_train;

cparams.cb_eval = params.cb_eval;
cparams.cb_eval_user_data = params.cb_eval_user_data;

auto rope_scaling_type = params.rope_scaling_type;
if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) {
rope_scaling_type = hparams.rope_scaling_type_train;