From 5839d9e6df51f9f0acbc6190ec1d076f4a54ab13 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 14 May 2023 15:10:32 +0300
Subject: [PATCH] examples : use inplace calls explicitly

---
 examples/common.cpp         |  1 -
 examples/dolly-v2/main.cpp  | 12 ++++++------
 examples/gpt-2/main.cpp     |  8 ++++----
 examples/gpt-j/main.cpp     | 12 ++++++------
 examples/gpt-neox/main.cpp  | 12 ++++++------
 examples/starcoder/main.cpp |  8 ++++----
 6 files changed, 26 insertions(+), 27 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index ba0e9522c..a8461fb4d 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -212,7 +212,6 @@ void gpt_vocab::add_special_token(const std::string & token) {
     special_tokens.push_back(token);
 }
 
-
 std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text) {
     std::vector<std::string> words;
 
diff --git a/examples/dolly-v2/main.cpp b/examples/dolly-v2/main.cpp
index 2028eb5bc..d693296bf 100644
--- a/examples/dolly-v2/main.cpp
+++ b/examples/dolly-v2/main.cpp
@@ -479,8 +479,8 @@ bool dollyv2_eval(
             struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
 
             // using mode = 2 for GPT-NeoX mode
-            Qcur = ggml_rope(ctx0, Qcur, n_past, n_rot, 2);
-            Kcur = ggml_rope(ctx0, Kcur, n_past, n_rot, 2);
+            Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2);
+            Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2);
 
             // store key and value to memory
             {
@@ -514,16 +514,16 @@ bool dollyv2_eval(
 
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             struct ggml_tensor * V =
@@ -622,7 +622,7 @@ bool dollyv2_eval(
     }
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
diff --git a/examples/gpt-2/main.cpp b/examples/gpt-2/main.cpp
index db149c811..d8f4bdd7e 100644
--- a/examples/gpt-2/main.cpp
+++ b/examples/gpt-2/main.cpp
@@ -527,18 +527,18 @@ bool gpt2_eval(
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             // [n_past + N, N, 12]
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
             // [n_past + N, N, 12]
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
             // [n_past + N, N, 12]
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             // [n_past + N, 64, 12]
@@ -665,7 +665,7 @@ bool gpt2_eval(
     inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
diff --git a/examples/gpt-j/main.cpp b/examples/gpt-j/main.cpp
index 7c9197a99..d8e37c80a 100644
--- a/examples/gpt-j/main.cpp
+++ b/examples/gpt-j/main.cpp
@@ -446,8 +446,8 @@ bool gptj_eval(
 
         // self-attention
         {
-            struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
-            struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
+            struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
+            struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
 
             // store key and value to memory
             {
@@ -481,16 +481,16 @@ bool gptj_eval(
 
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             struct ggml_tensor * V =
@@ -574,7 +574,7 @@ bool gptj_eval(
     }
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
diff --git a/examples/gpt-neox/main.cpp b/examples/gpt-neox/main.cpp
index 1c180331c..bd8144930 100644
--- a/examples/gpt-neox/main.cpp
+++ b/examples/gpt-neox/main.cpp
@@ -502,8 +502,8 @@ bool gpt_neox_eval(
             struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
 
             // using mode = 2 for GPT-NeoX mode
-            Qcur = ggml_rope(ctx0, Qcur, n_past, n_rot, 2);
-            Kcur = ggml_rope(ctx0, Kcur, n_past, n_rot, 2);
+            Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2);
+            Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2);
 
             // store key and value to memory
             {
@@ -537,16 +537,16 @@ bool gpt_neox_eval(
 
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             struct ggml_tensor * V =
@@ -621,7 +621,7 @@ bool gpt_neox_eval(
     }
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
diff --git a/examples/starcoder/main.cpp b/examples/starcoder/main.cpp
index 9b2836c64..d7a2d5ea3 100644
--- a/examples/starcoder/main.cpp
+++ b/examples/starcoder/main.cpp
@@ -537,18 +537,18 @@ bool starcoder_eval(
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             // [n_past + N, N, 12]
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
             // [n_past + N, N, 12]
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
             // [n_past + N, N, 12]
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             // [n_past + N, 64, 12]
@@ -675,7 +675,7 @@ bool starcoder_eval(
     inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
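
Note: the _inplace variants differ from their plain counterparts only in where the result lands. ggml_scale(ctx0, KQ, ...) allocates a fresh result tensor in the context and leaves KQ untouched, whereas ggml_scale_inplace returns a view of KQ and writes the scaled values back into its existing buffer, so the eval graph reserves no extra data buffer for the intermediate. The standalone sketch below is not part of the patch; it assumes the ggml.h API as the tree stood at this commit (May 2023: ggml_build_forward returning a ggml_cgraph by value, ggml_graph_compute taking the context), and the tensor size and values are illustrative only.

    #include <stdio.h>

    #include "ggml/ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024, // small scratch arena for this demo
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
        ggml_set_f32(x, 2.0f);

        // ggml_scale would allocate a new tensor for the result;
        // ggml_scale_inplace returns a view that shares x's data,
        // so computing the graph overwrites x.
        struct ggml_tensor * y = ggml_scale_inplace(ctx, x, ggml_new_f32(ctx, 0.5f));

        struct ggml_cgraph gf = ggml_build_forward(y);
        ggml_graph_compute(ctx, &gf);

        printf("y[0] = %.1f\n", ggml_get_f32_1d(y, 0)); // 1.0
        printf("x[0] = %.1f\n", ggml_get_f32_1d(x, 0)); // 1.0 as well: same buffer

        ggml_free(ctx);
        return 0;
    }

The trade-off is that the operand is clobbered, so the inplace form is only safe when no later node in the graph reads the original tensor. That holds for every call site touched above: Qcur, Kcur, KQ, KQ_scaled, and KQ_masked are each consumed exactly once.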