examples : use inplace calls explicitly
ggerganov committed May 14, 2023
1 parent 5eeb19f commit 5839d9e
Showing 6 changed files with 26 additions and 27 deletions.
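
Background: ggml's *_inplace variants return a view that reuses the source tensor's data instead of allocating a fresh result tensor in the context, which trims the memory footprint of the compute graph; this commit spells that choice out at each call site. Below is a minimal sketch (not part of the commit) of the rewritten attention-score sequence; attn_scores_inplace is a hypothetical helper for illustration, and the arguments mirror the variables used in the examples.

    #include "ggml.h"
    #include <cmath>

    // Minimal sketch: scaled/masked/softmaxed attention scores using the
    // explicit in-place ops adopted by this commit. `attn_scores_inplace`
    // is a hypothetical helper, not a function from the repository.
    static struct ggml_tensor * attn_scores_inplace(
            struct ggml_context * ctx0, struct ggml_tensor * KQ,
            int n_embd, int n_head, int n_past) {
        // KQ_scaled = KQ / sqrt(n_embd/n_head) -- the result aliases KQ's data
        struct ggml_tensor * KQ_scaled =
            ggml_scale_inplace(ctx0, KQ, ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)));

        // mask out future positions, then normalize, again without new allocations
        struct ggml_tensor * KQ_masked   = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
        struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);

        return KQ_soft_max;
    }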
1 change: 0 additions & 1 deletion examples/common.cpp
@@ -212,7 +212,6 @@ void gpt_vocab::add_special_token(const std::string & token) {
     special_tokens.push_back(token);
 }
 
-
 std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text) {
     std::vector<std::string> words;
 
12 changes: 6 additions & 6 deletions examples/dolly-v2/main.cpp
@@ -479,8 +479,8 @@ bool dollyv2_eval(
             struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
 
             // using mode = 2 for GPT-NeoX mode
-            Qcur = ggml_rope(ctx0, Qcur, n_past, n_rot, 2);
-            Kcur = ggml_rope(ctx0, Kcur, n_past, n_rot, 2);
+            Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2);
+            Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2);
 
             // store key and value to memory
             {
@@ -514,16 +514,16 @@ bool dollyv2_eval(
 
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             struct ggml_tensor * V =
@@ -622,7 +622,7 @@ bool dollyv2_eval(
     }
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
8 changes: 4 additions & 4 deletions examples/gpt-2/main.cpp
@@ -527,18 +527,18 @@ bool gpt2_eval(
         // KQ_scaled = KQ / sqrt(n_embd/n_head)
         // [n_past + N, N, 12]
         struct ggml_tensor * KQ_scaled =
-            ggml_scale(ctx0,
+            ggml_scale_inplace(ctx0,
                     KQ,
                     ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                     );
 
         // KQ_masked = mask_past(KQ_scaled)
         // [n_past + N, N, 12]
-        struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+        struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
         // KQ = soft_max(KQ_masked)
         // [n_past + N, N, 12]
-        struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+        struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
         // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
         // [n_past + N, 64, 12]
@@ -665,7 +665,7 @@ bool gpt2_eval(
     inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
12 changes: 6 additions & 6 deletions examples/gpt-j/main.cpp
@@ -446,8 +446,8 @@ bool gptj_eval(
 
         // self-attention
         {
-            struct ggml_tensor * Qcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
-            struct ggml_tensor * Kcur = ggml_rope(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
+            struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
+            struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
 
             // store key and value to memory
             {
@@ -481,16 +481,16 @@ bool gptj_eval(
 
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             struct ggml_tensor * V =
@@ -574,7 +574,7 @@ bool gptj_eval(
     }
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
12 changes: 6 additions & 6 deletions examples/gpt-neox/main.cpp
@@ -502,8 +502,8 @@ bool gpt_neox_eval(
             struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
 
             // using mode = 2 for GPT-NeoX mode
-            Qcur = ggml_rope(ctx0, Qcur, n_past, n_rot, 2);
-            Kcur = ggml_rope(ctx0, Kcur, n_past, n_rot, 2);
+            Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2);
+            Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2);
 
             // store key and value to memory
             {
@@ -537,16 +537,16 @@ bool gpt_neox_eval(
 
             // KQ_scaled = KQ / sqrt(n_embd/n_head)
             struct ggml_tensor * KQ_scaled =
-                ggml_scale(ctx0,
+                ggml_scale_inplace(ctx0,
                         KQ,
                         ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                         );
 
             // KQ_masked = mask_past(KQ_scaled)
-            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+            struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
             // KQ = soft_max(KQ_masked)
-            struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
             // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
             struct ggml_tensor * V =
@@ -621,7 +621,7 @@ bool gpt_neox_eval(
     }
 
     // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
     // run the computation
     ggml_build_forward_expand(&gf, inpL);
8 changes: 4 additions & 4 deletions examples/starcoder/main.cpp
@@ -537,18 +537,18 @@ bool starcoder_eval(
         // KQ_scaled = KQ / sqrt(n_embd/n_head)
         // [n_past + N, N, 12]
         struct ggml_tensor * KQ_scaled =
-            ggml_scale(ctx0,
+            ggml_scale_inplace(ctx0,
                     KQ,
                     ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
                     );
 
         // KQ_masked = mask_past(KQ_scaled)
         // [n_past + N, N, 12]
-        struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+        struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
 
         // KQ = soft_max(KQ_masked)
         // [n_past + N, N, 12]
-        struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+        struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
 
         // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
         // [n_past + N, 64, 12]
@@ -675,7 +675,7 @@ bool starcoder_eval(
    inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
 
    // logits -> probs
-    //inpL = ggml_soft_max(ctx0, inpL);
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
 
    // run the computation
    ggml_build_forward_expand(&gf, inpL);
