author    Ebey Abraham <ebey97@gmail.com>    2023-12-18 17:27:47 +0000
committer GitHub <noreply@github.com>        2023-12-18 19:27:47 +0200
commit    b9e74f9bca5fdf7d0a22ed25e7a9626335fdfa48 (patch)
tree      b150a0d4490627bfc9cdd758d08d026fc70b0882 /llama.cpp
parent    3c04bf6da89eaf4c7d317e0518f0687dfcbf2de7 (diff)
llama : add phi-2 + fix NeoX rope + ggml_mul_mat_set_prec (#4490)
* phi2 implementation
* fix breaking change
* phi-2 : various fixes
* phi-2 : use layer norm eps
* py : whitespaces
* llama : fix meta KV override bug
* convert : phi don't add BOS token
* convert : revert "added_tokens_decoder" change
* phi-2 : scale Q instead of KQ for better precision
* ggml : fix NeoX rope to rotate just first n_dims
* cuda : less diff in the rope_neox kernel
* ggml : add ggml_mul_mat_set_prec

  ggml-ci

* Update ggml-cuda.cu

  Co-authored-by: slaren <slarengh@gmail.com>

* Update ggml-cuda.cu

  Co-authored-by: slaren <slarengh@gmail.com>

* cuda : ggml_cuda_op_mul_mat_cublas support F32 precision
* cuda : remove obsolete comment

---------

Co-authored-by: Ebey Abraham <ebeyabraham@microsoft.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Co-authored-by: slaren <slarengh@gmail.com>
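In short, the new ggml_mul_mat_set_prec call lets a graph builder request higher accumulation precision for a single matmul node; in this commit only the CUDA cuBLAS path honors the request. A minimal sketch of the usage pattern this diff adds to llm_build_kqv (names exactly as in the diff below):

    // sketch of the pattern added in llm_build_kqv (see the diff below)
    struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);  // attention scores

    if (model.arch == LLM_ARCH_PHI2) {
        // phi-2 overflows F16 accumulation here and produces NaNs,
        // so ask the backend to compute this node with F32 precision
        // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
        ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
    }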
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  307
1 file changed, 275 insertions(+), 32 deletions(-)
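One numerical change is worth spelling out before the diff. "Scale Q instead of KQ" works because the softmax result is identical either way; applying the 1/sqrt(d) factor to Q before the matmul just keeps the intermediate scores small enough for F16. The identity, with d = n_embd_head:

    % applying the scale to Q before the matmul equals scaling the scores after it
    \operatorname{softmax}\!\left(\frac{Q K^{\top}}{\sqrt{d}}\right)
        = \operatorname{softmax}\!\left(\Bigl(\tfrac{Q}{\sqrt{d}}\Bigr) K^{\top}\right)

Accordingly, the phi-2 graph below sets Q_scale = 1.0f/sqrtf(n_embd_head), scales Qcur with it, and passes a plain 1.0f scale into llm_build_kqv, while all other architectures keep scaling inside ggml_soft_max_ext.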
diff --git a/llama.cpp b/llama.cpp
index 99facbf7..edd2910b 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -195,6 +195,7 @@ enum llm_arch {
LLM_ARCH_BLOOM,
LLM_ARCH_STABLELM,
LLM_ARCH_QWEN,
+ LLM_ARCH_PHI2,
LLM_ARCH_UNKNOWN,
};
@@ -212,6 +213,7 @@ static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
{ LLM_ARCH_BLOOM, "bloom" },
{ LLM_ARCH_STABLELM, "stablelm" },
{ LLM_ARCH_QWEN, "qwen" },
+ { LLM_ARCH_PHI2, "phi2" },
};
enum llm_kv {
@@ -550,6 +552,19 @@ static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES =
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
},
},
+ {
+ LLM_ARCH_PHI2,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
{
LLM_ARCH_UNKNOWN,
@@ -1420,6 +1435,7 @@ struct llama_model {
struct ggml_tensor * output_norm;
struct ggml_tensor * output_norm_b;
struct ggml_tensor * output;
+ struct ggml_tensor * output_b;
std::vector<llama_layer> layers;
@@ -2635,6 +2651,15 @@ static void llm_load_hparams(
default: model.type = e_model::MODEL_UNKNOWN;
}
} break;
+ case LLM_ARCH_PHI2:
+ {
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_3B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
default: (void)0;
}
@@ -2987,7 +3012,7 @@ static void llm_load_tensors(
(void) main_gpu;
- enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU;
+ enum ggml_backend_type llama_backend_offload       = GGML_BACKEND_CPU;
enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU;
#ifdef GGML_USE_CUBLAS
@@ -3630,7 +3655,73 @@ static void llm_load_tensors(
}
}
} break;
+ case LLM_ARCH_PHI2:
+ {
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+ model.output_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ vram_weights += ggml_nbytes(model.output_norm_b);
+ vram_weights += ggml_nbytes(model.output);
+ vram_weights += ggml_nbytes(model.output_b);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
+
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
+
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
+
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
+ layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
+
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
+ ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) +
+ ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) +
+ ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) +
+ ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b);
+ }
+ }
+ } break;
default:
throw std::runtime_error("unknown architecture");
}
@@ -3991,6 +4082,7 @@ static struct ggml_tensor * llm_build_ffn(
// if max_alibi_bias > 0 then apply ALiBi
static struct ggml_tensor * llm_build_kqv(
struct ggml_context * ctx,
+ const llama_model & model,
const llama_hparams & hparams,
const llama_kv_cache & kv,
struct ggml_tensor * wo,
@@ -4002,6 +4094,7 @@ static struct ggml_tensor * llm_build_kqv(
int32_t n_tokens,
int32_t n_kv,
float max_alibi_bias,
+ float scale,
const llm_build_cb & cb,
int il) {
const int64_t n_embd = hparams.n_embd;
@@ -4024,6 +4117,12 @@ static struct ggml_tensor * llm_build_kqv(
struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
cb(kq, "kq", il);
+ if (model.arch == LLM_ARCH_PHI2) {
+ // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
+ // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
+ ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
+ }
+
if (max_alibi_bias > 0.0f) {
// temporary branch until we figure out how to handle ggml_alibi through ggml_add
kq = ggml_scale(ctx, kq, kq_scale);
@@ -4043,7 +4142,7 @@ static struct ggml_tensor * llm_build_kqv(
kq = ggml_soft_max(ctx, kq);
cb(kq, "kq_soft_max", il);
} else {
- kq = ggml_soft_max_ext(ctx, kq, kq_mask, 1.0f/sqrtf(float(n_embd_head)));
+ kq = ggml_soft_max_ext(ctx, kq, kq_mask, scale);
cb(kq, "kq_soft_max_ext", il);
}
@@ -4250,9 +4349,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, model.layers[il].bo,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -4433,9 +4532,9 @@ struct llm_build_context {
// apply ALiBi for 13B model
const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f;
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -4557,9 +4656,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -4657,9 +4756,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, model.layers[il].bo,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -4866,9 +4965,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
// TODO: not tested, could be broken
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, model.layers[il].bo,
- Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -4957,9 +5056,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -5054,9 +5153,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, model.layers[il].bo,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -5148,9 +5247,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -5261,9 +5360,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -5320,15 +5419,15 @@ struct llm_build_context {
cb(inpL, "inp_embd", -1);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos= ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(inp_pos, "inp_pos", -1);
// KQ_scale
- struct ggml_tensor * KQ_scale= ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
cb(KQ_scale, "KQ_scale", -1);
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask= ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
// shift the entire K-cache if needed
@@ -5378,9 +5477,9 @@ struct llm_build_context {
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
@@ -5424,6 +5523,122 @@ struct llm_build_context {
return gf;
}
+ struct ggml_cgraph * build_phi2() {
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * attn_norm_output;
+ struct ggml_tensor * ffn_output;
+ struct ggml_tensor * inpL;
+
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
+
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+
+ // Q_scale
+ struct ggml_tensor * Q_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(Q_scale, "Q_scale", -1);
+
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ // shift the entire K-cache if needed
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
+ }
+
+ for (int il = 0; il < n_layer; ++il) {
+ attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ model.layers[il].attn_norm_b,
+ LLM_NORM, cb, il);
+ cb(attn_norm_output, "attn_norm", il);
+
+ // self-attention
+ {
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
+ cb(cur, "wqkv", il);
+
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
+
+ struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+
+ Qcur = ggml_rope_custom(
+ ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Qcur, "Qcur", il);
+
+ Qcur = ggml_scale(ctx0, Qcur, Q_scale);
+ cb(Qcur, "Qcur", il);
+
+ Kcur = ggml_rope_custom(
+ ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Kcur, "Kcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f, cb, il);
+ cb(cur, "kqv_out", il);
+ }
+
+ // FF
+ {
+ ffn_output = llm_build_ffn(ctx0, attn_norm_output,
+ model.layers[il].ffn_up, model.layers[il].ffn_up_b,
+ NULL, NULL,
+ model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+ LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+ cb(ffn_output, "ffn_out", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_output);
+ cb(cur, "l_out", il);
+
+ cur = ggml_add(ctx0, cur, inpL);
+ cb(cur, "l_out", il);
+
+ inpL = cur;
+ }
+
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.output_norm,
+ model.output_norm_b,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output_no_bias", -1);
+
+ cur = ggml_add(ctx0, cur, model.output_b);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
};
//
@@ -5437,7 +5652,7 @@ enum llm_offload_func_e {
OFFLOAD_FUNC_FRC, // force offload
OFFLOAD_FUNC_KQV,
OFFLOAD_FUNC_NR,
- OFFLOAD_FUNC_EMB,
+ OFFLOAD_FUNC_EMB, // embeddings
OFFLOAD_FUNC_OUT,
};
@@ -5522,6 +5737,7 @@ static const std::unordered_map<const char *, llm_offload_func_e> k_offload_map
{ "pos_embd", OFFLOAD_FUNC_NR },
{ "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. rope)
+ { "Q_scale", OFFLOAD_FUNC_FRC },
{ "KQ_scale", OFFLOAD_FUNC_FRC },
{ "KQ_mask", OFFLOAD_FUNC_FRC },
{ "K_shift", OFFLOAD_FUNC_FRC },
@@ -5606,6 +5822,7 @@ static const std::unordered_map<const char *, llm_offload_func_e> k_offload_map
{ "l_out", OFFLOAD_FUNC },
{ "result_norm", OFFLOAD_FUNC_EMB },
+ { "result_output_no_bias", OFFLOAD_FUNC_EMB },
{ "result_output", OFFLOAD_FUNC_OUT },
};
@@ -5623,6 +5840,7 @@ static struct ggml_cgraph * llama_build_graph(
bool alloc_inp_tokens = false;
bool alloc_inp_embd = false;
bool alloc_inp_pos = false;
+ bool alloc_inp_Q_scale = false;
bool alloc_inp_KQ_scale = false;
bool alloc_inp_KQ_mask = false;
bool alloc_inp_K_shift = false;
@@ -5690,7 +5908,7 @@ static struct ggml_cgraph * llama_build_graph(
alloc_inp_pos = true;
}
- if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) {
+ if (!alloc_inp_Q_scale && strcmp(name, "Q_scale") == 0) {
ggml_allocr_alloc(lctx.alloc, cur);
if (!ggml_allocr_is_measure(lctx.alloc)) {
@@ -5698,6 +5916,23 @@ static struct ggml_cgraph * llama_build_graph(
ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head)));
}
+ alloc_inp_Q_scale = true;
+ }
+
+ if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) {
+ ggml_allocr_alloc(lctx.alloc, cur);
+
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ const int64_t n_embd_head = model.hparams.n_embd_head();
+ if (model.arch == LLM_ARCH_PHI2) {
+ // with phi2, we scale the Q to avoid precision issues
+ // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
+ ggml_set_f32(cur, 1.0f);
+ } else {
+ ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head)));
+ }
+ }
+
alloc_inp_KQ_scale = true;
}
@@ -5922,6 +6157,10 @@ static struct ggml_cgraph * llama_build_graph(
{
result = llm.build_qwen();
} break;
+ case LLM_ARCH_PHI2:
+ {
+ result = llm.build_phi2();
+ } break;
default:
GGML_ASSERT(false);
}
@@ -6055,12 +6294,16 @@ static int llama_decode_internal(
ggml_allocr_alloc_graph(lctx.alloc, gf);
- struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
- struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
-
- GGML_ASSERT(strcmp(res->name, "result_output") == 0);
- GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+ // the output is always the last tensor in the graph
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ GGML_ASSERT(strcmp(res->name, "result_output") == 0);
+ // the embeddings could be the second to last tensor, or the third to last tensor
+ struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+ if (strcmp(embeddings->name, "result_norm") != 0) {
+ embeddings = gf->nodes[gf->n_nodes - 3];
+ GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+ }
#ifdef GGML_USE_CUBLAS
for (int i = 0; i < gf->n_leafs; i++) {