author    Steffen Röcker <sroecker@gmail.com>    2024-05-18 10:04:55 +0200
committer GitHub <noreply@github.com>            2024-05-18 11:04:55 +0300
commit    0f98acfac6cc561dc57586bfff778405e42b576b (patch)
tree      5ced0f623f9124ae87bc02566bf717636fbfbbac
parent    ca57e0f35e33f714b9a6c2c4482b87bfe059c819 (diff)
llama : add support for larger Granite Code Models (20B, 34B) (#7324)
Tie the weights for ARCH_STARCODER to support the larger Granite code models. Partially addresses ggerganov/issues/7116. A few things still remain to fix; currently this requires `--override-kv tokenizer.ggml.add_bos_token=bool:false`.
-rw-r--r--  llama.cpp | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index b752ddc6..2b91eec8 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5188,7 +5188,14 @@ static bool llm_load_tensors(
{
model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd});
- model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+ model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+ if (!model.output) {
+ // needs to be on GPU
+ model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+ ml.n_created--; // artificial tensor
+ ml.size_data += ggml_nbytes(model.output);
+ }
+
}
for (int i = 0; i < n_layer; ++i) {
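
The hunk above makes LLM_TENSOR_OUTPUT optional (the extra `false` argument to `ml.create_tensor`): when the GGUF file ships without a separate output matrix, the loader falls back to LLM_TENSOR_TOKEN_EMBD, i.e. the output projection is tied to the token embeddings, which is how the larger Granite code models are exported. `ml.n_created--` compensates for the fact that no new file tensor was read, and `ml.size_data += ggml_nbytes(model.output)` keeps the memory accounting consistent because the shared matrix is materialized a second time, presumably so the output head can be offloaded to the GPU independently of the embedding table (the "needs to be on GPU" comment). The sketch below illustrates the same tied-embedding fallback in isolation; the `tensor` and `model_loader` types are simplified stand-ins invented for the example, not the llama.cpp API.

```cpp
// Minimal, self-contained sketch of the tied-embedding fallback.
// The loader and tensor types are simplified stand-ins, not llama.cpp code.
#include <cstdio>
#include <map>
#include <string>

struct tensor {
    std::string name;
    size_t      nbytes;
};

struct model_loader {
    std::map<std::string, tensor> file_tensors; // tensors present in the model file
    int    n_created = 0;                       // how many model tensors were handed out
    size_t size_data = 0;                       // bytes that will be materialized in memory

    // Return the named tensor; complain if a required tensor is missing,
    // return nullptr if an optional one is missing.
    tensor * create_tensor(const std::string & name, bool required = true) {
        auto it = file_tensors.find(name);
        if (it == file_tensors.end()) {
            if (required) {
                fprintf(stderr, "missing required tensor: %s\n", name.c_str());
            }
            return nullptr;
        }
        n_created++;
        size_data += it->second.nbytes;
        return &it->second;
    }
};

int main() {
    // A checkpoint that ships only the token embedding: the output head is tied.
    model_loader ml;
    ml.file_tensors["token_embd.weight"] = {"token_embd.weight", /*nbytes=*/49152ull * 6144 * 2};

    tensor * tok_embd = ml.create_tensor("token_embd.weight");                 // always present
    tensor * output   = ml.create_tensor("output.weight", /*required=*/false); // optional

    if (!output) {
        // Tied embeddings: reuse the token embedding matrix as the output projection.
        output = ml.create_tensor("token_embd.weight");
        ml.n_created--; // same file tensor as tok_embd, so do not count it twice
        // size_data keeps the extra increment: the weights are materialized again
        // so the output head can live in its own (possibly GPU) buffer.
    }

    printf("tok_embd=%s output=%s n_created=%d size_data=%zu\n",
           tok_embd->name.c_str(), output->name.c_str(), ml.n_created, ml.size_data);
    return 0;
}
```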