author | Steffen Röcker <sroecker@gmail.com> | 2024-05-18 10:04:55 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-05-18 11:04:55 +0300 |
commit | 0f98acfac6cc561dc57586bfff778405e42b576b (patch) | |
tree | 5ced0f623f9124ae87bc02566bf717636fbfbbac | |
parent | ca57e0f35e33f714b9a6c2c4482b87bfe059c819 (diff) | |
llama : add support for larger Granite Code Models (20B, 34B) (#7324)
Tie the weights for ARCH_STARCODER to support the larger Granite code models.
Partially addresses ggerganov/issues/7116
There are still a few things left to fix.
Currently requires `--override-kv tokenizer.ggml.add_bos_token=bool:false`
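
For context, "tying the weights" means the output (logits) projection reuses the token-embedding matrix instead of storing a separate output tensor. Below is a minimal conceptual sketch in plain C++ of that idea; it does not use the ggml/llama.cpp API, and the `Matrix` type and function names are illustrative only:

```cpp
#include <cstdio>
#include <vector>

// Illustrative only: a dense row-major matrix, not a ggml tensor.
struct Matrix {
    size_t rows, cols;
    std::vector<float> data;                      // rows * cols values
    float at(size_t r, size_t c) const { return data[r * cols + c]; }
};

// With tied weights the same (n_vocab x n_embd) matrix serves two roles:
//  1) embedding lookup: the hidden state for a token is its row,
//  2) output projection: logits are dot products of the hidden state
//     against every row, so no separate output.weight tensor is needed.
std::vector<float> embed(const Matrix & tok_embd, size_t token) {
    std::vector<float> h(tok_embd.cols);
    for (size_t j = 0; j < tok_embd.cols; ++j) {
        h[j] = tok_embd.at(token, j);
    }
    return h;
}

std::vector<float> output_logits(const Matrix & tok_embd, const std::vector<float> & hidden) {
    std::vector<float> logits(tok_embd.rows);     // one logit per vocabulary entry
    for (size_t v = 0; v < tok_embd.rows; ++v) {
        float dot = 0.0f;
        for (size_t j = 0; j < tok_embd.cols; ++j) {
            dot += tok_embd.at(v, j) * hidden[j];
        }
        logits[v] = dot;
    }
    return logits;
}

int main() {
    // Toy example: vocabulary of 3 tokens, embedding dimension 2.
    Matrix tok_embd{3, 2, {0.1f, 0.2f,  0.3f, 0.4f,  0.5f, 0.6f}};
    std::vector<float> h = embed(tok_embd, 1);
    std::vector<float> logits = output_logits(tok_embd, h);
    for (float l : logits) std::printf("%.2f\n", l);
    return 0;
}
```

In the diff below, the same idea shows up as loading `LLM_TENSOR_OUTPUT` optionally (the trailing `false`); when the GGUF has no separate output tensor, the token-embedding weights are loaded in its place, and the loader's bookkeeping (`ml.n_created--`, `ml.size_data += ...`) is adjusted for this artificial tensor.
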
-rw-r--r-- | llama.cpp | 9 |
1 files changed, 8 insertions, 1 deletions
```diff
@@ -5188,7 +5188,14 @@ static bool llm_load_tensors(
                 {
                     model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                     model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd});
-                    model.output        = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                    model.output        = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+                    if (!model.output) {
+                        // needs to be on GPU
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                        ml.n_created--; // artificial tensor
+                        ml.size_data += ggml_nbytes(model.output);
+                    }
+
                 }

                 for (int i = 0; i < n_layer; ++i) {
```