author    Georgi Gerganov <ggerganov@gmail.com>  2023-12-01 20:39:12 +0200
committer Georgi Gerganov <ggerganov@gmail.com>  2023-12-01 20:39:12 +0200
commit    5a7d3125e7c24f223659b7f0b7aa7736986e92c0 (patch)
tree      3c1dc34bf7c237e2076a1e6283e13e76e441b157
parent    d5a1cbde60531d02ac74da27ea355182e3a4d516 (diff)
llama : avoid using "optional" keyword (#4283)
-rw-r--r--  llama.cpp | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 99964ec0..3f5d663c 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1991,11 +1991,11 @@ struct llama_model_loader {
         return tensor;
     }
 
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
 
         if (cur == NULL) {
-            if (optional) {
+            if (!required) {
                 return NULL;
             }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
@@ -2816,10 +2816,10 @@ static void llm_load_tensors(
                         layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
 
                         // optional bias tensors
-                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, true);
-                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend, true);
-                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend, true);
-                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend, true);
+                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, false);
+                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend, false);
+                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend, false);
+                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend, false);
 
                         layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
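
The patch flips the sense of the trailing boolean rather than just renaming it: call sites that previously passed optional = true now pass required = false, and the default changes from optional = false to required = true, so existing calls that omit the argument keep their throw-on-missing behavior. Below is a minimal standalone sketch of the same pattern, assuming a toy Tensor struct, registry map, and tensor names for illustration; this is not llama.cpp's actual loader.

#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

struct Tensor { std::string name; };  // toy stand-in for ggml_tensor

// toy stand-in for the model's tensor metadata (only the output weight exists)
static std::map<std::string, Tensor> registry = {
    { "blk.0.attn_output.weight", { "blk.0.attn_output.weight" } },
};

// Mirrors the patched signature: "required" defaults to true, so plain
// lookups still throw on a missing tensor, while optional tensors are
// requested with required = false and come back as NULL when absent.
static Tensor * create_tensor(const std::string & name, bool required = true) {
    auto it = registry.find(name);
    if (it == registry.end()) {
        if (!required) {
            return NULL;  // absent but optional: the caller handles NULL
        }
        throw std::runtime_error("tensor '" + name + "' not found");
    }
    return &it->second;
}

int main() {
    // required tensor: present, returned as usual
    Tensor * wo = create_tensor("blk.0.attn_output.weight");

    // optional bias: missing, so we get NULL instead of an exception,
    // matching how the loader treats the attention bias tensors above
    Tensor * bq = create_tensor("blk.0.attn_q.bias", /*required =*/ false);

    printf("wo: %s\n", wo->name.c_str());
    printf("bq: %s\n", bq ? bq->name.c_str() : "(absent)");
    return 0;
}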