summaryrefslogtreecommitdiff
path: root/llama.cpp
diff options
context:
space:
mode:
authorGeorgi Gerganov <ggerganov@gmail.com>2023-04-20 23:32:59 +0300
committerGeorgi Gerganov <ggerganov@gmail.com>2023-04-20 23:32:59 +0300
commit12b5900dbc9743dee3ce83513cf5c3a44523a1b6 (patch)
treeae9f5b7ed722ea0c52bd434eb56373011c59d402 /llama.cpp
parent9ff334f3c9b960a44c5e149b08c748a2914fb882 (diff)
ggml : sync ggml (add GPT-NeoX RoPE implementation)
Diffstat (limited to 'llama.cpp')
-rw-r--r-- llama.cpp | 5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index e4c414c2..4a646eb9 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1618,6 +1618,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
// quantize only 2D tensors
quantize &= (tensor.ne.size() == 2);
+ // GG: uncomment this to keep the output layer in FP16
+ //if (tensor.name.rfind("output")) {
+ // quantize = false;
+ //}
+
enum ggml_type new_type;
void * new_data;
size_t new_size;