author    Georgi Gerganov <ggerganov@gmail.com>    2023-04-20 23:32:59 +0300
committer Georgi Gerganov <ggerganov@gmail.com>    2023-04-20 23:32:59 +0300
commit    12b5900dbc9743dee3ce83513cf5c3a44523a1b6 (patch)
tree      ae9f5b7ed722ea0c52bd434eb56373011c59d402 /llama.cpp
parent    9ff334f3c9b960a44c5e149b08c748a2914fb882 (diff)
ggml : sync ggml (add GPT-NeoX RoPE implementation)
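For context, GPT-NeoX-style RoPE rotates element i together with element i + n_dims/2 (split-half pairing), whereas the original GPT-J-style RoPE rotates adjacent pairs (2i, 2i+1). The sketch below is a minimal standalone illustration of the NeoX variant only; it is not ggml's actual kernel, and the function name and vector-based interface are invented for the example.

    #include <cmath>
    #include <vector>

    // Apply GPT-NeoX-style RoPE in place to one attention head of size n_dims
    // at sequence position pos. Element i is rotated against element i + n_dims/2.
    void rope_neox(std::vector<float> & x, int n_dims, int pos, float theta_base = 10000.0f) {
        const int half = n_dims / 2;
        for (int i = 0; i < half; ++i) {
            // theta_i = pos * base^(-2i / n_dims), the standard RoPE frequency schedule
            const float theta = pos * std::pow(theta_base, -2.0f * i / n_dims);
            const float c = std::cos(theta);
            const float s = std::sin(theta);
            const float x0 = x[i];
            const float x1 = x[i + half];
            x[i]        = x0 * c - x1 * s;
            x[i + half] = x0 * s + x1 * c;
        }
    }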
Diffstat (limited to 'llama.cpp')
-rw-r--r--   llama.cpp   5
1 file changed, 5 insertions, 0 deletions
--- a/llama.cpp
+++ b/llama.cpp
@@ -1618,6 +1618,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         // quantize only 2D tensors
         quantize &= (tensor.ne.size() == 2);
 
+        // GG: uncomment this to keep the output layer in FP16
+        //if (tensor.name.rfind("output")) {
+        //    quantize = false;
+        //}
+
         enum ggml_type new_type;
         void * new_data;
         size_t new_size;
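The commented-out block sketches a name-based guard that would exempt the output layer from quantization. Below is a minimal standalone illustration of how such a guard could look if enabled, assuming the intent is to match tensors whose name starts with "output"; the struct and helper function here are hypothetical stand-ins, not llama.cpp's actual types.

    #include <string>

    // Hypothetical stand-in for the loader's per-tensor record.
    struct tensor_info {
        std::string name;
    };

    // Return false for tensors that should stay in FP16, true otherwise.
    // rfind(prefix, 0) == 0 is the usual pre-C++20 "starts with" idiom:
    // searching backwards from position 0 can only match at the start.
    bool should_quantize(const tensor_info & tensor) {
        if (tensor.name.rfind("output", 0) == 0) {
            return false; // keep the output layer in FP16
        }
        return true;
    }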