author    Kawrakow <48489457+ikawrakow@users.noreply.github.com>    2024-03-22 19:47:14 +0100
committer GitHub <noreply@github.com>    2024-03-22 20:47:14 +0200
commit    1d0331c12a2f2a6296b471232bd4e66fbf06e6a1 (patch)
tree      4417697e55b3a70c97c6655b37491a485a3b9797 /llama.cpp
parent    dba1af612926cbd4ebe2d876277af1e3305177e0 (diff)
quantize: options for output and token embedding tensors qtype (#6239)
* quantize: be able to specify the output tensor type
* quantize: be able to specify the token embedding tensor type

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp | 47 ++++++++++++++++++++++++++++-------------------
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index aa6c8924..eedca802 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -12141,27 +12141,34 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
// for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
// with the quantization of the output tensor
if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
- int nx = tensor->ne[0];
- if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
- new_type = GGML_TYPE_Q8_0;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
- new_type = GGML_TYPE_Q5_K;
- }
- else if (new_type != GGML_TYPE_Q8_0) {
- new_type = GGML_TYPE_Q6_K;
+ if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
+ new_type = qs.params->output_tensor_type;
+ } else {
+ int nx = tensor->ne[0];
+ if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
+ new_type = GGML_TYPE_Q8_0;
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
+ ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
+ new_type = GGML_TYPE_Q5_K;
+ }
+ else if (new_type != GGML_TYPE_Q8_0) {
+ new_type = GGML_TYPE_Q6_K;
+ }
}
} else if (name == "token_embd.weight") {
- if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
- new_type = GGML_TYPE_Q2_K;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
- new_type = GGML_TYPE_IQ3_S;
- }
- else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
- new_type = GGML_TYPE_IQ3_S;
+ if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
+ new_type = qs.params->token_embedding_type;
+ } else {
+ if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S) {
+ new_type = GGML_TYPE_Q2_K;
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
+ new_type = GGML_TYPE_IQ3_S;
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+ new_type = GGML_TYPE_IQ3_S;
+ }
}
} else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
@@ -13051,6 +13058,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
struct llama_model_quantize_params result = {
/*.nthread =*/ 0,
/*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
+ /*.output_tensor_type =*/ GGML_TYPE_COUNT,
+ /*.token_embedding_type =*/ GGML_TYPE_COUNT,
/*.allow_requantize =*/ false,
/*.quantize_output_tensor =*/ true,
/*.only_copy =*/ false,
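
Usage note: both new fields default to GGML_TYPE_COUNT, which acts as "not set"; any value below GGML_TYPE_COUNT bypasses the built-in heuristic for the corresponding tensor, as the first hunk shows. Below is a minimal sketch of driving the new options through the public quantization API (assuming the llama.h declarations as of this commit; file names are placeholders):

    // Sketch: quantize a model while pinning the output and token-embedding
    // tensor types via the fields added in this commit.
    #include "llama.h"
    #include <cstdio>

    int main() {
        llama_backend_init();

        llama_model_quantize_params params = llama_model_quantize_default_params();
        params.ftype                = LLAMA_FTYPE_MOSTLY_Q4_K_M; // base mix for all other tensors
        params.output_tensor_type   = GGML_TYPE_Q8_0;            // overrides output.weight
        params.token_embedding_type = GGML_TYPE_Q4_K;            // overrides token_embd.weight
        // Leaving either field at GGML_TYPE_COUNT keeps the heuristic above.

        uint32_t rc = llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &params);
        if (rc != 0) {
            fprintf(stderr, "quantization failed\n");
        }

        llama_backend_free();
        return rc == 0 ? 0 : 1;
    }

The rest of the commit (not shown here, since this view is limited to llama.cpp) presumably wires these options into the quantize tool's command line; the API route above is the programmatic equivalent.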