author    | Kawrakow <iwankawrakow@gmail.com> | 2025-03-07 08:54:09 +0200
committer | GitHub <noreply@github.com>       | 2025-03-07 08:54:09 +0200
commit    | c67a37b251fc22b0f8b8313ea5c76a73ff6ed49f (patch)
tree      | 472c093202025b58d6bc4a8de431bd470f48efde
parent    | 7bdbf99bbdbfe46b01f7783a7c98a30a1558e2c3 (diff)
Custom quantization rules with regular expressions (#244)
* Custom quantization rules with regular expressions
* Add the --custom-q option to the help
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
-rw-r--r-- | examples/quantize/quantize.cpp | 31
-rw-r--r-- | include/llama.h                |  1
-rw-r--r-- | src/llama.cpp                  | 19
3 files changed, 51 insertions, 0 deletions
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 916f57ec..89de794b 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -144,6 +144,7 @@ static void usage(const char * executable) {
     printf("  --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
     printf("  --output-tensor-type ggml_type: use this ggml_type for the output.weight tensor.\n");
     printf("  --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
+    printf("  --custom-q regex1=type1,regex2=type2...: use this to specify custom quantization type rules.\n\n");
     printf("Additional specific tensor quantization types used in the custom quant scheme 'CQS (default is Q2_K):\n");
     printf("  --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
     printf("  --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
@@ -290,6 +291,28 @@ static ggml_type parse_ggml_type(const char * arg) {
     return result;
 }
 
+using CustomQ = std::pair<std::string, ggml_type>;
+
+static bool parse_custom_quants(const std::string& arg, std::vector<CustomQ>& custom_quants) {
+    for (const auto & item : string_split<std::string>(arg, ',')) {
+        auto pos = item.find('=');
+        if (pos == std::string::npos) {
+            fprintf(stderr, "Invalid custom quantization input %s\n", arg.c_str());
+            return false;
+        }
+        auto pattern = item.substr(0, pos);
+        auto type_as_string = item.substr(pos + 1);
+        auto type = parse_ggml_type(type_as_string.c_str());
+        if (type == GGML_TYPE_COUNT) {
+            fprintf(stderr, "Invalid quantization type '%s' in custom quantization input %s\n", type_as_string.c_str(), item.c_str());
+            return false;
+        }
+        printf("Adding custom rule %s -> %s\n", pattern.c_str(), ggml_type_name(type));
+        custom_quants.emplace_back(std::move(pattern), type);
+    }
+    return true;
+}
+
 int main(int argc, char ** argv) {
     if (argc < 3) {
         usage(argv[0]);
@@ -301,6 +324,7 @@ int main(int argc, char ** argv) {
     std::string imatrix_file;
     std::vector<std::string> included_weights, excluded_weights;
     std::vector<llama_model_kv_override> kv_overrides;
+    std::vector<CustomQ> custom_quants;
 
     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
@@ -371,6 +395,10 @@ int main(int argc, char ** argv) {
             if (arg_idx == argc-1 || !string_parse_kv_override(argv[++arg_idx], kv_overrides)) {
                 usage(argv[0]);
             }
+        } else if (strcmp(argv[arg_idx], "--custom-q") == 0) {
+            if (arg_idx == argc-1 || !parse_custom_quants(argv[++arg_idx], custom_quants)) {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--allow-requantize") == 0) {
             params.allow_requantize = true;
         } else if (strcmp(argv[arg_idx], "--pure") == 0) {
@@ -451,6 +479,9 @@ int main(int argc, char ** argv) {
         kv_overrides.back().key[0] = 0;
         params.kv_overrides = &kv_overrides;
     }
+    if (!custom_quants.empty()) {
+        params.custom_quants = &custom_quants;
+    }
 
     llama_backend_init();
 
diff --git a/include/llama.h b/include/llama.h
index 38a12744..5e86cb68 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -418,6 +418,7 @@ extern "C" {
         bool ignore_imatrix_rules;  // If set to true, the built-in rules for refusing to quantize into certain quants without imatrix are ignored
         void * imatrix;             // pointer to importance matrix data
         void * kv_overrides;        // pointer to vector containing overrides
+        void * custom_quants;       // pointer to vector containing custom quantization rules
     } llama_model_quantize_params;
 
     // grammar types
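With the option wired up as above, the rules reach the library as a vector of regex/type pairs. A hypothetical invocation (the binary and model file names are illustrative, not taken from the patch):

    ./llama-quantize --custom-q "attn_v\.weight=q6_K,ffn_down=q5_K" model-f16.gguf model-out.gguf q4_K_M

Each comma-separated item must have the form regex=type; parse_custom_quants rejects an item with no '=' or with a type name that parse_ggml_type does not recognize, in which case main() falls through to usage() rather than silently ignoring the rule.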
diff --git a/src/llama.cpp b/src/llama.cpp
index e246dec9..9c9739e9 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -16283,6 +16283,19 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
     };
 
+    auto custom_type = GGML_TYPE_COUNT;
+    if (qs.params->custom_quants) {
+        using CustomQ = std::pair<std::string, ggml_type>;
+        auto& q_rules = *static_cast<const std::vector<CustomQ>*>(qs.params->custom_quants);
+        for (auto& rule : q_rules) {
+            std::regex pattern(rule.first);
+            if (std::regex_search(name, pattern)) {
+                custom_type = rule.second;
+                break;
+            }
+        }
+    }
+
     //auto get_layer = [] (const char * name) {
     //    int il;
     //    if (sscanf(name, "blk.%d.", &il) == 1) return il;
@@ -16752,6 +16765,11 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         ++qs.i_ffn_up;
     }
 
+    if (custom_type < GGML_TYPE_COUNT) {
+        new_type = custom_type;
+        LLAMA_LOG_INFO("Using custom type %s for tensor %s\n", ggml_type_name(new_type), name.c_str());
+    }
+
     //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
     //}
     // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
@@ -17791,6 +17809,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.ignore_imatrix_rules =*/ false,
         /*.imatrix              =*/ nullptr,
         /*.kv_overrides         =*/ nullptr,
+        /*.custom_quants        =*/ nullptr,
     };
 
     return result;
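For clarity, here is a minimal standalone sketch (not part of the patch) of the first-match-wins lookup the first hunk adds to llama_tensor_get_type: rules are tried in the order given on the command line, so a more specific pattern should precede a broader one. Plain strings stand in for ggml_type values to keep the sketch self-contained:

    #include <cstdio>
    #include <regex>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
        // Rule order matters: the first pattern that matches the tensor name wins.
        std::vector<std::pair<std::string, std::string>> rules = {
            { "attn_v\\.weight", "q6_K" },  // more specific rule first
            { "ffn_down",        "q5_K" },  // broader fallback
        };
        const char * tensors[] = { "blk.0.attn_v.weight", "blk.0.ffn_down.weight", "blk.0.ffn_up.weight" };
        for (const char * t : tensors) {
            std::string name = t;
            std::string chosen = "<ftype default>";  // what llama_tensor_get_type would otherwise pick
            for (const auto & rule : rules) {
                if (std::regex_search(name, std::regex(rule.first))) {
                    chosen = rule.second;
                    break;  // first match wins, as in the patch
                }
            }
            printf("%-24s -> %s\n", name.c_str(), chosen.c_str());
        }
        return 0;
    }

Note that the patch constructs each std::regex inside the per-tensor lookup; with many rules and a large model, pre-compiling the patterns once would avoid the repeated regex construction.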