author     Kawrakow <iwankawrakow@gmail.com>  2025-03-21 10:51:37 +0100
committer  GitHub <noreply@github.com>        2025-03-21 10:51:37 +0100
commit     022660f7aba973c149e011eac5c4b3dfea02618d (patch)
tree       3d704e35e7df73da83709c69334c417c4eb5317c
parent     ddc8eee10ee9216de57429167e6f74e618577d93 (diff)
Specify tensor name regex for tensors to be repacked (#274)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
-rw-r--r--  examples/quantize/quantize.cpp | 14
-rw-r--r--  include/llama.h                |  1
-rw-r--r--  src/llama.cpp                  | 41
3 files changed, 52 insertions(+), 4 deletions(-)
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 84ea38d4..1d00c874 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -146,6 +146,7 @@ static void usage(const char * executable) {
     printf(" --token-embedding-type ggml_type: use this ggml_type for the token_embd.weight tensor.\n\n");
     printf(" --custom-q regex1=type1,regex2=type2...: use this to specify custom quantization type rules.\n\n");
     printf(" --repack Repack all tensors to the corresponding _r4/8 variant if available.\n\n");
+    printf(" --repack-pattern Comma separated list of regexs to use for matching tensor names to be repacked.\n\n");
     printf("Additional specific tensor quantization types used in the custom quant scheme 'CQS (default is Q2_K):\n");
     printf(" --attn-q-type ggml_type: use this ggml_type for the attn_q.weight tensor.\n");
     printf(" --attn-k-type ggml_type: use this ggml_type for the attn_k.weight tensor.\n");
@@ -327,6 +328,8 @@ int main(int argc, char ** argv) {
     std::vector<llama_model_kv_override> kv_overrides;
     std::vector<CustomQ> custom_quants;
 
+    std::vector<std::string> repack_patterns;
+
     for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
         if (strcmp(argv[arg_idx], "--leave-output-tensor") == 0) {
             params.quantize_output_tensor = false;
@@ -334,6 +337,13 @@ int main(int argc, char ** argv) {
             params.ignore_imatrix_rules = true;
         } else if (strcmp(argv[arg_idx], "--repack") == 0) {
             params.only_repack = true;
+        } else if (strcmp(argv[arg_idx], "--repack-pattern") == 0) {
+            if (arg_idx < argc-1) {
+                auto p = string_split(argv[++arg_idx], ',');
+                repack_patterns.insert(repack_patterns.end(), p.begin(), p.end());
+            } else {
+                usage(argv[0]);
+            }
         } else if (strcmp(argv[arg_idx], "--output-tensor-type") == 0) {
             if (arg_idx < argc-1) {
                 params.output_tensor_type = parse_ggml_type(argv[++arg_idx]);
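string_split here is the repo's common helper; a minimal stand-in with the single-character-delimiter behavior this call relies on might look like the sketch below. Details of the real helper, such as its handling of empty fields, are assumptions.

#include <sstream>
#include <string>
#include <vector>

// Minimal stand-in for the string_split helper used above: splits
// "attn_v,ffn_down" into {"attn_v", "ffn_down"}. The real helper lives in
// the repo's common code and may treat empty fields differently.
static std::vector<std::string> string_split(const std::string & s, char delim) {
    std::vector<std::string> parts;
    std::stringstream ss(s);
    std::string item;
    while (std::getline(ss, item, delim)) {
        parts.push_back(item);
    }
    return parts;
}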
@@ -431,6 +441,10 @@ int main(int argc, char ** argv) {
         }
     }
 
+    if (!repack_patterns.empty()) {
+        params.repack_pattern = &repack_patterns;
+    }
+
     if (argc - arg_idx < 2) {
         printf("%s: bad arguments\n", argv[0]);
         usage(argv[0]);
diff --git a/include/llama.h b/include/llama.h
index 66e9af02..1b9c47e9 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -420,6 +420,7 @@ extern "C" {
         void * imatrix;        // pointer to importance matrix data
         void * kv_overrides;   // pointer to vector containing overrides
         void * custom_quants;  // pointer to vector containing custom quantization rules
+        void * repack_pattern; // pointer to a vector containing regexes to be used for matching tensor names. Can be null
     } llama_model_quantize_params;
 
     // grammar types
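Like its neighbors, the new field is an opaque void* that src/llama.cpp (below) casts back to const std::vector<std::string> *. A hedged caller-side sketch of the resulting contract:

// Sketch only: pass the address of a std::vector<std::string> and keep the
// vector alive for the duration of the quantize call; the library does not
// copy it. Leaving the field null repacks all tensors.
std::vector<std::string> patterns = { "attn_v\\.weight", "ffn_down" };
llama_model_quantize_params qp = llama_model_quantize_default_params();
qp.repack_pattern = &patterns;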
diff --git a/src/llama.cpp b/src/llama.cpp
index 33b69389..dfe445b8 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -17348,20 +17348,39 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     bool is_repacked = ml.ftype >= LLAMA_FTYPE_MOSTLY_Q4_0_R8 && ml.ftype <= LLAMA_FTYPE_MOSTLY_Q8_K_R8;
     int n_to_repack = 0, n_to_modify = 0;
 
+    const std::vector<std::string> * repack_pattern = nullptr;
+    if (params->repack_pattern) repack_pattern = (const std::vector<std::string> *)params->repack_pattern;
+
     for (int i = 0; i < ml.n_tensors; ++i) {
         const struct ggml_tensor * meta = ml.get_tensor_meta(i);
 
+        const std::string name = ggml_get_name(meta);
+
         if (params->only_repack) {
             auto repacked_type = (ggml_type)iqk_repacked_type(meta);
+            bool repack = false, modify = false;
             if (repacked_type != meta->type) {
-                ++n_to_repack;
+                repack = true;
             } else if (!is_repacked) {
-                if (iqk_should_modify_tensor(meta)) ++n_to_modify;
+                if (iqk_should_modify_tensor(meta)) {
+                    modify = true;
+                }
             }
+            if ((repack || modify) && repack_pattern) {
+                bool found = false;
+                for (auto& r : *repack_pattern) {
+                    std::regex pattern(r);
+                    if (std::regex_search(name, pattern)) {
+                        found = true;
+                        break;
+                    }
+                }
+                if (!found) repack = modify = false;
+            }
+            if (repack) ++n_to_repack;
+            else if (modify) ++n_to_modify;
         }
 
-        const std::string name = ggml_get_name(meta);
-
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos ||
             name.find("attn_qkv.weight") != std::string::npos) {
@@ -17526,6 +17545,19 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         if (params->only_repack) {
             ggml_type repacked_type = (ggml_type)iqk_repacked_type(tensor);
             bool modify = !is_repacked && iqk_should_modify_tensor(tensor);
+            if ((modify || repacked_type != tensor->type) && repack_pattern) {
+                bool found = false;
+                for (auto& r : *repack_pattern) {
+                    std::regex pattern(r);
+                    if (std::regex_search(tensor->name, pattern)) {
+                        found = true; break;
+                    }
+                }
+                if (!found) {
+                    modify = false;
+                    repacked_type = tensor->type;
+                }
+            }
             if (modify || repacked_type != tensor->type) {
                 new_type = repacked_type;
                 new_size = ggml_nbytes(tensor);
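One implementation note: both matching loops construct a std::regex from every pattern string for every tensor. That is cheap next to quantization itself, but a straightforward variant (hypothetical, not part of the commit) would compile each pattern once:

// Hypothetical: compile the patterns once, reuse across tensors.
std::vector<std::regex> compiled;
if (repack_pattern) {
    compiled.reserve(repack_pattern->size());
    for (const auto & r : *repack_pattern) compiled.emplace_back(r);
}

Also worth knowing: std::regex throws std::regex_error on a malformed pattern, so an invalid --repack-pattern surfaces as an exception during the tensor scan rather than being rejected at argument parsing.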
@@ -18153,6 +18185,7 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.imatrix        =*/ nullptr,
         /*.kv_overrides   =*/ nullptr,
         /*.custom_quants  =*/ nullptr,
+        /*.repack_pattern =*/ nullptr,
     };
 
     return result;
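Putting the pieces together, a hedged end-to-end sketch of driving pattern-restricted repacking through the C API (file names are placeholders; only_repack and llama_model_quantize are used here the way quantize.cpp uses them):

#include "llama.h"
#include <string>
#include <vector>

int main() {
    std::vector<std::string> patterns = { "attn_v\\.weight", "ffn_down" };

    llama_model_quantize_params qp = llama_model_quantize_default_params();
    qp.only_repack    = true;      // repack to _r4/_r8 variants instead of re-quantizing
    qp.repack_pattern = &patterns; // restrict repacking to matching tensors

    // Returns 0 on success; file names are placeholders.
    return llama_model_quantize("model.gguf", "model-repacked.gguf", &qp);
}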