author    Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-01-14 16:21:12 +0200
committer GitHub <noreply@github.com>  2024-01-14 16:21:12 +0200
commit    467a882fd2e5b6172897b49aa45aa29bd3f27685 (patch)
tree      39f03df9b8418028c59380a8a4555395ba13f685 /examples
parent    bb0c1392479398f9aba86d9ec98db0b95ede6e6d (diff)
Add ability to use importance matrix for all k-quants (#4930)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'examples')
 examples/quantize/quantize.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index f4e2175f..2ae04693 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -82,7 +82,7 @@ static void usage(const char * executable) {
printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
printf(" --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
printf(" --pure: Disable k-quant mixtures and quantize all tensors to the same type\n");
- printf(" --imatrixfile_name: use data in file_name as importance matrix for quant optimizations\n");
+ printf(" --imatrix file_name: use data in file_name as importance matrix for quant optimizations\n");
printf(" --include-weights tensor_name: use importance matrix for this/these tensor(s)\n");
printf(" --exclude-weights tensor_name: use importance matrix for this/these tensor(s)\n");
printf("Note: --include-weights and --exclude-weights cannot be used together\n");