diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2024-03-14 22:58:41 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-03-14 22:58:41 +0200 |
commit | 4755afd1cbd40d93c017e5b98c39796f52345314 (patch) | |
tree | 57eb729da1c1bee6588e9979999a7012cc0d44b3 /llama.cpp | |
parent | 6e0438da3cc95b89cdbf55f45fa4e324d9076792 (diff) |
llama : fix integer overflow during quantization (#6063)
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
@@ -11977,7 +11977,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     return new_type;
 }

-static int32_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
+static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int chunk_size, int nrows, int n_per_row, const float * imatrix, std::vector<std::thread> & workers, const int nthread) {
     std::mutex mutex;
     int counter = 0;
     size_t new_size = 0;