From eb062bb012c4e131818dd757a6d3a757fdee3961 Mon Sep 17 00:00:00 2001
From: Sebastián A
Date: Sun, 12 Mar 2023 17:15:00 -0300
Subject: Windows fixes (#31)

* Apply fixes suggested to build on windows

Issue: https://github.com/ggerganov/llama.cpp/issues/22

* Remove unsupported VLAs

* MSVC: Remove features that are only available on MSVC C++20.

* Fix zero initialization of the other fields.

* Change the use of vector for stack allocations.
---
 quantize.cpp | 1 +
 1 file changed, 1 insertion(+)

(limited to 'quantize.cpp')

diff --git a/quantize.cpp b/quantize.cpp
index 0ae53733..14c7b277 100644
--- a/quantize.cpp
+++ b/quantize.cpp
@@ -289,6 +289,7 @@ bool llama_model_quantize(const std::string & fname_inp, const std::string & fna
 // ./llama-quantize models/llama/ggml-model.bin models/llama/ggml-model-quant.bin type
 //
 int main(int argc, char ** argv) {
+    ggml_time_init();
     if (argc != 4) {
         fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n", argv[0]);
         fprintf(stderr, "  type = 2 - q4_0\n");
--
cgit v1.2.3
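
Note (not part of the patch above): the hunk shown here only adds ggml_time_init() at the top of main(), so that ggml's timing helpers (ggml_time_ms()/ggml_time_us()) are initialized before use, which matters in particular on Windows. The other items in the commit message, removing VLAs and switching stack allocations to std::vector, touch code outside this excerpt. The sketch below is a minimal, hypothetical illustration of that VLA-to-std::vector pattern; the function name and sizes are invented for the example and do not appear in quantize.cpp.

    #include <cstdio>
    #include <vector>

    // Hypothetical helper: replaces a C99 VLA ("float buf[n];"), which MSVC
    // does not support, with a std::vector sized at runtime.
    static void scale_row(int n, float factor) {
        std::vector<float> buf(n, 1.0f);   // heap-backed, portable to MSVC
        for (int i = 0; i < n; ++i) {
            buf[i] *= factor;
        }
        printf("buf[%d] = %f\n", n - 1, buf.back());
    }

    int main() {
        scale_row(8, 0.5f);
        return 0;
    }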