author     Sebastián A <sebastian.aedo29@gmail.com>    2023-03-12 17:15:00 -0300
committer  GitHub <noreply@github.com>                 2023-03-12 22:15:00 +0200
commit     eb062bb012c4e131818dd757a6d3a757fdee3961 (patch)
tree       9c7865b0f4e8457252ce6b0b936496cc48d08723 /main.cpp
parent     7027a97837c351e0a7bc48db2027af368de382db (diff)
Windows fixes (#31)
* Apply fixes suggested to build on Windows.
  Issue: https://github.com/ggerganov/llama.cpp/issues/22
* Remove unsupported VLAs.
* MSVC: Remove features that are only available in C++20 mode.
* Fix zero initialization of the other fields.
* Change the use of vector for stack allocations (see the sketch below).
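Two of the bullets above concern stack buffers: MSVC does not implement C99 variable-length arrays, which GCC and Clang accept in C++ as an extension, so runtime-sized buffers have to move to heap-backed storage such as std::vector. Those changes land outside main.cpp and do not appear in the diff below; the following is a minimal sketch of the replacement pattern, with a hypothetical fill_scratch function standing in for the code actually touched:

    #include <vector>

    // Hypothetical illustration of the VLA -> std::vector rewrite;
    // the real buffers changed by this commit are not in main.cpp.
    void fill_scratch(int n) {
        // float scratch[n];            // VLA: C99 feature, rejected by MSVC in C++
        std::vector<float> scratch(n);  // portable runtime-sized buffer
        for (int i = 0; i < n; ++i) {
            scratch[i] = 0.0f;
        }
    }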
Diffstat (limited to 'main.cpp')
-rw-r--r--  main.cpp  12
1 file changed, 7 insertions(+), 5 deletions(-)
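Both ggml_init_params hunks below stem from the same limitation: MSVC's C++ front end accepts designated initializers only from C++20 on, while the project targeted an earlier standard. The portable rewrite switches to positional initialization and keeps the field names as comments, which stays readable but is only truthful while the initializers match the struct's declaration order. A minimal sketch, assuming the two-field shape of ggml_init_params implied by the diff (the real definition lives in ggml.h):

    #include <cstddef>

    // Assumed stand-in with the two-field shape seen in the diff below;
    // the real struct is declared in ggml.h.
    struct ggml_init_params {
        size_t mem_size;
        void * mem_buffer;
    };

    int main() {
        size_t ctx_size = 1024;

        // C99-style designated initializers, rejected by MSVC in C++
        // before C++20:
        //     struct ggml_init_params params = { .mem_size = ctx_size, .mem_buffer = NULL };

        // Portable positional form, field names preserved as comments:
        struct ggml_init_params params = {
            /*.mem_size   =*/ ctx_size,
            /*.mem_buffer =*/ NULL,
        };

        return params.mem_size == ctx_size ? 0 : 1;
    }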
diff --git a/main.cpp b/main.cpp
index f02b5ddb..a11d755a 100644
--- a/main.cpp
+++ b/main.cpp
@@ -209,8 +209,8 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
     // create the ggml context
     {
         struct ggml_init_params params = {
-            .mem_size   = ctx_size,
-            .mem_buffer = NULL,
+            /*.mem_size   =*/ ctx_size,
+            /*.mem_buffer =*/ NULL,
         };
 
         model.ctx = ggml_init(params);
@@ -546,12 +546,13 @@ bool llama_eval(
     }
 
     struct ggml_init_params params = {
-        .mem_size   = buf_size,
-        .mem_buffer = buf,
+        /*.mem_size   =*/ buf_size,
+        /*.mem_buffer =*/ buf,
     };
 
     struct ggml_context * ctx0 = ggml_init(params);
-    struct ggml_cgraph gf = { .n_threads = n_threads };
+    ggml_cgraph gf = {};
+    gf.n_threads = n_threads;
 
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
@@ -733,6 +734,7 @@ bool llama_eval(
 }
 
 int main(int argc, char ** argv) {
+    ggml_time_init();
     const int64_t t_main_start_us = ggml_time_us();
 
     gpt_params params;
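The final hunk matters specifically on Windows: the high-resolution counter behind microsecond timestamps has to be calibrated once, by querying its tick frequency, before counter values can be converted to microseconds, and ggml_time_init() performs that setup, so it must run before the first ggml_time_us() call. A sketch of the usual Win32 pattern, with hypothetical names my_time_init/my_time_us; this assumes the QueryPerformance APIs and is not necessarily ggml's exact implementation:

    #include <cstdint>

    #if defined(_WIN32)
    #include <windows.h>

    // Ticks per second of the performance counter; fixed at boot,
    // so it only needs to be queried once.
    static int64_t timer_freq = 0;

    // Hypothetical stand-in for ggml_time_init().
    void my_time_init(void) {
        LARGE_INTEGER freq;
        QueryPerformanceFrequency(&freq);
        timer_freq = freq.QuadPart;
    }

    // Hypothetical stand-in for ggml_time_us().
    int64_t my_time_us(void) {
        LARGE_INTEGER t;
        QueryPerformanceCounter(&t);
        return (t.QuadPart * 1000000LL) / timer_freq;  // ticks -> microseconds
    }
    #endif

In this sketch, skipping my_time_init() would leave timer_freq at zero and the first conversion would divide by zero; the same ordering concern is why ggml_time_init() is inserted ahead of t_main_start_us.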