diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2024-04-25 14:27:20 +0300 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-04-25 14:27:20 +0300 |
commit | aa750c1ede6232c91de890a14a7731d6daa2bc8e (patch) | |
tree | e0e407a4afa8c6538ed92d1e5d0b9896b1183cbf /llama.cpp | |
parent | 1966eb2615242f224bf9ca939db8905ab6a174a0 (diff) |
tests : minor bash stuff (#6902)
* tests : minor bash stuff
ggml-ci
* llama : fix build
ggml-ci
* tests : fix CUR_DIR -> ROOT_DIR
ggml-ci
* tests : fix fname
ggml-ci
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 4 |
1 file changed, 2 insertions, 2 deletions
@@ -14574,7 +14574,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             fout.close();
         }
     };
-    auto new_ofstream = [&](int index = 0) {
+    auto new_ofstream = [&](int index) {
         cur_split = index;
         GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
         std::string fname = fname_out;
@@ -14592,7 +14592,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     };

     const auto tn = LLM_TN(model.arch);
-    new_ofstream();
+    new_ofstream(0);
     for (int i = 0; i < ml.n_tensors; ++i) {
         auto weight = ml.get_weight(i);
         struct ggml_tensor * tensor = weight->tensor;