diff options
author | Jared Van Bortel <jared@nomic.ai> | 2024-03-23 18:48:02 -0400 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-03-23 18:48:02 -0400 |
commit | 94d1b3b4119209efcdd08df0dceaecbd1fe7f85c (patch) | |
tree | 2238dd7ef96c0dd0a0576437585e8667f81e665a /llama.cpp | |
parent | 95562175f83a49755ff6fd3bad09409417c8e6f9 (diff) |
use _wfopen instead of fopen on Windows (#6248)
also fix missing #defines before windows.h, and BPE LF token on MSVC
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 4 |
1 file changed, 2 insertions, 2 deletions
@@ -1065,7 +1065,7 @@ struct llama_file {
     size_t size;

     llama_file(const char * fname, const char * mode) {
-        fp = std::fopen(fname, mode);
+        fp = ggml_fopen(fname, mode);
         if (fp == NULL) {
             throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
         }
@@ -4006,7 +4006,7 @@ static void llm_load_vocab(
         } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
             vocab.linefeed_id = vocab.special_pad_id;
         } else {
-            const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
+            const std::vector<int> ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A
             GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
             vocab.linefeed_id = ids[0];
         }