commit    8f43e551038af2547b5c01d0e9edd641c0e4bd29 (patch)
tree      07a4373620a9381d0b5c7189a475990a6feb48a5 /examples/export-lora
parent    f5d1af61d79fb53ccfbac2e665e43208c07b083d (diff)
author    Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-08-12 15:14:32 +0200
committer GitHub <noreply@github.com>  2024-08-12 15:14:32 +0200

Merge mainline - Aug 12 2024 (#17)

* Merge mainline
* Fix after merge
* Remove CI check

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'examples/export-lora')
-rw-r--r--  examples/export-lora/export-lora.cpp | 24
 1 file changed, 5 insertions(+), 19 deletions(-)
diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp
index 150f7e8d..3176d6e2 100644
--- a/examples/export-lora/export-lora.cpp
+++ b/examples/export-lora/export-lora.cpp
@@ -50,20 +50,6 @@ static struct gguf_context * load_gguf(std::string & fname, struct ggml_context
     return ctx_gguf;
 }
 
-static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
-    std::string result;
-    for (size_t pos = 0; ; pos += search.length()) {
-        auto new_pos = s.find(search, pos);
-        if (new_pos == std::string::npos) {
-            result += s.substr(pos, s.size() - pos);
-            break;
-        }
-        result += s.substr(pos, new_pos - pos) + replace;
-        pos = new_pos;
-    }
-    s = std::move(result);
-}
-
 struct file_input {
     struct ggml_context * ctx_meta = nullptr;
     struct gguf_context * ctx_gguf = nullptr;
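
The replace_all() helper deleted in the hunk above duplicated a generic string-replace utility; after this merge the example presumably relies on a shared helper provided elsewhere in the tree (the shared definition is not part of this diff). A minimal standalone sketch of the same behavior, using only the C++ standard library, with replace_all_sketch() as a hypothetical stand-in:

    #include <cstdio>
    #include <string>

    // Sketch of the string-replace behavior the deleted helper provided.
    // The original loop assumed a non-empty search string; guard it here.
    static void replace_all_sketch(std::string & s, const std::string & search, const std::string & replace) {
        if (search.empty()) {
            return; // an empty search string would make the loop below never terminate
        }
        std::string result;
        for (size_t pos = 0; ; pos += search.length()) {
            auto new_pos = s.find(search, pos);
            if (new_pos == std::string::npos) {
                result += s.substr(pos, s.size() - pos);
                break;
            }
            result += s.substr(pos, new_pos - pos) + replace;
            pos = new_pos;
        }
        s = std::move(result);
    }

    int main() {
        std::string name = "blk.0.attn_q.weight";       // sample tensor name, illustration only
        replace_all_sketch(name, ".weight", ".lora_a");
        printf("%s\n", name.c_str());                    // prints: blk.0.attn_q.lora_a
        return 0;
    }
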
@@ -135,7 +121,7 @@ struct lora_merge_ctx {
 
     lora_merge_ctx(
             std::string & base_fname,
-            std::vector<std::tuple<std::string, float>> & lora_files,
+            std::vector<llama_lora_adapter_info> & lora_files,
             std::string & outfile,
             int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
         fout.exceptions(std::ofstream::failbit); // fail fast on write errors
@@ -144,9 +130,9 @@ struct lora_merge_ctx {
             throw std::runtime_error("split model is not yet supported");
         }
 
-        for (auto lora_inp : lora_files) {
-            auto fname = std::get<0>(lora_inp);
-            auto scale = std::get<1>(lora_inp);
+        for (auto & lora_inp : lora_files) {
+            auto fname = lora_inp.path;
+            auto scale = lora_inp.scale;
             std::unique_ptr<file_input> adapter(new file_input(fname, scale));
             check_metadata_lora(adapter.get());
             adapters.push_back(std::move(adapter));
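
The two hunks above replace the bare (path, scale) tuples with llama_lora_adapter_info records; the definition of that struct comes from the merged common code and is not shown in this diff. A minimal sketch of the new access pattern, assuming a hypothetical record type with just the two fields the loop uses (path and scale):

    #include <cstdio>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for llama_lora_adapter_info; only the two fields
    // referenced by the updated loop (path and scale) are assumed here.
    struct lora_adapter_info_sketch {
        std::string path;
        float scale;
    };

    int main() {
        // Example adapter list, analogous to what the constructor now receives.
        std::vector<lora_adapter_info_sketch> lora_files = {
            { "adapter-a.gguf", 1.0f },
            { "adapter-b.gguf", 0.5f },
        };

        // Same access pattern as the updated loop: named fields instead of std::get<>.
        for (auto & lora_inp : lora_files) {
            auto fname = lora_inp.path;
            auto scale = lora_inp.scale;
            printf("merging %s with scale %.2f\n", fname.c_str(), scale);
        }
        return 0;
    }
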
@@ -407,7 +393,7 @@ int main(int argc, char ** argv) {
 
     g_verbose = (params.verbosity == 1);
     try {
-        lora_merge_ctx ctx(params.model, params.lora_adapter, params.lora_outfile, params.n_threads);
+        lora_merge_ctx ctx(params.model, params.lora_adapters, params.lora_outfile, params.n_threads);
         ctx.run_merge();
     } catch (const std::exception & err) {
         fprintf(stderr, "%s\n", err.what());
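
Finally, main() now forwards params.lora_adapters, which the common argument parser is expected to populate from the LoRA-related command-line flags (that parsing is outside this diff). A hypothetical sketch of how repeated --lora / --lora-scaled arguments could be collected into such a list, just to make the new data flow concrete:

    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <vector>

    // Hypothetical mirror of the adapter record used by the merge context.
    struct lora_adapter_info_sketch {
        std::string path;
        float scale;
    };

    // Hypothetical parser: collects "--lora FILE" (scale 1.0) and
    // "--lora-scaled FILE SCALE" arguments into an adapter list, roughly the
    // shape of data main() now forwards as params.lora_adapters.
    static std::vector<lora_adapter_info_sketch> parse_lora_args(int argc, char ** argv) {
        std::vector<lora_adapter_info_sketch> out;
        for (int i = 1; i < argc; i++) {
            std::string arg = argv[i];
            if (arg == "--lora" && i + 1 < argc) {
                out.push_back({ argv[++i], 1.0f });
            } else if (arg == "--lora-scaled" && i + 2 < argc) {
                std::string path = argv[++i];
                float scale = std::strtof(argv[++i], nullptr);
                out.push_back({ path, scale });
            }
        }
        return out;
    }

    int main(int argc, char ** argv) {
        for (const auto & a : parse_lora_args(argc, argv)) {
            printf("adapter: %s (scale %.2f)\n", a.path.c_str(), a.scale);
        }
        return 0;
    }
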