author    Johannes Gäßler <johannesg@5d6.de>  2023-08-28 17:59:39 +0200
committer GitHub <noreply@github.com>         2023-08-28 17:59:39 +0200
commit    6b73ef120114beb5664ea94aab48d07ed248ee52 (patch)
tree      6d9c777a34a43f7b3ad6185df9639bab9be5c5cd /common/common.h
parent    75fafcbcccc280a5b3883bc76d0a2dabf474d094 (diff)
YAML result logging + preset script (#2657)
Diffstat (limited to 'common/common.h')
-rw-r--r--  common/common.h | 18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/common/common.h b/common/common.h
index 97fda2be..c1537314 100644
--- a/common/common.h
+++ b/common/common.h
@@ -11,6 +11,12 @@
#include <unordered_map>
#include <tuple>
+#ifdef _WIN32
+#define DIRECTORY_SEPARATOR '\\'
+#else
+#define DIRECTORY_SEPARATOR '/'
+#endif // _WIN32
+
//
// CLI argument parsing
//
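
The new DIRECTORY_SEPARATOR macro selects the platform-specific path separator at compile time: a backslash on Windows, a forward slash elsewhere. A minimal sketch of how it might be used to join a directory and a file name; the join_path helper is hypothetical, not part of this patch:

```cpp
#include <string>

// Hypothetical helper (not in this patch): join a directory and a file
// name using the platform-specific separator defined above.
static std::string join_path(const std::string & dir, const std::string & file) {
    if (dir.empty()) {
        return file;
    }
    return dir + DIRECTORY_SEPARATOR + file; // std::string + char is well-defined
}
```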
@@ -61,6 +67,7 @@ struct gpt_params {
std::string input_suffix = ""; // string to suffix user inputs with
std::string grammar = ""; // optional BNF-like grammar to constrain sampling
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
+ std::string logdir = ""; // directory in which to save YAML log files
std::string lora_adapter = ""; // lora adapter path
std::string lora_base = ""; // base model path for the lora adapter
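
The new logdir field names the directory for the YAML log files. Combined with get_sortable_timestamp(), declared further down in this patch, a caller could derive a per-run log file path. A sketch, assuming logdir has no trailing separator and that the file is named after the timestamp with a ".yml" extension (both assumptions, not confirmed by the header):

```cpp
// Sketch: build a per-run YAML log path from the new logdir field.
// The naming convention here is a guess, not taken from the patch.
const std::string log_path = params.logdir + DIRECTORY_SEPARATOR
                           + get_sortable_timestamp() + ".yml";
```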
@@ -82,6 +89,7 @@ struct gpt_params {
bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
bool embedding = false; // get only sentence embedding
+ bool escape = false; // escape "\n", "\r", "\t", "\'", "\"", and "\\"
bool interactive_first = false; // wait for user input immediately
bool multiline_input = false; // reverse the usage of `\`
bool simple_io = false; // improves compatibility with subprocesses and limited consoles
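
The comment on the new escape flag lists the two-character sequences to be unescaped in user-supplied text. One plausible in-place implementation of that behavior (the patch's own implementation lives elsewhere and may differ):

```cpp
#include <cstddef>
#include <string>

// Sketch: rewrite the string in place, converting literal two-character
// sequences such as "\n" into the corresponding control characters.
static void process_escapes(std::string & input) {
    std::size_t out = 0;
    for (std::size_t in = 0; in < input.length(); ++in) {
        if (input[in] == '\\' && in + 1 < input.length()) {
            switch (input[++in]) {
                case 'n':  input[out++] = '\n'; break;
                case 'r':  input[out++] = '\r'; break;
                case 't':  input[out++] = '\t'; break;
                case '\'': input[out++] = '\''; break;
                case '\"': input[out++] = '\"'; break;
                case '\\': input[out++] = '\\'; break;
                default:   // unknown sequence: keep it verbatim
                           input[out++] = '\\';
                           input[out++] = input[in]; break;
            }
        } else {
            input[out++] = input[in];
        }
    }
    input.resize(out);
}
```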
@@ -144,3 +152,13 @@ std::string llama_detokenize_spm(
std::string llama_detokenize_bpe(
llama_context * ctx,
const std::vector<llama_token> & tokens);
+
+bool create_directory_with_parents(const std::string & path);
+void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
+void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
+void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);
+std::string get_sortable_timestamp();
+
+void dump_non_result_info_yaml(
+ FILE * stream, const gpt_params & params, const llama_context * lctx,
+ const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
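
Taken together, the new declarations suggest a logging flow of: create the target directory, open a timestamped file, and dump the run metadata as YAML. A hedged sketch of such a caller; the file naming and error handling are assumptions, not part of this header:

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Sketch: how the new helpers might combine into a logging routine.
static void sketch_dump_yaml_log(const gpt_params & params, const llama_context * lctx,
                                 const std::vector<int> & prompt_tokens,
                                 const char * model_desc) {
    if (params.logdir.empty()) {
        return; // logging disabled
    }
    if (!create_directory_with_parents(params.logdir)) {
        return; // could not create the log directory
    }
    const std::string timestamp = get_sortable_timestamp();
    // The file name convention is an assumption for this sketch.
    const std::string path = params.logdir + DIRECTORY_SEPARATOR + timestamp + ".yml";
    FILE * stream = std::fopen(path.c_str(), "w");
    if (stream == NULL) {
        return; // could not open the log file
    }
    dump_non_result_info_yaml(stream, params, lctx, timestamp, prompt_tokens, model_desc);
    std::fclose(stream);
}
```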