Diffstat (limited to 'common/common.h')
 -rw-r--r--  common/common.h | 88
 1 file changed, 46 insertions(+), 42 deletions(-)
diff --git a/common/common.h b/common/common.h
index a8e5e50e..f68f3c29 100644
--- a/common/common.h
+++ b/common/common.h
@@ -27,7 +27,7 @@
#define die_fmt(fmt, ...) do { fprintf(stderr, "error: " fmt "\n", __VA_ARGS__); exit(1); } while (0)
#define print_build_info() do { \
-    fprintf(stderr, "%s: build = %d (%s)\n", __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT); \
+    fprintf(stderr, "%s: build = %d (%s)\n",      __func__, LLAMA_BUILD_NUMBER, LLAMA_COMMIT);      \
fprintf(stderr, "%s: built with %s for %s\n", __func__, LLAMA_COMPILER, LLAMA_BUILD_TARGET); \
} while(0)
@@ -35,14 +35,18 @@
// build info
extern int LLAMA_BUILD_NUMBER;
-extern char const *LLAMA_COMMIT;
-extern char const *LLAMA_COMPILER;
-extern char const *LLAMA_BUILD_TARGET;
+extern char const * LLAMA_COMMIT;
+extern char const * LLAMA_COMPILER;
+extern char const * LLAMA_BUILD_TARGET;
struct llama_control_vector_load_info;
-int get_math_cpu_count();
-int32_t get_num_physical_cores();
+//
+// CPU utils
+//
+
+int32_t cpu_get_num_physical_cores();
+int32_t cpu_get_num_math();
//
// CLI argument parsing
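
For callers elsewhere in the tree, the CPU-utils rename is mechanical. A minimal sketch of an updated call site, assuming only that common.h is on the include path (the main() wrapper is illustrative, not part of this patch):

    #include "common.h"

    #include <cstdio>

    int main() {
        // Formerly get_num_physical_cores() and get_math_cpu_count().
        printf("physical cores: %d\n", cpu_get_num_physical_cores());
        printf("math threads:   %d\n", cpu_get_num_math());
        return 0;
    }
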
@@ -51,7 +55,7 @@ int32_t get_num_physical_cores();
struct gpt_params {
uint32_t seed                 = LLAMA_DEFAULT_SEED; // RNG seed
-int32_t n_threads             = get_math_cpu_count();
+int32_t n_threads             = cpu_get_num_math();
int32_t n_threads_draft       = -1;
int32_t n_threads_batch       = -1; // number of threads to use for batch processing (-1 = use n_threads)
int32_t n_threads_batch_draft = -1;
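
Since n_threads now defaults to cpu_get_num_math(), existing overrides keep working unchanged. A hedged sketch (configure_threads is a hypothetical helper, not part of the patch):

    #include "common.h"

    // Hypothetical helper: apply a user-requested thread count on top of the
    // cpu_get_num_math() default; the -1 sentinels on the batch/draft counts
    // still mean "inherit n_threads" at the use sites.
    void configure_threads(gpt_params & params, int32_t requested) {
        if (requested > 0) {
            params.n_threads = requested;
        }
    }
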
@@ -179,33 +183,34 @@ struct gpt_params {
void gpt_params_handle_model_default(gpt_params & params);
-bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
-
-bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params);
-
-bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
+bool gpt_params_parse_ex (int argc, char ** argv, gpt_params & params);
+bool gpt_params_parse (int argc, char ** argv, gpt_params & params);
+bool gpt_params_find_arg (int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param);
+void gpt_params_print_usage(int argc, char ** argv, const gpt_params & params);
-void gpt_print_usage(int argc, char ** argv, const gpt_params & params);
+std::string gpt_params_get_system_info(const gpt_params & params);
-bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_params & params, int & i, bool & invalid_param);
-
-std::string get_system_info(const gpt_params & params);
+//
+// String utils
+//
-std::string gpt_random_prompt(std::mt19937 & rng);
+std::vector<std::string> string_split(std::string input, char separator);
-void process_escapes(std::string& input);
+std::string string_strip(const std::string & str);
+std::string string_get_sortable_timestamp();
+std::string string_random_prompt(std::mt19937 & rng);
-bool validate_file_name(const std::string & filename);
+bool string_parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides);
+void string_process_escapes(std::string & input);
//
-// String utils
+// Filesystem utils
//
-std::vector<llama_sampler_type> sampler_types_from_names(const std::vector<std::string> & names, bool allow_alt_names);
-std::vector<llama_sampler_type> sampler_types_from_chars(const std::string & names_string);
-std::vector<std::string> string_split(std::string input, char separator);
-std::string string_strip(const std::string & str);
-std::string sampler_type_to_name_string(llama_sampler_type sampler_type);
+bool fs_validate_filename(const std::string & filename);
+bool fs_create_directory_with_parents(const std::string & path);
+
+std::string fs_get_cache_directory();
//
// Model utils
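
The string and filesystem helpers keep their signatures; only the names and section groupings change. A short sketch exercising the renamed calls together (save_lines is hypothetical; the filename check was validate_file_name() before this patch):

    #include "common.h"

    #include <cstdio>
    #include <string>

    // Hypothetical helper exercising the renamed string/filesystem utils.
    void save_lines(const std::string & fname, const std::string & raw) {
        if (!fs_validate_filename(fname)) { // formerly validate_file_name()
            return;
        }
        const std::string text = string_strip(raw);
        for (const auto & line : string_split(text, '\n')) {
            printf("%s\n", line.c_str());
        }
    }
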
@@ -277,29 +282,14 @@ std::string llama_detokenize_bpe(
bool llama_should_add_bos_token(const llama_model * model);
//
-// YAML utils
-//
-
-bool create_directory_with_parents(const std::string & path);
-std::string get_cache_directory();
-void dump_vector_float_yaml(FILE * stream, const char * prop_name, const std::vector<float> & data);
-void dump_vector_int_yaml(FILE * stream, const char * prop_name, const std::vector<int> & data);
-void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const char * data);
-std::string get_sortable_timestamp();
-
-void dump_non_result_info_yaml(
- FILE * stream, const gpt_params & params, const llama_context * lctx,
- const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
-
-//
// KV cache utils
//
// Dump the KV cache view with the number of sequences per cell.
-void dump_kv_cache_view(const llama_kv_cache_view & view, int row_size = 80);
+void llama_kv_cache_dump_view(const llama_kv_cache_view & view, int row_size = 80);
// Dump the KV cache view showing individual sequences in each cell (long output).
-void dump_kv_cache_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
+void llama_kv_cache_dump_view_seqs(const llama_kv_cache_view & view, int row_size = 40);
//
// Embedding utils
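
The KV-cache dump helpers pick up a llama_ prefix but keep their arguments. A sketch of the updated calls, assuming the llama_kv_cache_view API from llama.h as before (debug_dump_kv is a hypothetical wrapper):

    #include "common.h"
    #include "llama.h"

    // Hypothetical wrapper around the renamed dump helpers.
    void debug_dump_kv(llama_context * ctx, int32_t n_seq_max) {
        llama_kv_cache_view view = llama_kv_cache_view_init(ctx, n_seq_max);
        llama_kv_cache_view_update(ctx, &view);
        llama_kv_cache_dump_view(view);          // formerly dump_kv_cache_view()
        llama_kv_cache_dump_view_seqs(view, 40); // formerly dump_kv_cache_view_seqs()
        llama_kv_cache_view_free(&view);
    }
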
@@ -333,6 +323,20 @@ llama_control_vector_data llama_control_vector_load(const std::vector<llama_cont
//
// Split utils
//
+
static const char * const LLM_KV_SPLIT_NO = "split.no";
static const char * const LLM_KV_SPLIT_COUNT = "split.count";
static const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
+
+//
+// YAML utils
+//
+
+void yaml_dump_vector_float (FILE * stream, const char * prop_name, const std::vector<float> & data);
+void yaml_dump_vector_int (FILE * stream, const char * prop_name, const std::vector<int> & data);
+void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const char * data);
+
+void yaml_dump_non_result_info(
+ FILE * stream, const gpt_params & params, const llama_context * lctx,
+ const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
+
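
The YAML dumpers likewise keep their signatures under the new yaml_ prefix. A sketch of a caller (write_run_log is hypothetical; the old names were dump_vector_float_yaml and dump_string_yaml_multiline):

    #include "common.h"

    #include <cstdio>
    #include <vector>

    // Hypothetical caller of the renamed YAML dumpers.
    void write_run_log(FILE * stream, const std::vector<float> & logits) {
        yaml_dump_vector_float(stream, "logits", logits);
        yaml_dump_string_multiline(stream, "prompt", "first line\nsecond line");
    }
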