author     Georgi Gerganov <ggerganov@gmail.com>  2024-03-22 09:36:03 +0200
committer  GitHub <noreply@github.com>            2024-03-22 09:36:03 +0200
commit     95d576b48ebf582b112d1c9cf4eed7142fa4e464 (patch)
tree       1e298e73e8e8de5d7f93d78272f44630fa7198cb
parent     59c17f02de8fdf7b084d6100b875b7e2bc07a83b (diff)
metal : pad n_ctx by 32 (#6177)

* metal : require ne00 >= 128 for mat-mat kernels

ggml-ci

* llama : pad n_ctx by 32

ggml-ci
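For orientation: "pad n_ctx by 32" means the requested context size is rounded up to the next multiple of 32, so a request of 4097 becomes 4128 while an already-aligned 4096 stays 4096; the sketch after the llama.cpp hunk below shows why this keeps the padded KV cache size in bounds.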
 common/common.cpp            | 2 +-
 examples/batched/batched.cpp | 4 +++-
 llama.cpp                    | 3 +++
 tests/test-backend-ops.cpp   | 7 +++++++
 4 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 11bf554d..cc230c9f 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -101,7 +101,7 @@ int32_t get_num_physical_cores() {
     return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
 }

-void process_escapes(std::string& input) {
+void process_escapes(std::string & input) {
     std::size_t input_len = input.length();
     std::size_t output_idx = 0;
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index ee1f8f1b..7aaf63ce 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -48,6 +48,8 @@ int main(int argc, char ** argv) {
         params.prompt = "Hello my name is";
     }

+    process_escapes(params.prompt);
+
     // init LLM

     llama_backend_init();
@@ -78,7 +80,7 @@ int main(int argc, char ** argv) {
     llama_context_params ctx_params = llama_context_default_params();

     ctx_params.seed = 1234;
-    ctx_params.n_ctx = n_kv_req;
+    ctx_params.n_ctx = n_kv_req;
     ctx_params.n_batch = std::max(n_len, n_parallel);
     ctx_params.n_seq_max = n_parallel;
     ctx_params.n_threads = params.n_threads;
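The added process_escapes(params.prompt) call makes the batched example honor backslash escapes given on the command line before the prompt is tokenized. A minimal sketch of such in-place escape processing, consistent with the input_len/output_idx context lines in the common.cpp hunk above, but with the exact set of handled sequences an assumption rather than the actual common.cpp code:

    #include <string>

    // Rewrite input in place, collapsing two-character sequences such as
    // "\n" into the corresponding control characters (sketch only).
    static void process_escapes_sketch(std::string & input) {
        std::size_t input_len  = input.length();
        std::size_t output_idx = 0;

        for (std::size_t input_idx = 0; input_idx < input_len; ++input_idx) {
            if (input[input_idx] == '\\' && input_idx + 1 < input_len) {
                switch (input[++input_idx]) {
                    case 'n':  input[output_idx++] = '\n'; break;
                    case 't':  input[output_idx++] = '\t'; break;
                    case '\\': input[output_idx++] = '\\'; break;
                    default:   // keep unrecognized sequences verbatim
                        input[output_idx++] = '\\';
                        input[output_idx++] = input[input_idx];
                        break;
                }
            } else {
                input[output_idx++] = input[input_idx];
            }
        }

        input.resize(output_idx);
    }

With this in place, a shell argument like "Hello\nworld" reaches the model as two lines instead of a literal backslash-n.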
diff --git a/llama.cpp b/llama.cpp
index 1a9fe0c4..9de4a860 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
     cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

+    // this is necessary due to kv_self.n being padded later during inference
+    cparams.n_ctx = GGML_PAD(cparams.n_ctx, 32);
+
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
     cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
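The new comment is the key constraint: during inference the effective KV cache size kv_self.n is itself rounded up before use, so n_ctx must already be a multiple of 32 for the rounded value to stay within the allocated cache. A minimal sketch of that invariant, with a plain round-up helper standing in for ggml's GGML_PAD macro (the helper and the exact padding site are illustrative assumptions):

    #include <cassert>

    // round x up to the next multiple of n (what GGML_PAD is used for above)
    static int pad_up(int x, int n) { return (x + n - 1) / n * n; }

    int main() {
        const int n_ctx_requested = 4097;
        const int n_ctx = pad_up(n_ctx_requested, 32); // 4128, as the new line above computes

        // however many cells are in use, the padded count never exceeds n_ctx;
        // without the padding, 4097 used cells would round up to 4128 and
        // index past the end of a 4097-cell cache
        for (int n_used = 0; n_used <= n_ctx_requested; ++n_used) {
            assert(pad_up(n_used, 32) <= n_ctx);
        }
        return 0;
    }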
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index c2916c3e..1998e1cb 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -2091,6 +2091,13 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op_name) {
         }
     }

+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2, 128, { 8, 1}, {1, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2, 128, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64,  2,  64, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  83,  2,  64, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32,  64, 45, 128, { 8, 1}, {4, 1}));
+    test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 128, 45,  64, { 8, 1}, {4, 1}));
+
     for (ggml_type type_a : all_types) {
         for (ggml_type type_b : {GGML_TYPE_F32 /*, GGML_TYPE_F16 */}) {
             for (int n_mats : {2, 4, 8}) {
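Assuming the test_mul_mat argument order (type_a, type_b, m, n, k, batch, repeat) with k mapping to ne00 of the first operand, the six new F16 x F32 cases deliberately straddle the ne00 >= 128 requirement from the commit title, so both the Metal mat-mat kernel and whatever path handles smaller rows get covered. A rough tabulation under that assumption:

    #include <cstdio>

    int main() {
        // k (= ne00) for the six newly added test_mul_mat cases above
        const int ks[] = {128, 128, 64, 64, 128, 64};
        for (int k : ks) {
            std::printf("ne00 = %3d -> %s\n", k,
                        k >= 128 ? "eligible for the mat-mat kernel"
                                 : "below the threshold (fallback path)");
        }
        return 0;
    }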