Diffstat (limited to 'examples')
-rw-r--r--  examples/baby-llama/baby-llama.cpp                            | 18
-rw-r--r--  examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp  |  4
-rw-r--r--  examples/finetune/finetune.cpp                                |  2
-rw-r--r--  examples/gguf/gguf.cpp                                        |  2
-rw-r--r--  examples/llava/clip.cpp                                       |  6
5 files changed, 16 insertions, 16 deletions
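
The diff below migrates the examples off direct reads of the ggml_tensor n_dims field: rank assertions move to the ggml shape predicates ggml_is_vector, ggml_is_matrix, and ggml_is_3d, and call sites that need the rank as a number use ggml_n_dims instead.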
diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp
index 8155101d..2dc2988d 100644
--- a/examples/baby-llama/baby-llama.cpp
+++ b/examples/baby-llama/baby-llama.cpp
@@ -1258,9 +1258,9 @@ static struct ggml_tensor * forward_lora(
}

static void sample_softmax(struct ggml_tensor * logits, struct ggml_tensor * probs, struct ggml_tensor * best_samples) {
- assert(logits->n_dims == 2);
- assert(probs->n_dims == 2);
- assert(best_samples->n_dims == 1);
+ assert(ggml_is_matrix(logits));
+ assert(ggml_is_matrix(probs));
+ assert(ggml_is_vector(best_samples));
assert(logits->ne[1] == best_samples->ne[0]);
assert(logits->ne[0] == probs->ne[0]);
assert(logits->ne[1] == probs->ne[1]);
@@ -1292,9 +1292,9 @@ static void sample_softmax_batch(
struct ggml_context * ctx, struct ggml_tensor * logits, struct ggml_tensor * probs,
struct ggml_tensor * best_samples
) {
- GGML_ASSERT(best_samples->n_dims == 2);
- GGML_ASSERT(logits->n_dims == 3);
- GGML_ASSERT(probs->n_dims == 3);
+ GGML_ASSERT(ggml_is_matrix(best_samples));
+ GGML_ASSERT(ggml_is_3d(logits));
+ GGML_ASSERT(ggml_is_3d(probs));
int n_tokens = best_samples->ne[0];
int n_batch = best_samples->ne[1];
int n_vocab = logits->ne[0];
@@ -1334,7 +1334,7 @@ static void print_row(struct ggml_tensor * probs, int i) {
}

static void print_matrix(struct ggml_tensor * probs) {
- assert(probs->n_dims == 2);
+ assert(ggml_is_matrix(probs));
for (int i = 0; i < probs->ne[1]; ++i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = ggml_get_f32_1d(probs, i*probs->ne[0] + k);
@@ -1386,8 +1386,8 @@ static void get_example_targets(int example_id, struct ggml_tensor * tokens_inpu
static void get_example_targets_batch(
struct ggml_context * ctx, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * targets
) {
- GGML_ASSERT(tokens_input->n_dims == 2);
- GGML_ASSERT( targets->n_dims == 3);
+ GGML_ASSERT(ggml_is_matrix(tokens_input));
+ GGML_ASSERT(ggml_is_3d(targets));
int n_tokens = tokens_input->ne[0];
int n_batch = tokens_input->ne[1];
GGML_ASSERT(n_tokens == targets->ne[1]);
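
A minimal sketch of what the three predicates check, assuming ggml's convention that every tensor stores four extents in ne[] with unused trailing dimensions set to 1. The struct and function names below are illustrative stand-ins, not the ggml source:

    #include <stdbool.h>
    #include <stdint.h>

    /* stand-in for the only ggml_tensor field these checks touch */
    struct tensor_like { int64_t ne[4]; };

    /* at most rank 1: every trailing extent is 1 */
    static bool is_vector(const struct tensor_like * t) {
        return t->ne[1] == 1 && t->ne[2] == 1 && t->ne[3] == 1;
    }
    /* at most rank 2: the two trailing extents are 1 */
    static bool is_matrix(const struct tensor_like * t) {
        return t->ne[2] == 1 && t->ne[3] == 1;
    }
    /* at most rank 3: only the last extent is 1 */
    static bool is_3d(const struct tensor_like * t) {
        return t->ne[3] == 1;
    }

Note these are "at most rank" checks under this convention: a [N, 1, 1, 1] tensor passes is_matrix as well as is_vector, since once the stored field is gone a [N] tensor is indistinguishable from a [N, 1] one.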
diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index cae3bf3c..4d41e177 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -427,7 +427,7 @@ static void print_row(struct ggml_tensor * probs, int i) {
}

static void print_matrix(struct ggml_tensor * probs) {
- assert(probs->n_dims == 2);
+ assert(ggml_is_matrix(probs));
for (int i = 0; i < probs->ne[1]; ++i) {
for (int k = 0; k < probs->ne[0]; ++k) {
float p = get_f32_2d(probs, k, i);
@@ -639,7 +639,7 @@ static void load_vocab(const char *filename, Config *config, struct llama_vocab

static void convert_weights_ak_to_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
int ct;
- switch (gg_weights->n_dims){
+ switch (ggml_n_dims(gg_weights)) {
case 1:
ct = 0;
for (int i0 = 0; i0 < gg_weights->ne[0]; i0++){
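
For call sites that need the rank as a number, such as the switch above, the diff uses ggml_n_dims. A sketch of its behavior under the same ne[] convention (n_dims_like is a hypothetical stand-in, not the ggml source):

    #include <stdint.h>

    /* effective rank: one past the last extent greater than 1, never below 1 */
    static int n_dims_like(const int64_t ne[4]) {
        for (int i = 3; i >= 1; --i) {
            if (ne[i] > 1) {
                return i + 1;
            }
        }
        return 1;
    }

    /* n_dims_like((int64_t[]){ 32,  1, 1, 1 }) == 1
       n_dims_like((int64_t[]){ 32, 64, 1, 1 }) == 2
       n_dims_like((int64_t[]){ 32,  1, 8, 1 }) == 3  -- an interior 1 still counts */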
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index af46e44a..b9849e8c 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1110,7 +1110,7 @@ static void write_tensor(struct llama_file * file, struct ggml_tensor * tensor,
name = ggml_get_name(tensor);
}
uint32_t name_len = strlen(name);
- uint32_t nd = tensor->n_dims;
+ uint32_t nd = ggml_n_dims(tensor);
uint32_t ne[4] = { (uint32_t)tensor->ne[0],
(uint32_t)tensor->ne[1],
(uint32_t)tensor->ne[2],
diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
index 9ab63a29..9e24bf24 100644
--- a/examples/gguf/gguf.cpp
+++ b/examples/gguf/gguf.cpp
@@ -195,7 +195,7 @@ static bool gguf_ex_read_1(const std::string & fname) {
struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);

- printf("%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, cur->n_dims, cur->name, cur->data);
+ printf("%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, ggml_n_dims(cur), cur->name, cur->data);

// print first 10 elements
const float * data = (const float *) cur->data;
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 4bb7b93b..11246596 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -514,7 +514,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
ctx_size += padded_size;
if (verbosity >= 3) {
printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, padded_size=%zu, offset=%zu\n", __func__, i,
- cur->n_dims, cur->name, tensor_size, padded_size, offset);
+ ggml_n_dims(cur), cur->name, tensor_size, padded_size, offset);
}
}
}
@@ -962,7 +962,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
}

// quantize only 2D tensors
- quantize &= (cur->n_dims == 2);
+ quantize &= (ggml_n_dims(cur) == 2);

if (quantize) {
new_type = type;
@@ -1035,7 +1035,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
fout.put(0);
}

- printf("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), cur->n_dims, quantize,
+ printf("%s: n_dims = %d | quantize=%d | size = %f MB -> %f MB\n", name.c_str(), ggml_n_dims(cur), quantize,
orig_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
}