From 89758723c75ba594e401f6513751beeba7ca1d28 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Tue, 23 Jan 2024 14:12:57 +0200
Subject: minor : clean-up some warnings and style (#5094)

* minor : clean-up some warnings and style

ggml-ci

* ggml : add comment
---
 examples/llava/clip.cpp | 47 +++++++++++++++++++----------------------------
 1 file changed, 19 insertions(+), 28 deletions(-)

(limited to 'examples/llava/clip.cpp')

diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 6161fd85..4a0338a3 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -2,18 +2,6 @@
 // so there might be still unnecessary artifacts hanging around
 // I'll gradually clean and extend it
 
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
 #include "clip.h"
 #include "ggml.h"
 #include "ggml-alloc.h"
@@ -30,6 +18,19 @@
 #define STB_IMAGE_IMPLEMENTATION
 #include "stb_image.h"
 
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
 static std::string format(const char * fmt, ...) {
     va_list ap;
     va_list ap2;
@@ -217,9 +218,9 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
 
 static void print_tensor_info(const ggml_tensor* tensor, const char* prefix = "") {
     size_t tensor_size = ggml_nbytes(tensor);
-    printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%d, %d, %d, %d], type: %d\n",
+    printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
            prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
-           tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->type);
+           tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], ggml_type_name(tensor->type));
 }
 
 static projector_type clip_projector_type_from_string(const std::string & name) {
@@ -592,7 +593,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
                 mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
                 mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
                 // stride = 1, padding = 1, bias is nullptr
-                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, nullptr, 1, 1, 1, 1, 1, 1);
+                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);
 
                 // layer norm
                 // // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
@@ -640,7 +641,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
             // block_2
             {
                 // stride = 2
-                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, nullptr, 2, 2, 1, 1, 1, 1);
+                block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);
                 // block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
 
                 // layer norm
@@ -741,18 +742,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     {
         std::map<enum ggml_type, uint32_t> n_type;
 
-        uint32_t n_type_max = 0;
-        enum ggml_type type_max = GGML_TYPE_F32;
-
         for (int i = 0; i < n_tensors; i++) {
             enum ggml_type type = gguf_get_tensor_type(ctx, i);
 
             n_type[type]++;
-
-            if (n_type_max < n_type[type]) {
-                n_type_max = n_type[type];
-                type_max   = type;
-            }
         }
 
         printf("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
@@ -795,14 +788,12 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             size_t tensor_size = ggml_nbytes(cur);
             buffer_size += tensor_size;
             if (verbosity >= 3) {
-                printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%d, %d, %d, %d], type: %d\n", __func__, i,
-                       ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], type);
+                printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
+                       __func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
             }
         }
     }
 
-
-    buffer_size += n_tensors * 128 /* CLIP PADDING */;
 
     clip_ctx * new_clip = new clip_ctx;
 
--
cgit v1.2.3