From 154e0d75fccf1784fe9ff6fd76a630b66563da3d Mon Sep 17 00:00:00 2001
From: Kawrakow <48489457+ikawrakow@users.noreply.github.com>
Date: Sat, 27 Jul 2024 07:55:01 +0200
Subject: Merge mainline llama.cpp (#3)

* Merging mainline - WIP

* Merging mainline - WIP

AVX2 and CUDA appear to work. CUDA performance seems slightly (~1-2%)
lower, as is so often the case with llama.cpp/ggml after some
"improvements" have been made.

* Merging mainline - fix Metal

* Remove check

---------

Co-authored-by: Iwan Kawrakow
---
 ggml-vulkan.h | 29 -----------------------------
 1 file changed, 29 deletions(-)
 delete mode 100644 ggml-vulkan.h

diff --git a/ggml-vulkan.h b/ggml-vulkan.h
deleted file mode 100644
index af661c2d..00000000
--- a/ggml-vulkan.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#pragma once
-
-#include "ggml.h"
-#include "ggml-backend.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define GGML_VK_NAME "Vulkan"
-#define GGML_VK_MAX_DEVICES 16
-
-GGML_API void ggml_vk_instance_init(void);
-
-// backend API
-GGML_API GGML_CALL ggml_backend_t ggml_backend_vk_init(size_t dev_num);
-
-GGML_API GGML_CALL bool ggml_backend_is_vk(ggml_backend_t backend);
-GGML_API GGML_CALL int ggml_backend_vk_get_device_count(void);
-GGML_API GGML_CALL void ggml_backend_vk_get_device_description(int device, char * description, size_t description_size);
-GGML_API GGML_CALL void ggml_backend_vk_get_device_memory(int device, size_t * free, size_t * total);
-
-GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num);
-// pinned host buffer for use with the CPU backend for faster copies between CPU and GPU
-GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_host_buffer_type(void);
-
-#ifdef __cplusplus
-}
-#endif
--
cgit v1.2.3
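
For reference, a minimal sketch of how a client would have used the backend
API declared in the deleted header. It assumes a tree where ggml-vulkan.h is
still present (e.g. mainline llama.cpp built with Vulkan support), so it is
illustrative only and not part of this patch; ggml_backend_free() comes from
ggml-backend.h, which the header includes.

    /* Sketch only: assumes ggml-vulkan.h is available, i.e. a Vulkan-enabled
     * build of mainline llama.cpp, not this tree after the deletion above. */
    #include <stdio.h>
    #include "ggml-vulkan.h"

    int main(void) {
        /* Enumerate the Vulkan devices the backend can see. */
        int n_dev = ggml_backend_vk_get_device_count();
        for (int i = 0; i < n_dev; ++i) {
            char desc[128];
            size_t free_mem = 0, total_mem = 0;
            ggml_backend_vk_get_device_description(i, desc, sizeof(desc));
            ggml_backend_vk_get_device_memory(i, &free_mem, &total_mem);
            printf("device %d: %s (%zu of %zu bytes free)\n",
                   i, desc, free_mem, total_mem);
        }

        /* Create a backend on device 0, check its type, then release it. */
        ggml_backend_t backend = ggml_backend_vk_init(0);
        if (backend != NULL && ggml_backend_is_vk(backend)) {
            ggml_backend_free(backend);
        }
        return 0;
    }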