From 5e31828d3e35c76ecfee665bc23771a4bec1d130 Mon Sep 17 00:00:00 2001
From: Radoslav Gerganov
Date: Tue, 14 May 2024 14:27:19 +0300
Subject: ggml : add RPC backend (#6829)

* ggml : add RPC backend

The RPC backend proxies all operations to a remote server which runs a
regular backend (CPU, CUDA, Metal, etc).

* set TCP_NODELAY
* add CI workflows
* Address review comments
* fix warning
* implement llama_max_devices() for RPC
* Address review comments
* Address review comments
* wrap sockfd into a struct
* implement get_alignment and get_max_size
* add get_device_memory
* fix warning
* win32 support
* add README
* readme : trim trailing whitespace
* Address review comments
* win32 fix
* Address review comments
* fix compile warnings on macos
---
 examples/rpc/rpc-server.cpp | 70 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)
 create mode 100644 examples/rpc/rpc-server.cpp

diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp
new file mode 100644
index 00000000..496af849
--- /dev/null
+++ b/examples/rpc/rpc-server.cpp
@@ -0,0 +1,70 @@
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#include "ggml-rpc.h"
+#include <string>
+#include <stdio.h>
+
+static ggml_backend_t create_backend() {
+    ggml_backend_t backend = NULL;
+#ifdef GGML_USE_CUDA
+    fprintf(stderr, "%s: using CUDA backend\n", __func__);
+    backend = ggml_backend_cuda_init(0); // init device 0
+    if (!backend) {
+        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
+    }
+#elif GGML_USE_METAL
+    fprintf(stderr, "%s: using Metal backend\n", __func__);
+    backend = ggml_backend_metal_init();
+    if (!backend) {
+        fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
+    }
+#endif
+
+    // if there are no GPU backends, fall back to the CPU backend
+    if (!backend) {
+        fprintf(stderr, "%s: using CPU backend\n", __func__);
+        backend = ggml_backend_cpu_init();
+    }
+    return backend;
+}
+
+static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
+#ifdef GGML_USE_CUDA
+    ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
+#else
+    // TODO: implement for other backends
+    *free_mem = 1;
+    *total_mem = 1;
+#endif
+}
+
+int main(int argc, char * argv[]) {
+    if (argc < 3) {
+        fprintf(stderr, "Usage: %s <host> <port>\n", argv[0]);
+        return 1;
+    }
+    const char * host = argv[1];
+    int port = std::stoi(argv[2]);
+    if (port <= 0 || port > 65535) {
+        fprintf(stderr, "Invalid port number: %d\n", port);
+        return 1;
+    }
+    ggml_backend_t backend = create_backend();
+    if (!backend) {
+        fprintf(stderr, "Failed to create backend\n");
+        return 1;
+    }
+    printf("Starting RPC server on %s:%d\n", host, port);
+    size_t free_mem, total_mem;
+    get_backend_memory(&free_mem, &total_mem);
+    std::string endpoint = std::string(host) + ":" + std::to_string(port);
+    start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem);
+    ggml_backend_free(backend);
+    return 0;
+}
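Usage note (inferred from the argument handling in main above): the server binds
to the given host and TCP port, e.g. `rpc-server 0.0.0.0 50052`, and serves
whichever backend was compiled in (CUDA, Metal, or CPU). Per the README added in
this PR, a llama.cpp client then points at one or more such endpoints with an
`--rpc host:port` option. Note that on non-CUDA builds get_backend_memory()
reports a 1-byte placeholder for both free and total memory, so clients should
not rely on those figures until the TODO for other backends is addressed.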
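For context, below is a minimal client-side sketch of what the commit message
means by "proxies all operations to a remote server": a client initializes the
RPC backend with the server's endpoint and then drives it through the ordinary
ggml-backend API, with tensor data and graph evaluation living on the remote
machine. This is illustrative only and not part of the patch: the endpoint
127.0.0.1:50052 is made up, and the sketch assumes a build linked against ggml
with the RPC backend compiled in.

// rpc-client-sketch.cpp -- hypothetical example, not part of the patch
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-rpc.h"

#include <stdio.h>

int main() {
    // connect to a running rpc-server; the endpoint here is illustrative
    ggml_backend_t backend = ggml_backend_rpc_init("127.0.0.1:50052");
    if (!backend) {
        fprintf(stderr, "failed to connect to RPC server\n");
        return 1;
    }

    // no_alloc: tensor metadata lives locally, tensor data on the remote backend
    struct ggml_init_params params = {
        /*.mem_size   =*/ ggml_tensor_overhead() * 8 + ggml_graph_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,
    };
    struct ggml_context * ctx = ggml_init(params);

    // build a tiny graph: c = a + b
    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
    struct ggml_tensor * c = ggml_add(ctx, a, b);

    // allocate the tensors in a buffer owned by the (remote) backend
    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend);

    const float va[4] = {1, 2, 3, 4};
    const float vb[4] = {10, 20, 30, 40};
    ggml_backend_tensor_set(a, va, 0, sizeof(va)); // uploads to the server
    ggml_backend_tensor_set(b, vb, 0, sizeof(vb));

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);
    ggml_backend_graph_compute(backend, gf);       // evaluated server-side

    float vc[4];
    ggml_backend_tensor_get(c, vc, 0, sizeof(vc)); // downloads the result
    printf("c = [%g, %g, %g, %g]\n", vc[0], vc[1], vc[2], vc[3]);

    ggml_backend_buffer_free(buf);
    ggml_free(ctx);
    ggml_backend_free(backend);
    return 0;
}

Because the RPC backend implements the generic ggml-backend interface, the
same client code would run unchanged against a local CPU or CUDA backend;
only the init call differs.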