author    Radoslav Gerganov <rgerganov@gmail.com>  2024-05-14 14:27:19 +0300
committer GitHub <noreply@github.com>  2024-05-14 14:27:19 +0300
commit    5e31828d3e35c76ecfee665bc23771a4bec1d130
tree      7f5f2edc7c3fc3e7655904316897e32202edd5d6 /examples/rpc/rpc-server.cpp
parent    541600201e6480f54ae09e58d16b154d4b4b331d
ggml : add RPC backend (#6829)
* ggml : add RPC backend

  The RPC backend proxies all operations to a remote server which runs a
  regular backend (CPU, CUDA, Metal, etc).

* set TCP_NODELAY
* add CI workflows
* Address review comments
* fix warning
* implement llama_max_devices() for RPC
* Address review comments
* Address review comments
* wrap sockfd into a struct
* implement get_alignment and get_max_size
* add get_device_memory
* fix warning
* win32 support
* add README
* readme : trim trailing whitespace
* Address review comments
* win32 fix
* Address review comments
* fix compile warnings on macos
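The commit message summarizes the design: a client creates an RPC backend bound to the server's endpoint and then uses it like any local backend, with every operation forwarded over the socket. A minimal client-side sketch, assuming the entry point declared in ggml-rpc.h is ggml_backend_rpc_init (the endpoint address below is a placeholder):

    #include "ggml-rpc.h"
    #include <stdio.h>

    int main() {
        // connect to a running rpc-server; the endpoint format is "host:port"
        ggml_backend_t backend = ggml_backend_rpc_init("192.168.88.10:50052");
        if (!backend) {
            fprintf(stderr, "failed to connect to RPC server\n");
            return 1;
        }
        // ... allocate buffers and compute ggml graphs on the remote backend ...
        ggml_backend_free(backend);
        return 0;
    }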
Diffstat (limited to 'examples/rpc/rpc-server.cpp')
-rw-r--r-- examples/rpc/rpc-server.cpp | 70
1 file changed, 70 insertions, 0 deletions
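Building the examples produces an rpc-server binary that takes a bind address and a port, matching the usage string in main() below (the binary path is a guess from the example name; the output line comes from the code):

    $ ./rpc-server 0.0.0.0 50052
    Starting RPC server on 0.0.0.0:50052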
diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp
new file mode 100644
index 00000000..496af849
--- /dev/null
+++ b/examples/rpc/rpc-server.cpp
@@ -0,0 +1,70 @@
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#include "ggml-rpc.h"
+#include <string>
+#include <stdio.h>
+
+static ggml_backend_t create_backend() {
+ ggml_backend_t backend = NULL;
+#ifdef GGML_USE_CUDA
+ fprintf(stderr, "%s: using CUDA backend\n", __func__);
+ backend = ggml_backend_cuda_init(0); // init device 0
+ if (!backend) {
+ fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
+ }
+#elif defined(GGML_USE_METAL)
+ fprintf(stderr, "%s: using Metal backend\n", __func__);
+ backend = ggml_backend_metal_init();
+ if (!backend) {
+ fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
+ }
+#endif
+
+ // if no GPU backend was created, fall back to the CPU backend
+ if (!backend) {
+ fprintf(stderr, "%s: using CPU backend\n", __func__);
+ backend = ggml_backend_cpu_init();
+ }
+ return backend;
+}
+
+static void get_backend_memory(size_t * free_mem, size_t * total_mem) {
+#ifdef GGML_USE_CUDA
+ ggml_backend_cuda_get_device_memory(0, free_mem, total_mem);
+#else
+ // TODO: implement for other backends
+ *free_mem = 1;
+ *total_mem = 1;
+#endif
+}
+
+int main(int argc, char * argv[]) {
+ if (argc < 3) {
+ fprintf(stderr, "Usage: %s <host> <port>\n", argv[0]);
+ return 1;
+ }
+ const char * host = argv[1];
+ int port = std::stoi(argv[2]);
+ if (port <= 0 || port > 65535) {
+ fprintf(stderr, "Invalid port number: %d\n", port);
+ return 1;
+ }
+ ggml_backend_t backend = create_backend();
+ if (!backend) {
+ fprintf(stderr, "Failed to create backend\n");
+ return 1;
+ }
+ printf("Starting RPC server on %s:%d\n", host, port);
+ size_t free_mem, total_mem;
+ get_backend_memory(&free_mem, &total_mem);
+ std::string endpoint = std::string(host) + ":" + std::to_string(port);
+ start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem);
+ ggml_backend_free(backend);
+ return 0;
+}
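The commit message also notes that TCP_NODELAY is set on the connection, which disables Nagle's algorithm so the small RPC request/response messages are sent immediately instead of being buffered for coalescing. A typical way to do this with BSD sockets, as a sketch rather than the exact code from ggml-rpc.cpp:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    // disable Nagle's algorithm on a connected TCP socket
    static bool set_tcp_no_delay(int sockfd) {
        int flag = 1;
        return setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY,
                          (char *)&flag, sizeof(flag)) == 0;
    }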