author     Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-07-27 07:55:01 +0200
committer  GitHub <noreply@github.com>                             2024-07-27 07:55:01 +0200
commit     154e0d75fccf1784fe9ff6fd76a630b66563da3d (patch)
tree       81ce6dbb5b1900c1aa78a879f0593c694cab9d27 /examples/llama.android/llama/src/main
parent     0684c3e9c70d49323b4fc517128cbe222cab7f96 (diff)
Merge mainline llama.cpp (#3)
* Merging mainline - WIP
* Merging mainline - WIP

  AVX2 and CUDA appear to work. CUDA performance seems slightly (~1-2%)
  lower, as is so often the case with llama.cpp/ggml after some
  "improvements" have been made.

* Merging mainline - fix Metal
* Remove check

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'examples/llama.android/llama/src/main')
-rw-r--r--  examples/llama.android/llama/src/main/cpp/CMakeLists.txt    | 18
-rw-r--r--  examples/llama.android/llama/src/main/cpp/llama-android.cpp |  4
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/examples/llama.android/llama/src/main/cpp/CMakeLists.txt b/examples/llama.android/llama/src/main/cpp/CMakeLists.txt
index 42ebaad4..2de49657 100644
--- a/examples/llama.android/llama/src/main/cpp/CMakeLists.txt
+++ b/examples/llama.android/llama/src/main/cpp/CMakeLists.txt
@@ -11,15 +11,15 @@ cmake_minimum_required(VERSION 3.22.1)
# build script scope).
project("llama-android")
-include(FetchContent)
-FetchContent_Declare(
- llama
- GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
- GIT_TAG master
-)
+#include(FetchContent)
+#FetchContent_Declare(
+# llama
+# GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
+# GIT_TAG master
+#)
# Also provides "common"
-FetchContent_MakeAvailable(llama)
+#FetchContent_MakeAvailable(llama)
# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
@@ -30,6 +30,10 @@ FetchContent_MakeAvailable(llama)
# the target library name; in the sub-module's CMakeLists.txt, ${PROJECT_NAME}
# is preferred for the same purpose.
#
+
+#load local llama.cpp
+add_subdirectory(../../../../../../ build-llama)
+
# In order to load a library into your app from Java/Kotlin, you must call
# System.loadLibrary() and pass the name of the library defined here;
# for GameActivity/NativeActivity derived applications, the same library name must be
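The context above notes that the Java/Kotlin side must load the native library by the name defined in this CMakeLists.txt. As a minimal Kotlin sketch (assuming the library keeps the name "llama-android" set by the project() call; the wrapper class shown is illustrative, only the library name must match):

    // Illustrative Kotlin wrapper; the class name and layout are not from
    // this commit. Only the string passed to loadLibrary must match the
    // CMake target, which produces libllama-android.so.
    class LLamaAndroid {
        companion object {
            init {
                System.loadLibrary("llama-android")
            }
        }
    }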
diff --git a/examples/llama.android/llama/src/main/cpp/llama-android.cpp b/examples/llama.android/llama/src/main/cpp/llama-android.cpp
index 874158ef..2aafe231 100644
--- a/examples/llama.android/llama/src/main/cpp/llama-android.cpp
+++ b/examples/llama.android/llama/src/main/cpp/llama-android.cpp
@@ -5,7 +5,7 @@
#include <string>
#include <unistd.h>
#include "llama.h"
-#include "common/common.h"
+#include "common.h"
// Write C++ code here.
//
@@ -409,7 +409,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop(
const auto n_cur = env->CallIntMethod(intvar_ncur, la_int_var_value);
if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
- return env->NewStringUTF("");
+ return nullptr;
}
auto new_token_chars = llama_token_to_piece(context, new_token_id);
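Returning nullptr instead of an empty jstring surfaces as null on the Kotlin side, so the end of generation (an EOG token or the n_len limit) becomes distinguishable from a legitimately empty token piece. A sketch of a matching consumer, assuming the external binding is declared to return String? (the names IntVar and tokens stand in for the example's actual binding and are illustrative):

    // Kotlin sketch: assumes the JNI function above is bound as an external
    // fun returning String?, so the nullptr return arrives as null.
    class IntVar(var value: Int)  // illustrative stand-in for the example's IntVar

    private external fun completion_loop(
        context: Long, batch: Long, nLen: Int, nCur: IntVar
    ): String?

    fun tokens(context: Long, batch: Long, nLen: Int, nCur: IntVar): Sequence<String> =
        sequence {
            while (true) {
                // null now signals end-of-generation; "" could still be a
                // valid empty token piece, which the old code conflated.
                val piece = completion_loop(context, batch, nLen, nCur) ?: break
                yield(piece)
            }
        }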