summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorShuichi Tsutsumi <shuichi0526@gmail.com>2024-06-21 14:30:58 +0900
committerGitHub <noreply@github.com>2024-06-21 08:30:58 +0300
commit0e64591e8290037db6412665a56354b789a0597e (patch)
treeac580de69efe8c5ae5503f860d1ace8ec6f2b6ff
parentb1ef562bc17fbf8ba436ddf2d89b78efd10d3e03 (diff)
swiftui : enable stream updating (#7754)
-rw-r--r--examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift35
1 file changed, 21 insertions, 14 deletions
diff --git a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift
index 5bde1891..2c1e3f61 100644
--- a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift
+++ b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift
@@ -131,22 +131,29 @@ class LlamaState: ObservableObject {
messageLog += "\(text)"
- while await llamaContext.n_cur < llamaContext.n_len {
- let result = await llamaContext.completion_loop()
- messageLog += "\(result)"
- }
+ Task.detached {
+ while await llamaContext.n_cur < llamaContext.n_len {
+ let result = await llamaContext.completion_loop()
+ await MainActor.run {
+ self.messageLog += "\(result)"
+ }
+ }
- let t_end = DispatchTime.now().uptimeNanoseconds
- let t_generation = Double(t_end - t_heat_end) / NS_PER_S
- let tokens_per_second = Double(await llamaContext.n_len) / t_generation
+ let t_end = DispatchTime.now().uptimeNanoseconds
+ let t_generation = Double(t_end - t_heat_end) / self.NS_PER_S
+ let tokens_per_second = Double(await llamaContext.n_len) / t_generation
- await llamaContext.clear()
- messageLog += """
- \n
- Done
- Heat up took \(t_heat)s
- Generated \(tokens_per_second) t/s\n
- """
+ await llamaContext.clear()
+
+ await MainActor.run {
+ self.messageLog += """
+ \n
+ Done
+ Heat up took \(t_heat)s
+ Generated \(tokens_per_second) t/s\n
+ """
+ }
+ }
}
func bench() async {