summary refs log tree commit diff
path: root/examples
diff options
context:
space:
mode:
authorMiwa / Ensan <63481257+ensan-hcl@users.noreply.github.com>2023-12-02 03:19:45 +0900
committerGitHub <noreply@github.com>2023-12-01 20:19:45 +0200
commitb220222a64ce760bfbec9c770f11db3ec6a6abb6 (patch)
treeb320c9bde0fbaed89e98e124143269a4e585477a /examples
parent511f52c334e37033f9c9de07b98fca4abc9470bd (diff)
swift : fix token_to_piece implementation (#4278)
* Fix token_to_piece implementation in Swift * Fix errors
Diffstat (limited to 'examples')
-rw-r--r-- examples/batched.swift/Sources/main.swift 10
-rw-r--r-- examples/llama.swiftui/llama.cpp.swift/LibLlama.swift 24
2 files changed, 19 insertions, 15 deletions
diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift
index ba15197a..ce9d80d9 100644
--- a/examples/batched.swift/Sources/main.swift
+++ b/examples/batched.swift/Sources/main.swift
@@ -230,18 +230,15 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
var result = [CChar](repeating: 0, count: 8)
let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count))
if nTokens < 0 {
- if result.count >= -Int(nTokens) {
- result.removeLast(-Int(nTokens))
- } else {
- result.removeAll()
- }
+ let actualTokensCount = -Int(nTokens)
+ result = .init(repeating: 0, count: actualTokensCount)
let check = llama_token_to_piece(
model,
token,
&result,
Int32(result.count)
)
- assert(check == nTokens)
+ assert(check == actualTokensCount)
} else {
result.removeLast(result.count - Int(nTokens))
}
@@ -259,5 +256,4 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
buffer = []
return bufferString
}
- return nil
}
diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
index aaef0961..09b36d9e 100644
--- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
+++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
@@ -164,13 +164,21 @@ actor LlamaContext {
private func token_to_piece(token: llama_token) -> String {
let result = UnsafeMutablePointer<Int8>.allocate(capacity: 8)
result.initialize(repeating: Int8(0), count: 8)
-
- let _ = llama_token_to_piece(model, token, result, 8)
-
- let resultStr = String(cString: result)
-
- result.deallocate()
-
- return resultStr
+ defer {
+ result.deallocate()
+ }
+ let nTokens = llama_token_to_piece(model, token, result, 8)
+
+ if nTokens < 0 {
+ let newResult = UnsafeMutablePointer<Int8>.allocate(capacity: Int(-nTokens))
+ newResult.initialize(repeating: Int8(0), count: Int(-nTokens))
+ defer {
+ newResult.deallocate()
+ }
+ _ = llama_token_to_piece(model, token, newResult, -nTokens)
+ return String(cString: newResult)
+ } else {
+ return String(cString: result)
+ }
}
}