Skip to content

Commit 0e64591

Browse files
authored
swiftui : enable stream updating (#7754)
1 parent b1ef562 commit 0e64591

File tree

1 file changed

+21
-14
lines changed

1 file changed

+21
-14
lines changed

examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift

Lines changed: 21 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -131,22 +131,29 @@ class LlamaState: ObservableObject {
131131

132132
messageLog += "\(text)"
133133

134-
while await llamaContext.n_cur < llamaContext.n_len {
135-
let result = await llamaContext.completion_loop()
136-
messageLog += "\(result)"
137-
}
134+
Task.detached {
135+
while await llamaContext.n_cur < llamaContext.n_len {
136+
let result = await llamaContext.completion_loop()
137+
await MainActor.run {
138+
self.messageLog += "\(result)"
139+
}
140+
}
138141

139-
let t_end = DispatchTime.now().uptimeNanoseconds
140-
let t_generation = Double(t_end - t_heat_end) / NS_PER_S
141-
let tokens_per_second = Double(await llamaContext.n_len) / t_generation
142+
let t_end = DispatchTime.now().uptimeNanoseconds
143+
let t_generation = Double(t_end - t_heat_end) / self.NS_PER_S
144+
let tokens_per_second = Double(await llamaContext.n_len) / t_generation
142145

143-
await llamaContext.clear()
144-
messageLog += """
145-
\n
146-
Done
147-
Heat up took \(t_heat)s
148-
Generated \(tokens_per_second) t/s\n
149-
"""
146+
await llamaContext.clear()
147+
148+
await MainActor.run {
149+
self.messageLog += """
150+
\n
151+
Done
152+
Heat up took \(t_heat)s
153+
Generated \(tokens_per_second) t/s\n
154+
"""
155+
}
156+
}
150157
}
151158

152159
func bench() async {

0 commit comments

Comments (0)