|  |  |  |
|---|---|---|
| author | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
| committer | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
| commit | b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch) | |
| tree | 211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/examples/batched.swift/Sources/main.swift | |
| download | llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz | |
Engage!
Diffstat (limited to 'llama.cpp/examples/batched.swift/Sources/main.swift')

| mode | file | changes |
|---|---|---|
| -rw-r--r-- | llama.cpp/examples/batched.swift/Sources/main.swift | 256 |

1 file changed, 256 insertions(+), 0 deletions(-)
diff --git a/llama.cpp/examples/batched.swift/Sources/main.swift b/llama.cpp/examples/batched.swift/Sources/main.swift
new file mode 100644
index 0000000..fd90bbe
--- /dev/null
+++ b/llama.cpp/examples/batched.swift/Sources/main.swift
@@ -0,0 +1,256 @@
import Foundation
import llama

let arguments = CommandLine.arguments

// Check that we have at least one argument (the model path)
guard arguments.count > 1 else {
    print("Usage: swift MODEL_PATH [PROMPT] [PARALLEL]")
    exit(1)
}

let modelPath: String = arguments[1]
let prompt: String = arguments.count > 2 ? arguments[2] : "Hello my name is"
let n_parallel: Int = arguments.count > 3 && Int(arguments[3]) != nil ? Int(arguments[3])! : 1

// total length of the sequences including the prompt
let n_len: Int = 32

// init LLM
llama_backend_init()
defer {
    llama_backend_free()
}

let model_params = llama_model_default_params()
guard let model = llama_model_load_from_file(modelPath.cString(using: .utf8), model_params) else {
    print("Failed to load model")
    exit(1)
}
defer {
    llama_model_free(model)
}

guard let vocab = llama_model_get_vocab(model) else {
    print("Failed to get vocab")
    exit(1)
}

var tokens = tokenize(text: prompt, add_bos: true)

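// The prompt's KV cells are shared by every parallel sequence (they are
// copied with llama_memory_seq_cp after the first decode below), so each
// stream only needs (n_len - prompt length) additional cells of its own.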
let n_kv_req = UInt32(tokens.count) + UInt32((n_len - Int(tokens.count)) * n_parallel)

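// n_ctx is sized to exactly the KV requirement computed above; n_batch has
// to fit the whole prompt in the first llama_decode call (the prompt is
// assumed here to be shorter than n_len) and one token per stream afterwards.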
var context_params = llama_context_default_params()
context_params.n_ctx = n_kv_req
context_params.n_batch = UInt32(max(n_len, n_parallel))
context_params.n_threads = 8
context_params.n_threads_batch = 8

let context = llama_init_from_model(model, context_params)
guard context != nil else {
    print("Failed to initialize context")
    exit(1)
}
defer {
    llama_free(context)
}

let sparams = llama_sampler_chain_default_params()

let smpl = llama_sampler_chain_init(sparams)
guard smpl != nil else {
    print("Failed to initialize sampling")
    exit(1)
}
defer {
    llama_sampler_free(smpl)
}

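// Samplers apply in chain order: top-k keeps the 40 most likely tokens,
// top-p then keeps the smallest set reaching 0.9 cumulative probability
// (min_keep = 1), temperature 0.4 sharpens what remains, and dist draws
// the final token from the resulting distribution with a fixed seed.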
llama_sampler_chain_add(smpl, llama_sampler_init_top_k(40))
llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1))
llama_sampler_chain_add(smpl, llama_sampler_init_temp(0.4))
llama_sampler_chain_add(smpl, llama_sampler_init_dist(1234))

let n_ctx = llama_n_ctx(context)

print("\nn_len = \(n_len), n_ctx = \(n_ctx), n_batch = \(context_params.n_batch), n_parallel = \(n_parallel), n_kv_req = \(n_kv_req)\n")

if n_kv_req > n_ctx {
    print("error: n_kv_req (\(n_kv_req)) > n_ctx, the required KV cache size is not big enough")
    exit(1)
}

var buffer: [CChar] = []
for id: llama_token in tokens {
    print(token_to_piece(token: id, buffer: &buffer) ?? "", terminator: "")
}

print("\n")

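// llama_batch_init(n_tokens, embd, n_seq_max): room for the larger of the
// prompt and one token per stream; embd == 0 means the batch carries token
// ids rather than embeddings, and each token belongs to at most one sequence.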
var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1)
defer {
    llama_batch_free(batch)
}

// evaluate the initial prompt
batch.n_tokens = Int32(tokens.count)

for (i, token) in tokens.enumerated() {
    batch.token[i] = token
    batch.pos[i] = Int32(i)
    batch.n_seq_id[i] = 1
    // batch.seq_id[i][0] = 0
    // TODO: is this the proper way to do this?
    if let seq_id = batch.seq_id[i] {
        seq_id[0] = 0
    }
    batch.logits[i] = 0
}

// llama_decode will output logits only for the last token of the prompt
batch.logits[Int(batch.n_tokens) - 1] = 1

if llama_decode(context, batch) != 0 {
    print("llama_decode() failed")
    exit(1)
}

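// Copy the prompt's KV cache cells from sequence 0 to every other stream,
// so the shared prefix is decoded once instead of n_parallel times.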
for i in 1 ..< n_parallel {
    llama_memory_seq_cp(llama_get_memory(context), 0, Int32(i), 0, batch.n_tokens)
}

if n_parallel > 1 {
    print("generating \(n_parallel) sequences ...\n")
}

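// Per-stream state: the text generated so far, a byte buffer for partially
// decoded UTF-8 characters, and i_batch[i] — the index inside the current
// batch whose logits belong to stream i (-1 once the stream has finished).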
var streams: [String] = .init(repeating: "", count: n_parallel)
var streamBuffers: [[CChar]] = .init(repeating: [], count: n_parallel)
var i_batch = [Int32](repeating: batch.n_tokens - 1, count: n_parallel)

var n_cur = batch.n_tokens
var n_decode = 0

let t_main_start = ggml_time_us()

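// Main generation loop: every iteration samples one token per active stream,
// packs them all into a single batch, and evaluates them with one
// llama_decode call — this batching is the point of the example.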
while n_cur <= n_len {
    // prepare the next batch
    batch.n_tokens = 0

    // sample the next token for each parallel sequence / stream
    for i in 0 ..< n_parallel {
        if i_batch[i] < 0 {
            // the stream has already finished
            continue
        }

        let new_token_id = llama_sampler_sample(smpl, context, i_batch[i])

        // is it an end of stream? -> mark the stream as finished
        if llama_vocab_is_eog(vocab, new_token_id) || n_cur == n_len {
            i_batch[i] = -1
            if n_parallel > 1 {
                print("stream \(i) finished at n_cur = \(n_cur)")
            }

            continue
        }

        let nextStringPiece = token_to_piece(token: new_token_id, buffer: &streamBuffers[i]) ?? ""

        // if there is only one stream, we print immediately to stdout
        if n_parallel == 1 {
            print(nextStringPiece, terminator: "")
        }
        streams[i] += nextStringPiece

        // push this new token for next evaluation
        batch.token[Int(batch.n_tokens)] = new_token_id
        batch.pos[Int(batch.n_tokens)] = n_cur
        batch.n_seq_id[Int(batch.n_tokens)] = 1
        if let seq_id = batch.seq_id[Int(batch.n_tokens)] {
            seq_id[0] = Int32(i)
        }
        batch.logits[Int(batch.n_tokens)] = 1

        i_batch[i] = batch.n_tokens

        batch.n_tokens += 1

        n_decode += 1
    }

    // all streams are finished
    if batch.n_tokens == 0 {
        break
    }

    n_cur += 1

    // evaluate the current batch with the transformer model
    if llama_decode(context, batch) != 0 {
        print("llama_decode() failed")
        exit(1)
    }
}

if n_parallel > 1 {
    print("\n")
    for (i, stream) in streams.enumerated() {
        print("sequence \(i):\n\n\(prompt)\(stream)\n")
    }
}

let t_main_end = ggml_time_us()

print("decoded \(n_decode) tokens in \(String(format: "%.2f", Double(t_main_end - t_main_start) / 1_000_000.0)) s, speed: \(String(format: "%.2f", Double(n_decode) / (Double(t_main_end - t_main_start) / 1_000_000.0))) t/s\n\n")

llama_perf_sampler_print(smpl)
llama_perf_context_print(context)

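// Helper: these tokenizers never produce more tokens than input bytes, so
// utf8Count (+1 for the optional BOS token) is a safe upper bound for the
// temporary token buffer.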
private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
    let utf8Count = text.utf8.count
    let n_tokens = utf8Count + (add_bos ? 1 : 0)
    let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
    let tokenCount = llama_tokenize(vocab, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, /*special tokens*/ false)
    var swiftTokens: [llama_token] = []
    for i in 0 ..< tokenCount {
        swiftTokens.append(tokens[Int(i)])
    }
    tokens.deallocate()
    return swiftTokens
}

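// Helper: converts one token to its text. A single UTF-8 character can be
// split across several tokens, so bytes that do not yet form valid UTF-8 are
// accumulated in `buffer` and flushed once they decode cleanly.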
private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? {
    var result = [CChar](repeating: 0, count: 8)
    let nTokens = llama_token_to_piece(vocab, token, &result, Int32(result.count), 0, false)
    if nTokens < 0 {
        let actualTokensCount = -Int(nTokens)
        result = .init(repeating: 0, count: actualTokensCount)
        let check = llama_token_to_piece(
            vocab,
            token,
            &result,
            Int32(result.count),
            0,
            false
        )
        assert(check == actualTokensCount)
    } else {
        result.removeLast(result.count - Int(nTokens))
    }
    if buffer.isEmpty, let utfString = String(cString: result + [0], encoding: .utf8) {
        return utfString
    } else {
        buffer.append(contentsOf: result)
        let data = Data(buffer.map { UInt8(bitPattern: $0) })
        if buffer.count >= 4 { // 4 bytes is the max length of a utf8 character so if we're here we need to reset the buffer
            buffer = []
        }
        guard let bufferString = String(data: data, encoding: .utf8) else {
            return nil
        }
        buffer = []
        return bufferString
    }
}
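
The per-token bookkeeping when filling llama_batch (token, pos, n_seq_id, seq_id, logits, then bumping n_tokens) appears twice above and is easy to get wrong. A small helper in the spirit of common_batch_add from llama.cpp's C++ common library could factor it out; the sketch below is an illustration under that assumption, not part of this commit:

// Sketch (not part of this commit): append one token to a llama_batch,
// mirroring llama.cpp's C++ helper common_batch_add.
func batch_add(_ batch: inout llama_batch, _ token: llama_token, _ pos: llama_pos,
               _ seq_ids: [llama_seq_id], _ logits: Bool) {
    let i = Int(batch.n_tokens)
    batch.token[i] = token
    batch.pos[i] = pos
    batch.n_seq_id[i] = Int32(seq_ids.count)
    for (j, id) in seq_ids.enumerated() {
        batch.seq_id[i]?[j] = id
    }
    batch.logits[i] = logits ? 1 : 0
    batch.n_tokens += 1
}

With it, the prompt loop above would reduce to batch_add(&batch, token, llama_pos(i), [0], false).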
