import Foundation
import llama
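
// This example decodes a prompt once and then generates n_parallel
// independent continuations from it in a single batch, sharing the
// prompt's KV cache across all sequences.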

let arguments = CommandLine.arguments

// Check that we have at least one argument (the model path)
guard arguments.count > 1 else {
    print("Usage: swift MODEL_PATH [PROMPT] [PARALLEL]")
    exit(1)
}

let modelPath: String = arguments[1]
let prompt: String = arguments.count > 2 ? arguments[2] : "Hello my name is"
let n_parallel: Int = arguments.count > 3 ? Int(arguments[3]) ?? 1 : 1

// total length of the sequences including the prompt
let n_len: Int = 32

// init LLM
llama_backend_init()
defer {
    llama_backend_free()
}

let model_params = llama_model_default_params()
guard let model = llama_model_load_from_file(modelPath.cString(using: .utf8), model_params) else {
    print("Failed to load model")
    exit(1)
}
defer {
    llama_model_free(model)
}

guard let vocab = llama_model_get_vocab(model) else {
    print("Failed to get vocab")
    exit(1)
}

let tokens = tokenize(text: prompt, add_bos: true)

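// required KV cache size: the prompt is stored once and shared across all
// sequences, and each of the n_parallel sequences then needs room for up to
// (n_len - prompt length) generated tokens of its own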
let n_kv_req = UInt32(tokens.count) + UInt32((n_len - tokens.count) * n_parallel)

var context_params = llama_context_default_params()
context_params.n_ctx = n_kv_req
context_params.n_batch = UInt32(max(n_len, n_parallel))
context_params.n_threads = 8
context_params.n_threads_batch = 8

guard let context = llama_init_from_model(model, context_params) else {
    print("Failed to initialize context")
    exit(1)
}
defer {
    llama_free(context)
}

let sparams = llama_sampler_chain_default_params()

guard let smpl = llama_sampler_chain_init(sparams) else {
    print("Failed to initialize sampling")
    exit(1)
}
defer {
    llama_sampler_free(smpl)
}

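// the samplers run in chain order: top-k and top-p narrow the candidate set,
// temperature rescales the remaining logits, and the dist sampler draws the
// final token from that distribution (seeded with 1234)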
llama_sampler_chain_add(smpl, llama_sampler_init_top_k(40))
llama_sampler_chain_add(smpl, llama_sampler_init_top_p(0.9, 1))
llama_sampler_chain_add(smpl, llama_sampler_init_temp(0.4))
llama_sampler_chain_add(smpl, llama_sampler_init_dist(1234))

let n_ctx = llama_n_ctx(context)

print("\nn_len = \(n_len), n_ctx = \(n_ctx), n_batch = \(context_params.n_batch), n_parallel = \(n_parallel), n_kv_req = \(n_kv_req)\n")

if n_kv_req > n_ctx {
    print("error: n_kv_req (\(n_kv_req)) > n_ctx, the required KV cache size is not big enough")
    exit(1)
}

// print the tokenized prompt piece by piece
var buffer: [CChar] = []
for id: llama_token in tokens {
    print(token_to_piece(token: id, buffer: &buffer) ?? "", terminator: "")
}

print("\n")

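// allocate a batch that can hold either the whole prompt or one new token
// per parallel stream, whichever is larger; embd = 0 means the batch holds
// token ids rather than embeddings, and each slot maps to at most one sequence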
var batch = llama_batch_init(max(Int32(tokens.count), Int32(n_parallel)), 0, 1)
defer {
    llama_batch_free(batch)
}

// evaluate the initial prompt
batch.n_tokens = Int32(tokens.count)

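// each batch slot carries a token id, its position in the sequence, the
// sequence ids it belongs to, and a logits flag telling llama_decode
// whether to compute output logits for that position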
for (i, token) in tokens.enumerated() {
    batch.token[i] = token
    batch.pos[i] = Int32(i)
    batch.n_seq_id[i] = 1
    // seq_id[i] is an optional pointer to this slot's sequence ids,
    // so unwrap it before assigning the prompt to sequence 0
    if let seq_id = batch.seq_id[i] {
        seq_id[0] = 0
    }
    batch.logits[i] = 0
}

// llama_decode will output logits only for the last token of the prompt
batch.logits[Int(batch.n_tokens) - 1] = 1

if llama_decode(context, batch) != 0 {
    print("llama_decode() failed")
    exit(1)
}

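// copy the prompt's KV cache entries to every other sequence; this is a
// cheap metadata copy, so the prompt is decoded only once and shared by
// all n_parallel streams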
for i in 1 ..< n_parallel {
    llama_memory_seq_cp(llama_get_memory(context), 0, Int32(i), 0, batch.n_tokens)
}

if n_parallel > 1 {
    print("generating \(n_parallel) sequences ...\n")
}

var streams: [String] = .init(repeating: "", count: n_parallel)
var streamBuffers: [[CChar]] = .init(repeating: [], count: n_parallel)
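// i_batch[i] is the index of stream i's latest token within the current
// batch, i.e. which row of logits to sample from; -1 marks a finished stream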
var i_batch = [Int32](repeating: batch.n_tokens - 1, count: n_parallel)

var n_cur = batch.n_tokens
var n_decode = 0

let t_main_start = ggml_time_us()

while n_cur <= n_len {
    // prepare the next batch
    batch.n_tokens = 0

    // sample the next token for each parallel sequence / stream
    for i in 0 ..< n_parallel {
        if i_batch[i] < 0 {
            // the stream has already finished
            continue
        }

        let new_token_id = llama_sampler_sample(smpl, context, i_batch[i])

        // is it an end of stream? -> mark the stream as finished
        if llama_vocab_is_eog(vocab, new_token_id) || n_cur == n_len {
            i_batch[i] = -1
            if n_parallel > 1 {
                print("stream \(i) finished at n_cur = \(n_cur)")
            }

            continue
        }

        let nextStringPiece = token_to_piece(token: new_token_id, buffer: &streamBuffers[i]) ?? ""

        // if there is only one stream, we print immediately to stdout
        if n_parallel == 1 {
            print(nextStringPiece, terminator: "")
        }
        streams[i] += nextStringPiece

        // push this new token for next evaluation
        batch.token[Int(batch.n_tokens)] = new_token_id
        batch.pos[Int(batch.n_tokens)] = n_cur
        batch.n_seq_id[Int(batch.n_tokens)] = 1
        if let seq_id = batch.seq_id[Int(batch.n_tokens)] {
            seq_id[0] = Int32(i)
        }
        batch.logits[Int(batch.n_tokens)] = 1

        i_batch[i] = batch.n_tokens

        batch.n_tokens += 1

        n_decode += 1
    }

    // all streams are finished
    if batch.n_tokens == 0 {
        break
    }

    n_cur += 1

    // evaluate the current batch with the transformer model
    if llama_decode(context, batch) != 0 {
        print("llama_decode() failed")
        exit(1)
    }
}

if n_parallel > 1 {
    print("\n")
    for (i, stream) in streams.enumerated() {
        print("sequence \(i):\n\n\(prompt)\(stream)\n")
    }
}

let t_main_end = ggml_time_us()

let t_elapsed = Double(t_main_end - t_main_start) / 1_000_000.0
print("decoded \(n_decode) tokens in \(String(format: "%.2f", t_elapsed)) s, speed: \(String(format: "%.2f", Double(n_decode) / t_elapsed)) t/s\n\n")

llama_perf_sampler_print(smpl)
llama_perf_context_print(context)

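// Tokenize `text` with the model's vocabulary. The allocated capacity is a
// safe upper bound: tokenization never yields more tokens than UTF-8 bytes,
// plus one extra slot for the optional BOS token.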
private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
    let utf8Count = text.utf8.count
    let n_tokens = utf8Count + (add_bos ? 1 : 0)
    let tokens = UnsafeMutablePointer<llama_token>.allocate(capacity: n_tokens)
    defer {
        tokens.deallocate()
    }
    let tokenCount = llama_tokenize(vocab, text, Int32(utf8Count), tokens, Int32(n_tokens), add_bos, /*special tokens*/ false)
    var swiftTokens: [llama_token] = []
    for i in 0 ..< tokenCount {
        swiftTokens.append(tokens[Int(i)])
    }
    return swiftTokens
}

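// Convert a single token to its UTF-8 piece. A token can end in the middle
// of a multi-byte UTF-8 character, so incomplete bytes are accumulated in
// `buffer` across calls until they form a valid string.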
private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? {
    var result = [CChar](repeating: 0, count: 8)
    // a negative return value means the buffer was too small; its absolute
    // value is the number of bytes the piece actually needs
    let pieceLength = llama_token_to_piece(vocab, token, &result, Int32(result.count), 0, false)
    if pieceLength < 0 {
        let requiredLength = -Int(pieceLength)
        result = .init(repeating: 0, count: requiredLength)
        let check = llama_token_to_piece(
            vocab,
            token,
            &result,
            Int32(result.count),
            0,
            false
        )
        assert(check == requiredLength)
    } else {
        result.removeLast(result.count - Int(pieceLength))
    }
    if buffer.isEmpty, let utfString = String(cString: result + [0], encoding: .utf8) {
        return utfString
    } else {
        // the piece is not valid UTF-8 on its own: keep accumulating bytes
        buffer.append(contentsOf: result)
        let data = Data(buffer.map { UInt8(bitPattern: $0) })
        if buffer.count >= 4 { // a UTF-8 character is at most 4 bytes, so stop accumulating beyond that
            buffer = []
        }
        guard let bufferString = String(data: data, encoding: .utf8) else {
            return nil
        }
        buffer = []
        return bufferString
    }
}