#include "ggml.h"
#include "ggml-alloc.h"
#include "gguf.h"

#include "arg.h"
#include "common.h"

#include <map>
#include <vector>
#include <string>
#include <fstream>

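// export-lora: merges one or more LoRA adapters into a base GGUF model and writes the
// result as a standalone model file (merged tensors are written as F16; F32 tensors stay F32).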
static bool g_verbose = false;

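// Describes how one base tensor is carried into the output: either copied verbatim
// (is_copy == true, in == out) or merged with the LoRA deltas into a new out tensor.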
struct tensor_transformation {
    struct ggml_tensor * in;
    struct ggml_tensor * out;
    bool is_copy;
};

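// GGUF metadata helpers: return an empty string / 0.0f when the key is not present.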
static std::string get_kv_str(struct gguf_context * ctx_gguf, const std::string & key){
    int id = gguf_find_key(ctx_gguf, key.c_str());
    return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf, id));
}

static float get_kv_f32(struct gguf_context * ctx_gguf, const std::string & key) {
    int id = gguf_find_key(ctx_gguf, key.c_str());
    return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf, id);
}

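// Writes n zero bytes; used to reserve space for the metadata header and to pad tensor
// data to GGUF_DEFAULT_ALIGNMENT.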
static void zeros(std::ofstream & file, size_t n) {
    char zero = 0;
    for (size_t i = 0; i < n; ++i) {
        file.write(&zero, 1);
    }
}

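// Formats the shape of a tensor as "ne0, ne1, ne2, ne3" for logging.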
static std::string ggml_ne_string(const ggml_tensor * t) {
    std::string str;
    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
        str += std::to_string(t->ne[i]);
        if (i + 1 < GGML_MAX_DIMS) {
            str += ", ";
        }
    }
    return str;
}

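// Parses a GGUF file with no_alloc = true: only tensor metadata is loaded into *ctx_ggml;
// the actual tensor data is read from the file later, on demand.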
static struct gguf_context * load_gguf(std::string & fname, struct ggml_context ** ctx_ggml) {
    struct gguf_init_params params = {
        /*.no_alloc = */ true,
        /*.ctx      = */ ctx_ggml,
    };
    struct gguf_context * ctx_gguf = gguf_init_from_file(fname.c_str(), params);
    if (!ctx_gguf) {
        throw std::runtime_error("failed to load input GGUF from " + fname);
    }
    return ctx_gguf;
}

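// One input GGUF file (base model or adapter): keeps the parsed metadata contexts, a
// name -> tensor map, and an open stream so tensor data can be read lazily.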
struct file_input {
    struct ggml_context * ctx_meta = nullptr;
    struct gguf_context * ctx_gguf = nullptr;
    std::ifstream f_in;
    std::map<std::string, ggml_tensor *> tensors;
    float alpha;
    float scale;

    file_input(std::string & fname, float scale): f_in(fname, std::ios::binary), scale(scale) {
        if (!f_in.is_open()) {
            throw std::runtime_error("failed to open input gguf from " + fname);
        }

        ctx_gguf = load_gguf(fname, &ctx_meta);
        alpha = get_kv_f32(ctx_gguf, "adapter.lora.alpha");
        printf("%s: loaded gguf from %s\n", __func__, fname.c_str());

        for (ggml_tensor * cur = ggml_get_first_tensor(ctx_meta); cur; cur = ggml_get_next_tensor(ctx_meta, cur)) {
            std::string name(cur->name);
            tensors[name] = cur;
            if (g_verbose) {
                printf("%s: %s\n", __func__, cur->name);
            }
        }
    }

    ggml_tensor * get_tensor(std::string name) {
        if (tensors.find(name) == tensors.end()) {
            return nullptr;
        }
        return tensors[name];
    }

    void read_tensor_data(std::string name, std::vector<uint8_t> & buf) {
        if (tensors.find(name) == tensors.end()) {
            throw std::runtime_error("cannot find tensor with name: " + name);
        }
        auto len = ggml_nbytes(tensors[name]);
        if (buf.size() < len) {
            buf.resize(len);
        }
        auto i_tensor_in = gguf_find_tensor(ctx_gguf, name.c_str()); // idx of tensor in the input file
        auto offset = gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, i_tensor_in);
        f_in.seekg(offset);
        f_in.read((char* )buf.data(), len);
    }

    ~file_input() {
        gguf_free(ctx_gguf);
        ggml_free(ctx_meta);
    }
};

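// Holds everything needed for the merge: the base model, the adapters, a CPU backend for
// computing the merged tensors, and the output GGUF file being written.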
struct lora_merge_ctx {
    // input base model + adapters
    file_input base_model;
    std::vector<std::unique_ptr<file_input>> adapters;

    // for computing merged tensor
    int n_threads;
    ggml_backend_t backend = nullptr;
    ggml_gallocr_t allocr = nullptr;
    std::vector<uint8_t> read_buf;

    // output file
    struct gguf_context * ctx_out;
    struct ggml_context * ctx_out_ggml;
    std::ofstream fout;

    lora_merge_ctx(
            std::string & base_fname,
            std::vector<common_adapter_lora_info> & lora_files,
            std::string & outfile,
            int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) {
        fout.exceptions(std::ofstream::failbit); // fail fast on write errors

        if (gguf_find_key(base_model.ctx_gguf, LLM_KV_SPLIT_COUNT) >= 0) {
            throw std::runtime_error("split model is not yet supported");
        }

        for (auto & lora_inp : lora_files) {
            auto fname = lora_inp.path;
            auto scale = lora_inp.scale;
            std::unique_ptr<file_input> adapter(new file_input(fname, scale));
            check_metadata_lora(adapter.get());
            adapters.push_back(std::move(adapter));
        }

        ctx_out = gguf_init_empty();
        struct ggml_init_params params = {
            /*.mem_size   =*/ static_cast<size_t>(gguf_get_n_tensors(base_model.ctx_gguf)*ggml_tensor_overhead()),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx_out_ggml = ggml_init(params);
        backend = ggml_backend_cpu_init();
        allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(backend));
    }

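    // Sanity-checks adapter metadata: it must declare itself as a LoRA adapter and match
    // the architecture of the base model.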
    void check_metadata_lora(file_input * adapter) {
        auto general_type = get_kv_str(adapter->ctx_gguf, "general.type");
        if (general_type != "adapter") {
            throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
        }

        auto adapter_type = get_kv_str(adapter->ctx_gguf, "adapter.type");
        if (adapter_type != "lora") {
            throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
        }

        auto general_arch_base = get_kv_str(base_model.ctx_gguf, "general.architecture");
        auto general_arch_lora = get_kv_str(adapter->ctx_gguf,   "general.architecture");
        if (general_arch_base != general_arch_lora) {
            throw std::runtime_error("model arch and LoRA arch mismatch");
        }
    }

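    // Output type for a merged tensor: F32 stays F32, everything else becomes F16.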
    ggml_type get_out_tensor_type(struct ggml_tensor * t) {
        if (t->type == GGML_TYPE_F32) {
            return GGML_TYPE_F32;
        } else {
            return GGML_TYPE_F16;
        }
    }

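    // Main merge pass: copies the base metadata, reserves space for the output header,
    // then copies or merges every base tensor, and finally rewrites the header at offset 0.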
    void run_merge() {
        // prepare metadata
        gguf_set_kv(ctx_out, base_model.ctx_gguf);
        // output is forced to f16 for now
        gguf_set_val_u32(ctx_out, "general.file_type", LLAMA_FTYPE_MOSTLY_F16);

        // check if all lora adapters have the same tensors
        // TODO: remove this when we can support merging subset of adapters. Ref: https://github.com/ggml-org/llama.cpp/pull/8607#discussion_r1686027777
        static const char * err_no_subset_adapter = "Input adapters do not have the same list of tensors. This is not yet supported. Please merge the adapters one by one instead of merging all at once.";
        if (adapters.size() > 1) {
            for (size_t i = 1; i < adapters.size(); ++i) {
                if (adapters[0]->tensors.size() != adapters[i]->tensors.size()) {
                    throw std::runtime_error(err_no_subset_adapter);
                }
                for (auto & it : adapters[i]->tensors) {
                    if (adapters[0]->get_tensor(it.first) == nullptr) {
                        throw std::runtime_error(err_no_subset_adapter);
                    }
                }
            }
        }

        // map each base tensor to an output tensor (same shape as the base tensor, but possibly a different type)
        std::vector<tensor_transformation> trans;
        for (auto & it : base_model.tensors) {
            bool t_a = true;
            bool t_b = true;
            for (auto & adapter : adapters) {
                t_a &= nullptr != adapter->get_tensor(it.first + ".lora_a");
                t_b &= nullptr != adapter->get_tensor(it.first + ".lora_b");
            }
            auto base_tensor = it.second;
            if (!t_a && !t_b) {
                // only copy
                struct ggml_tensor * cpy_tensor = ggml_dup_tensor(ctx_out_ggml, base_tensor);
                ggml_set_name(cpy_tensor, base_tensor->name);
                trans.push_back({
                    cpy_tensor,
                    cpy_tensor,
                    true,
                });
                gguf_add_tensor(ctx_out, cpy_tensor);
            } else if (t_a && t_b) {
                // need merging
                struct ggml_tensor * out_tensor = ggml_new_tensor(
                    ctx_out_ggml, get_out_tensor_type(base_tensor), GGML_MAX_DIMS, base_tensor->ne);
                ggml_set_name(out_tensor, base_tensor->name);
                trans.push_back({
                    base_tensor,
                    out_tensor,
                    false,
                });
                gguf_add_tensor(ctx_out, out_tensor);
            } else {
                throw std::runtime_error("tensor " + it.first + " is missing either lora_a or lora_b");
            }
        }

        // placeholder for the metadata; the real header is written at the end, once all tensor data is in place
        {
            size_t meta_size = gguf_get_meta_size(ctx_out);
            zeros(fout, meta_size);
        }

        // process base model tensors
        size_t n_merged = 0;
        for (auto & it : trans) {
            if (!it.is_copy) {
                merge_tensor(it.in, it.out);
                n_merged++;
            } else {
                copy_tensor(it.in);
            }
        }

        // write output metadata
        {
            std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
            gguf_get_meta_data(ctx_out, data.data());
            fout.seekp(0);
            fout.write((const char *)data.data(), data.size());
        }

        printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged);
        printf("%s : wrote %zu tensors to output file\n", __func__, trans.size());
    }

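    // Streams a base tensor to the output unchanged, padded to the GGUF alignment.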
    void copy_tensor(struct ggml_tensor * base) {
        printf("%s :  %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());
        size_t len = ggml_nbytes(base);
        base_model.read_tensor_data(base->name, read_buf);
        fout.write((char* )read_buf.data(), len);
        zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
    }

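    // Computes out = base + sum_i(scale_i * B_i A_i) for one tensor on the CPU backend and
    // writes the result (cast to the output type) to the output file.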
    void merge_tensor(struct ggml_tensor * base, struct ggml_tensor * out) {
        std::string name_base(base->name);
        std::string name_lora_a = name_base + ".lora_a";
        std::string name_lora_b = name_base + ".lora_b";

        printf("%s : %s [%s]\n", __func__, base->name, ggml_ne_string(base).c_str());

        // context for input tensor
        std::vector<struct ggml_tensor *> inp_a(adapters.size());
        std::vector<struct ggml_tensor *> inp_b(adapters.size());
        struct ggml_init_params params {
            /*.mem_size   =*/ ggml_tensor_overhead()*(2+adapters.size()*2),
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        struct ggml_context * ctx = ggml_init(params);

        // alloc tensors
        struct ggml_tensor * inp_base = ggml_new_tensor(ctx, GGML_TYPE_F32, GGML_MAX_DIMS, base->ne);
        for (size_t i = 0; i < adapters.size(); ++i) {
            auto t_a = adapters[i]->get_tensor(name_lora_a);
            auto t_b = adapters[i]->get_tensor(name_lora_b);
            // TODO: add support for quantized lora
            if (ggml_is_quantized(t_a->type) || ggml_is_quantized(t_b->type)) {
                throw std::runtime_error("quantized LoRA adapters are not supported, please retry with f16 or f32");
            }
            inp_a[i] = ggml_dup_tensor(ctx, t_a);
            inp_b[i] = ggml_dup_tensor(ctx, t_b);
        }
        ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);

        // load base tensor to backend buffer
        base_model.read_tensor_data(name_base, read_buf);
        if (base->type != GGML_TYPE_F32) {
            // dequantize (or convert) the base tensor to F32 before merging
            printf("%s :   + dequantize base tensor from %s to F32\n", __func__, ggml_type_name(base->type));
            auto nels = ggml_nelements(inp_base);
            const auto * qtype = ggml_get_type_traits(base->type);
            std::vector<uint8_t> dequant_buf(nels * sizeof(float));
            qtype->to_float(read_buf.data(), (float *)dequant_buf.data(), nels);
            ggml_backend_tensor_set(inp_base, dequant_buf.data(), 0, dequant_buf.size());
        } else {
            ggml_backend_tensor_set(inp_base, read_buf.data(), 0, ggml_nbytes(inp_base));
        }

        // load lora tensors to backend buffer
        for (size_t i = 0; i < adapters.size(); ++i) {
            adapters[i]->read_tensor_data(name_lora_a, read_buf);
            ggml_backend_tensor_set(inp_a[i], read_buf.data(), 0, ggml_nbytes(inp_a[i]));
            adapters[i]->read_tensor_data(name_lora_b, read_buf);
            ggml_backend_tensor_set(inp_b[i], read_buf.data(), 0, ggml_nbytes(inp_b[i]));
        }

        // build graph
        struct ggml_cgraph * gf;
        {
            static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
            static std::vector<uint8_t> buf(buf_size);
            struct ggml_init_params params0 = {
                /*.mem_size   =*/ buf_size,
                /*.mem_buffer =*/ buf.data(),
                /*.no_alloc   =*/ true,
            };
            struct ggml_context * ctx0 = ggml_init(params0);
            gf = ggml_new_graph(ctx0);
            struct ggml_tensor * cur = inp_base;
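            // accumulate cur = inp_base + sum_i(scale_i * B_i A_i), one adapter at a time;
            // token embeddings are a special case: their LoRA factors are stored in a
            // different layout, so they are multiplied without the extra transpose below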
            for (size_t i = 0; i < adapters.size(); ++i) {
                struct ggml_tensor * delta;
                bool is_tok_embd = string_starts_with(name_base, "token_embd");
                if (is_tok_embd) {
                    printf("%s :     detected token embeddings tensor\n", __func__);
                    delta = ggml_mul_mat(ctx0,
                        ggml_cast(ctx0, inp_b[i], GGML_TYPE_F32),
                        ggml_cast(ctx0, inp_a[i], GGML_TYPE_F32));
                } else {
                    delta = ggml_mul_mat(ctx0,
                        ggml_cont(ctx0, ggml_transpose(ctx0, ggml_cast(ctx0, inp_a[i], GGML_TYPE_F32))),
                        ggml_cast(ctx0, inp_b[i], GGML_TYPE_F32));
                }
                // scale the delta: standard LoRA scaling is alpha / rank (times the user-provided
                // adapter scale); if alpha is not set in the adapter, fall back to the raw scale
                const float alpha = adapters[i]->alpha;
                const float rank  = (float) inp_b[i]->ne[0];
                const float scale = alpha ? adapters[i]->scale * alpha / rank : adapters[i]->scale;
                delta = ggml_scale(ctx0, delta, scale);
                cur = ggml_add(ctx0, delta, cur);
                printf("%s :   + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
                printf("%s :     input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]);
            }
            cur = ggml_cast(ctx0, cur, out->type);
            printf("%s :   + output type is %s\n", __func__, ggml_type_name(out->type));
            ggml_build_forward_expand(gf, cur);
            ggml_free(ctx0);
        }

        // compute
        {
            ggml_gallocr_alloc_graph(allocr, gf);
            ggml_backend_cpu_set_n_threads(backend, n_threads);
            ggml_backend_graph_compute(backend, gf);
        }

        // write data to output file
        {
            auto * result = ggml_graph_node(gf, -1);
            size_t len = ggml_nbytes(result);
            if (read_buf.size() < len) {
                read_buf.resize(len);
            }
            ggml_backend_tensor_get(result, read_buf.data(), 0, len);
            fout.write((char* )read_buf.data(), len);
            zeros(fout, GGML_PAD(len, GGUF_DEFAULT_ALIGNMENT) - len);
        }

        ggml_free(ctx);
        ggml_backend_buffer_free(buffer);
    }

    ~lora_merge_ctx() {
        ggml_gallocr_free(allocr);
        ggml_backend_free(backend);
        gguf_free(ctx_out);
        ggml_free(ctx_out_ggml);
    }
};

static void print_usage(int, char ** argv) {
    printf("\nexample usage:\n");
    printf("\n  %s -m base-model.gguf --lora lora-file.gguf -o merged-model-f16.gguf\n", argv[0]);
    printf("\nNOTE: output model is F16\n");
    printf("\n");
}

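// Parses the shared llama.cpp command-line arguments (-m base model, --lora adapters,
// -o output file, thread count) and runs the merge; the output defaults to an F16 model.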
int main(int argc, char ** argv) {
    common_params params;

    params.out_file = "ggml-lora-merged-f16.gguf";

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_EXPORT_LORA, print_usage)) {
        return 1;
    }

    g_verbose = (params.verbosity > 1);
    try {
        lora_merge_ctx ctx(params.model.path, params.lora_adapters, params.out_file, params.cpuparams.n_threads);
        ctx.run_merge();
    } catch (const std::exception & err) {
        fprintf(stderr, "%s\n", err.what());
        exit(EXIT_FAILURE);
    }

    printf("done, output file is %s\n", params.out_file.c_str());

    return 0;
}