1#include "models.h"
  2
// Build the ggml compute graph for a conformer-style audio encoder:
//   input features -> conv subsampling (pre_encode) -> n_layer conformer blocks
//   (half-step FFN / rel-pos self-attention / conv module / half-step FFN / norm)
//   -> audio adapter projection.
// Returns the forward graph `gf`; the caller sets the "pos_emb" input tensor.
ggml_cgraph * clip_graph_conformer::build() {
    const int n_frames   = img.nx;
    // NOTE(review): n_pos appears to be the post-subsampling sequence length
    // bound checked against the model's embedding table — confirm against the
    // conversion script, since the conv stack below reduces time by ~8x, not 2x.
    const int n_pos      = n_frames / 2;
    // Relative-position table length: each of the three stride-2 convs in
    // pre_encode computes ceil(t/2) on the time axis; "* 2 - 1" yields the
    // 2*T - 1 relative offsets (-T+1 .. T-1) used by rel-pos attention.
    const int n_pos_embd = (((((n_frames + 1) / 2) + 1) / 2 + 1) / 2) * 2 - 1;
    GGML_ASSERT(model.position_embeddings->ne[1] >= n_pos);

    // Relative positional embeddings are computed host-side and uploaded as a
    // graph input. 512 is the embedding width — presumably equal to the model
    // hidden size; TODO confirm and derive from hparams instead of hard-coding.
    ggml_tensor * pos_emb = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 512, n_pos_embd);
    ggml_set_name(pos_emb, "pos_emb");
    ggml_set_input(pos_emb);
    ggml_build_forward_expand(gf, pos_emb);

    ggml_tensor * inp = build_inp_raw(1);

    // [features, frames] -> [frames, features] layout for the conv stack
    auto * cur = ggml_cont(ctx0, ggml_transpose(ctx0, inp));

    // pre encode, conv subsampling: three stride-2 stages -> 8x time reduction.
    // Pattern per stage: (full or pointwise) conv2d + depthwise conv2d + ReLU.
    {
        // layer.0 - conv2d, stride 2, pad 1
        cur = ggml_conv_2d(ctx0, model.pre_encode_conv_X_w[0], cur, 2, 2, 1, 1, 1, 1);
        cur = ggml_add(ctx0, cur, model.pre_encode_conv_X_b[0]);
        cb(cur, "conformer.pre_encode.conv.{}", 0);

        // layer.1 - relu
        cur = ggml_relu_inplace(ctx0, cur);

        // layer.2 conv2d dw (depthwise), stride 2, pad 1
        cur = ggml_conv_2d_dw_direct(ctx0, model.pre_encode_conv_X_w[2], cur, 2, 2, 1, 1, 1, 1);
        cur = ggml_add(ctx0, cur, model.pre_encode_conv_X_b[2]);
        cb(cur, "conformer.pre_encode.conv.{}", 2);

        // layer.3 conv2d, 1x1 (stride 1, no pad) — channel mixing after the dw conv
        cur = ggml_conv_2d_direct(ctx0, model.pre_encode_conv_X_w[3], cur, 1, 1, 0, 0, 1, 1);
        cur = ggml_add(ctx0, cur, model.pre_encode_conv_X_b[3]);
        cb(cur, "conformer.pre_encode.conv.{}", 3);

        // layer.4 - relu
        cur = ggml_relu_inplace(ctx0, cur);

        // layer.5 conv2d dw (depthwise), stride 2, pad 1
        cur = ggml_conv_2d_dw_direct(ctx0, model.pre_encode_conv_X_w[5], cur, 2, 2, 1, 1, 1, 1);
        cur = ggml_add(ctx0, cur, model.pre_encode_conv_X_b[5]);
        cb(cur, "conformer.pre_encode.conv.{}", 5);

        // layer.6 conv2d, 1x1 (stride 1, no pad)
        cur = ggml_conv_2d_direct(ctx0, model.pre_encode_conv_X_w[6], cur, 1, 1, 0, 0, 1, 1);
        cur = ggml_add(ctx0, cur, model.pre_encode_conv_X_b[6]);
        cb(cur, "conformer.pre_encode.conv.{}", 6);

        // layer.7 - relu
        cur = ggml_relu_inplace(ctx0, cur);

        // flatten channel and frequency axis into one feature dimension
        cur = ggml_cont(ctx0, ggml_permute(ctx0, cur, 0, 2, 1, 3));
        cur = ggml_reshape_2d(ctx0, cur, cur->ne[0] * cur->ne[1], cur->ne[2]);

        // linear projection to the encoder hidden size
        cur = ggml_mul_mat(ctx0, model.pre_encode_out_w, cur);
        cur = ggml_add(ctx0, cur, model.pre_encode_out_b);
        cb(cur, "conformer.pre_encode.out", -1);
    }

    // pos_emb
    cb(pos_emb, "pos_emb", -1);

    for (int il = 0; il < hparams.n_layer; il++) {
        const auto & layer = model.layers[il];

        auto * residual = cur;

        cb(cur, "layer.in", il);

        // feed_forward1 — first half of the macaron-style FFN pair
        cur = build_norm(cur, layer.ff_norm_w, layer.ff_norm_b, NORM_TYPE_NORMAL, 1e-5, il);
        cb(cur, "conformer.layers.{}.norm_feed_forward1", il);

        cur = build_ffn(cur, layer.ff_up_w, layer.ff_up_b, nullptr, nullptr, layer.ff_down_w, layer.ff_down_b, FFN_SILU,
                        il);
        cb(cur, "conformer.layers.{}.feed_forward1.linear2", il);

        // each of the two FFN modules contributes with weight 0.5 (half-step residual)
        const auto fc_factor = 0.5f;
        residual             = ggml_add(ctx0, residual, ggml_scale(ctx0, cur, fc_factor));

        // self-attention with relative positional encoding
        // (Transformer-XL style: learned biases pos_bias_u / pos_bias_v added to Q)
        {
            cur = build_norm(residual, layer.ln_1_w, layer.ln_1_b, NORM_TYPE_NORMAL, 1e-5, il);
            cb(cur, "conformer.layers.{}.norm_self_att", il);

            // Q projected once, then combined with the two rel-pos biases:
            //   Q + u pairs with K (content term), Q + v pairs with pos_emb (position term)
            ggml_tensor * Qcur     = ggml_mul_mat(ctx0, layer.q_w, cur);
            Qcur                   = ggml_add(ctx0, Qcur, layer.q_b);
            Qcur                   = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, Qcur->ne[1]);
            ggml_tensor * Q_bias_u = ggml_add(ctx0, Qcur, layer.pos_bias_u);
            Q_bias_u               = ggml_permute(ctx0, Q_bias_u, 0, 2, 1, 3);
            ggml_tensor * Q_bias_v = ggml_add(ctx0, Qcur, layer.pos_bias_v);
            Q_bias_v               = ggml_permute(ctx0, Q_bias_v, 0, 2, 1, 3);

            // TODO @ngxson : some cont can/should be removed when ggml_mul_mat support these cases
            ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.k_w, cur);
            Kcur               = ggml_add(ctx0, Kcur, layer.k_b);
            Kcur               = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, Kcur->ne[1]);
            Kcur               = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 0, 2, 1, 3));

            ggml_tensor * Vcur = ggml_mul_mat(ctx0, layer.v_w, cur);
            Vcur               = ggml_add(ctx0, Vcur, layer.v_b);
            Vcur               = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, Vcur->ne[1]);
            Vcur               = ggml_cont(ctx0, ggml_permute(ctx0, Vcur, 1, 2, 0, 3));

            // build_attn won't fit due to matrix_ac and matrix_bd separation
            // matrix_ac = (Q + u) * K^T — the content-content attention term
            ggml_tensor * matrix_ac = ggml_mul_mat(ctx0, Q_bias_u, Kcur);
            matrix_ac               = ggml_cont(ctx0, ggml_permute(ctx0, matrix_ac, 1, 0, 2, 3));
            cb(matrix_ac, "conformer.layers.{}.self_attn.id3", il);

            // project the relative positional embeddings and split into heads
            auto * p = ggml_mul_mat(ctx0, layer.linear_pos_w, pos_emb);
            cb(p, "conformer.layers.{}.self_attn.linear_pos", il);
            p = ggml_reshape_3d(ctx0, p, d_head, n_head, p->ne[1]);
            p = ggml_permute(ctx0, p, 0, 2, 1, 3);

            // matrix_bd = (Q + v) * P^T — the content-position attention term
            auto * matrix_bd = ggml_mul_mat(ctx0, Q_bias_v, p);
            matrix_bd        = ggml_cont(ctx0, ggml_permute(ctx0, matrix_bd, 1, 0, 2, 3));

            // rel shift: pad-by-one + roll + reshape converts scores over the
            // 2*T-1 relative offsets into scores aligned per query position
            // (the Transformer-XL "relative shift" trick, done with views)
            {
                const auto pos_len = matrix_bd->ne[0];
                const auto q_len   = matrix_bd->ne[1];
                const auto h       = matrix_bd->ne[2];
                matrix_bd          = ggml_pad(ctx0, matrix_bd, 1, 0, 0, 0);
                matrix_bd          = ggml_roll(ctx0, matrix_bd, 1, 0, 0, 0);
                matrix_bd          = ggml_reshape_3d(ctx0, matrix_bd, q_len, pos_len + 1, h);
                matrix_bd          = ggml_view_3d(ctx0, matrix_bd, q_len, pos_len, h, matrix_bd->nb[1],
                                                        matrix_bd->nb[2], matrix_bd->nb[0] * q_len);
                matrix_bd          = ggml_cont_3d(ctx0, matrix_bd, pos_len, q_len, h);
            }

            // truncate the shifted position term to the key length so it can be
            // added elementwise to matrix_ac
            matrix_bd     = ggml_view_3d(ctx0, matrix_bd, matrix_ac->ne[0], matrix_bd->ne[1],
                                               matrix_bd->ne[2], matrix_bd->nb[1], matrix_bd->nb[2], 0);
            auto * scores = ggml_add(ctx0, matrix_ac, matrix_bd);
            scores        = ggml_scale(ctx0, scores, 1.0f / std::sqrt(d_head));
            cb(scores, "conformer.layers.{}.self_attn.id0", il);

            // softmax(scores) * V, then merge heads back into one dimension
            ggml_tensor * attn = ggml_soft_max(ctx0, scores);
            ggml_tensor * x    = ggml_mul_mat(ctx0, attn, Vcur);
            x                  = ggml_permute(ctx0, x, 2, 0, 1, 3);
            x                  = ggml_cont_2d(ctx0, x, x->ne[0] * x->ne[1], x->ne[2]);

            // output projection
            ggml_tensor * out = ggml_mul_mat(ctx0, layer.o_w, x);
            out               = ggml_add(ctx0, out, layer.o_b);
            cb(out, "conformer.layers.{}.self_attn.linear_out", il);

            cur = out;
        }

        residual = ggml_add(ctx0, residual, cur);
        cur      = build_norm(residual, layer.norm_conv_w, layer.norm_conv_b, NORM_TYPE_NORMAL, 1e-5, il);
        cb(cur, "conformer.layers.{}.norm_conv", il);

        // conv module: pointwise conv -> GLU -> depthwise conv -> norm -> SiLU -> pointwise conv
        {
            auto * x = cur;
            // pointwise_conv1 expands to 2x channels (half of them gate the other half below)
            x = ggml_mul_mat(ctx0, layer.conv_pw1_w, x);
            x = ggml_add(ctx0, x, layer.conv_pw1_b);
            cb(x, "conformer.layers.{}.conv.pointwise_conv1", il);

            // manual GLU: split channels in half, gate the first half with
            // sigmoid of the second half
            // ggml_glu doesn't support sigmoid
            // TODO @ngxson : support this ops in ggml
            {
                int64_t       d    = x->ne[0] / 2;
                ggml_tensor * gate = ggml_sigmoid(ctx0, ggml_view_2d(ctx0, x, d, x->ne[1], x->nb[1], d * x->nb[0]));
                x                  = ggml_mul(ctx0, ggml_view_2d(ctx0, x, d, x->ne[1], x->nb[1], 0), gate);
                x                  = ggml_cont(ctx0, ggml_transpose(ctx0, x));
            }

            // depthwise conv via ggml_ssm_conv (kept in f32 precision).
            // The pad/roll/pad sequence produces symmetric zero-padding of 4 on
            // the time axis (ggml_pad only appends, so pad-right, rotate the
            // padding to the front, pad-right again) — assumes a depthwise
            // kernel width of 9 ("same" padding); TODO confirm against hparams.
            x = ggml_pad(ctx0, x, 4, 0, 0, 0);
            x = ggml_roll(ctx0, x, 4, 0, 0, 0);
            x = ggml_pad(ctx0, x, 4, 0, 0, 0);
            x = ggml_ssm_conv(ctx0, x, layer.conv_dw_w);
            x = ggml_add(ctx0, x, layer.conv_dw_b);

            // elementwise scale+shift normalization (presumably batch norm
            // folded for inference — TODO confirm from the conversion script)
            x = ggml_add(ctx0, ggml_mul(ctx0, x, layer.conv_norm_w), layer.conv_norm_b);
            x = ggml_silu(ctx0, x);

            // pointwise_conv2
            x = ggml_mul_mat(ctx0, layer.conv_pw2_w, x);
            x = ggml_add(ctx0, x, layer.conv_pw2_b);

            cur = x;
        }

        residual = ggml_add(ctx0, residual, cur);

        // feed_forward2 — second half of the macaron FFN pair
        cur = build_norm(residual, layer.ff_norm_1_w, layer.ff_norm_1_b, NORM_TYPE_NORMAL, 1e-5, il);
        cb(cur, "conformer.layers.{}.norm_feed_forward2", il);

        cur = build_ffn(cur, layer.ff_up_1_w, layer.ff_up_1_b, nullptr, nullptr, layer.ff_down_1_w, layer.ff_down_1_b,
                        FFN_SILU, il);  // TODO(tarek): read activation for ffn from hparams
        cb(cur, "conformer.layers.{}.feed_forward2.linear2", il);

        // half-step residual again, then the final per-layer norm
        residual = ggml_add(ctx0, residual, ggml_scale(ctx0, cur, fc_factor));
        cb(residual, "conformer.layers.{}.conv.id", il);

        cur = build_norm(residual, layer.ln_2_w, layer.ln_2_b, NORM_TYPE_NORMAL, 1e-5, il);
        cb(cur, "conformer.layers.{}.norm_out", il);
    }

    // audio adapter: norm + 2-layer MLP projecting into the LLM embedding space
    cur = build_norm(cur, model.mm_0_w, model.mm_0_b, NORM_TYPE_NORMAL, 1e-5, -1);
    cb(cur, "audio_adapter.model.{}", 0);
    cur = build_ffn(cur, model.mm_1_w, model.mm_1_b, nullptr, nullptr, model.mm_3_w, model.mm_3_b, FFN_GELU_ERF, -1);

    cb(cur, "projected", -1);

    ggml_build_forward_expand(gf, cur);

    return gf;
}