1#!/usr/bin/env bash
  2#
  3# sample usage:
  4#
  5# mkdir tmp
  6#
  7# # CPU-only build
  8# bash ./ci/run.sh ./tmp/results ./tmp/mnt
  9#
 10# # with CUDA support
 11# GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 12#
 13# # with SYCL support
 14# GG_BUILD_SYCL=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 15#
 16# # with VULKAN support
 17# GG_BUILD_VULKAN=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 18#
 19# # with WebGPU support
 20# GG_BUILD_WEBGPU=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 21#
 22# # with MUSA support
 23# GG_BUILD_MUSA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 24#
 25# # with KLEIDIAI support
 26# GG_BUILD_KLEIDIAI=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
 27#
 28
 29if [ -z "$2" ]; then
 30    echo "usage: $0 <output-dir> <mnt-dir>"
 31    exit 1
 32fi
 33
 34mkdir -p "$1"
 35mkdir -p "$2"
 36
 37OUT=$(realpath "$1")
 38MNT=$(realpath "$2")
 39
 40rm -f $OUT/*.log
 41rm -f $OUT/*.exit
 42rm -f $OUT/*.md
 43
 44sd=`dirname $0`
 45cd $sd/../
 46SRC=`pwd`
 47
 48CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=${LLAMA_FATAL_WARNINGS:-ON} -DLLAMA_OPENSSL=OFF -DGGML_SCHED_NO_REALLOC=ON"
 49
 50if [ ! -z ${GG_BUILD_METAL} ]; then
 51    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON"
 52fi
 53
 54if [ ! -z ${GG_BUILD_CUDA} ]; then
 55    # TODO: Remove GGML_CUDA_CUB_3DOT2 flag once CCCL 3.2 is bundled within CTK and that CTK version is used in this project
 56    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_CUDA=ON -DGGML_CUDA_CUB_3DOT2=ON"
 57
 58    if command -v nvidia-smi >/dev/null 2>&1; then
 59        CUDA_ARCH=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader,nounits 2>/dev/null | head -1 | tr -d '.')
 60        if [[ -n "$CUDA_ARCH" && "$CUDA_ARCH" =~ ^[0-9]+$ ]]; then
 61            CMAKE_EXTRA="${CMAKE_EXTRA} -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH}"
 62        else
 63            echo "Warning: Using fallback CUDA architectures"
 64            CMAKE_EXTRA="${CMAKE_EXTRA} -DCMAKE_CUDA_ARCHITECTURES=61;70;75;80;86;89"
 65        fi
 66    else
 67        echo "Error: nvidia-smi not found, cannot build with CUDA"
 68        exit 1
 69    fi
 70fi
 71
 72if [ ! -z ${GG_BUILD_ROCM} ]; then
 73    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_HIP=ON"
 74    if [ -z ${GG_BUILD_AMDGPU_TARGETS} ]; then
 75        echo "Missing GG_BUILD_AMDGPU_TARGETS, please set it to your GPU architecture (e.g. gfx90a, gfx1100, etc.)"
 76        exit 1
 77    fi
 78
 79    CMAKE_EXTRA="${CMAKE_EXTRA} -DGPU_TARGETS=${GG_BUILD_AMDGPU_TARGETS}"
 80fi
 81
 82if [ ! -z ${GG_BUILD_SYCL} ]; then
 83    if [ -z ${ONEAPI_ROOT} ]; then
 84        echo "Not detected ONEAPI_ROOT, please install oneAPI base toolkit and enable it by:"
 85        echo "source /opt/intel/oneapi/setvars.sh"
 86        exit 1
 87    fi
 88    # Use only main GPU
 89    export ONEAPI_DEVICE_SELECTOR="level_zero:0"
 90    # Enable sysman for correct memory reporting
 91    export ZES_ENABLE_SYSMAN=1
 92    # to circumvent precision issues on CPY operations
 93    export SYCL_PROGRAM_COMPILE_OPTIONS="-cl-fp32-correctly-rounded-divide-sqrt"
 94    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_SYCL=1 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON"
 95fi
 96
 97if [ ! -z ${GG_BUILD_VULKAN} ]; then
 98    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_VULKAN=1"
 99
100    # if on Mac, disable METAL
101    if [[ "$OSTYPE" == "darwin"* ]]; then
102        CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=OFF -DGGML_BLAS=OFF"
103    fi
104
105fi
106
107if [ ! -z ${GG_BUILD_WEBGPU} ]; then
108    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_WEBGPU=1 -DGGML_METAL=OFF -DGGML_BLAS=OFF"
109
110    if [ ! -z "${GG_BUILD_WEBGPU_DAWN_PREFIX}" ]; then
111        if [ -z "${CMAKE_PREFIX_PATH}" ]; then
112            export CMAKE_PREFIX_PATH="${GG_BUILD_WEBGPU_DAWN_PREFIX}"
113        else
114            export CMAKE_PREFIX_PATH="${GG_BUILD_WEBGPU_DAWN_PREFIX}:${CMAKE_PREFIX_PATH}"
115        fi
116    fi
117
118    # For some systems, Dawn_DIR needs to be set explicitly, e.g., the lib64 path
119    if [ ! -z "${GG_BUILD_WEBGPU_DAWN_DIR}" ]; then
120        CMAKE_EXTRA="${CMAKE_EXTRA} -DDawn_DIR=${GG_BUILD_WEBGPU_DAWN_DIR}"
121    fi
122fi
123
124if [ ! -z ${GG_BUILD_MUSA} ]; then
125    # Use qy1 by default (MTT S80)
126    MUSA_ARCH=${MUSA_ARCH:-21}
127    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_MUSA=ON -DMUSA_ARCHITECTURES=${MUSA_ARCH}"
128fi
129
130if [ ! -z ${GG_BUILD_NO_SVE} ]; then
131    # arm 9 and newer enables sve by default, adjust these flags depending on the cpu used
132    CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=armv8.5-a+fp16+i8mm"
133fi
134
if [ -n "${GG_BUILD_KLEIDIAI}" ]; then
    echo ">>===== Enabling KleidiAI support"

    # probe the compiler for the most capable supported ARM baseline,
    # from the richest feature set down to the minimum required one
    arm_baselines=(
        "armv9-a+dotprod+i8mm+sve2"
        "armv9-a+dotprod+i8mm"
        "armv8.6-a+dotprod+i8mm"
        "armv8.2-a+dotprod"
    )

    selected=""
    for candidate in "${arm_baselines[@]}"; do
        # try to compile an empty program with this -march value
        if echo 'int main(){}' | ${CXX:-c++} -march="$candidate" -x c++ - -c -o /dev/null >/dev/null 2>&1; then
            selected="$candidate"
            break
        fi
    done

    if [ -z "$selected" ]; then
        echo "ERROR: None of the required ARM baselines (armv9/armv8.6/armv8.2 + dotprod) are supported by this compiler."
        exit 1
    fi

    echo ">>===== Using ARM baseline: ${selected}"

    CMAKE_EXTRA="${CMAKE_EXTRA:+$CMAKE_EXTRA } \
        -DGGML_NATIVE=OFF \
        -DGGML_CPU_KLEIDIAI=ON \
        -DGGML_CPU_AARCH64=ON \
        -DGGML_CPU_ARM_ARCH=${selected} \
        -DBUILD_SHARED_LIBS=OFF"
fi
167
168## helpers
169
170# download a file if it does not exist or if it is outdated
function gg_wget {
    # download url ($2) into directory ($1), creating it if needed;
    # relies on wget timestamping to skip files that are already current
    local out=$1
    local url=$2

    # split declaration and assignment so a failing pwd isn't masked
    local cwd
    cwd=$(pwd)

    mkdir -p "$out"
    cd "$out" || return 1

    # should not re-download if file is the same (-N timestamping, -c resume)
    wget -nv -c -N "$url"

    cd "$cwd" || return 1
}
185
186function gg_printf {
187    printf -- "$@" >> $OUT/README.md
188}
189
function gg_run {
    # run CI step $1: execute gg_run_$1 with output logged to $OUT/$1.log,
    # record its exit code in $OUT/$1.exit, emit the Markdown summary via
    # gg_sum_$1, and fold the status into the global $ret accumulator
    ci=$1

    set -o pipefail
    set -x

    # with pipefail, $? reflects gg_run_$ci even though tee runs last
    gg_run_$ci | tee "$OUT/$ci.log"
    cur=$?
    echo "$cur" > "$OUT/$ci.exit"

    set +x
    set +o pipefail

    gg_sum_$ci

    ret=$((ret | cur))
}
207
208## ci
209
210# ctest_debug
211
function gg_run_ctest_debug {
    # configure, build and run the ctest suite in Debug mode
    cd "${SRC}"

    rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug

    set -e

    # Check cmake, make and ctest are installed
    gg_check_build_requirements

    # CMAKE_EXTRA is intentionally unquoted: it holds multiple -D flags
    (time cmake -DCMAKE_BUILD_TYPE=Debug ${CMAKE_EXTRA} .. ) 2>&1 | tee -a "$OUT/${ci}-cmake.log"
    (time make -j$(nproc)                                  ) 2>&1 | tee -a "$OUT/${ci}-make.log"

    # test-opt and test-backend-ops are excluded from the debug run
    (time ctest --output-on-failure -L main -E "test-opt|test-backend-ops" ) 2>&1 | tee -a "$OUT/${ci}-ctest.log"

    set +e
}
229
function gg_sum_ctest_debug {
    # write the Markdown summary for the debug ctest step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Runs ctest in debug mode\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '```\n'
    gg_printf '%s\n' "$(cat "$OUT/${ci}-ctest.log")"
    gg_printf '```\n'
    gg_printf '\n'
}
240
241# ctest_release
242
function gg_run_ctest_release {
    # configure, build and run the ctest suite in Release mode; the python
    # label is included only when not on a low-performance runner
    cd "${SRC}"

    rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release

    set -e

    # Check cmake, make and ctest are installed
    gg_check_build_requirements

    # CMAKE_EXTRA is intentionally unquoted: it holds multiple -D flags
    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a "$OUT/${ci}-cmake.log"
    (time make -j$(nproc)                                    ) 2>&1 | tee -a "$OUT/${ci}-make.log"

    if [ -z "${GG_BUILD_LOW_PERF}" ]; then
        (time ctest --output-on-failure -L 'main|python' ) 2>&1 | tee -a "$OUT/${ci}-ctest.log"
    else
        (time ctest --output-on-failure -L main -E test-opt ) 2>&1 | tee -a "$OUT/${ci}-ctest.log"
    fi

    set +e
}
264
function gg_sum_ctest_release {
    # write the Markdown summary for the release ctest step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Runs ctest in release mode\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '```\n'
    gg_printf '%s\n' "$(cat "$OUT/${ci}-ctest.log")"
    gg_printf '```\n'
}
274
275# test_scripts
276
function gg_run_test_scripts {
    # run the standalone test scripts of gguf-split and quantize
    # against the release build binaries
    cd "${SRC}"

    set -e

    (cd ./tools/gguf-split && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a "$OUT/${ci}-scripts.log"
    (cd ./tools/quantize   && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a "$OUT/${ci}-scripts.log"

    set +e
}
287
function gg_sum_test_scripts {
    # write the Markdown summary for the test-scripts step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Runs test scripts\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '```\n'
    gg_printf '%s\n' "$(cat "$OUT/${ci}-scripts.log")"
    gg_printf '```\n'
    gg_printf '\n'
}
298
function gg_get_model {
    # print (without trailing newline) the path of the quantized test model;
    # abort the whole script if it has not been produced yet
    local model_path="$MNT/models/qwen3/0.6B/ggml-model-q4_0.gguf"
    if [[ -s "$model_path" ]]; then
        printf '%s' "$model_path"
    else
        echo >&2 "No model found. Can't run gg_run_ctest_with_model."
        exit 1
    fi
}
309
function gg_run_ctest_with_model_debug {
    # run the 'model' ctest label against the debug build, pointing the
    # tests at the pre-converted model via LLAMACPP_TEST_MODELFILE
    cd "${SRC}"

    # split declaration and assignment so gg_get_model's status isn't masked
    local model; model=$(gg_get_model)
    cd build-ci-debug
    set -e

    (LLAMACPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a "$OUT/${ci}-ctest.log"

    set +e
    cd ..
}
322
function gg_run_ctest_with_model_release {
    # run the 'model' ctest label against the release build, pointing the
    # tests at the pre-converted model via LLAMACPP_TEST_MODELFILE
    cd "${SRC}"

    # split declaration and assignment so gg_get_model's status isn't masked
    local model; model=$(gg_get_model)
    cd build-ci-release
    set -e

    (LLAMACPP_TEST_MODELFILE="$model" time ctest --output-on-failure -L model) 2>&1 | tee -a "$OUT/${ci}-ctest.log"

    # test memory leaks
    #if [[ ! -z ${GG_BUILD_METAL} ]]; then
    #    # TODO: this hangs for some reason ...
    #    (time leaks -quiet -atExit -- ./bin/test-thread-safety -m $model --parallel 2 -t 2 -p "hello") 2>&1 | tee -a $OUT/${ci}-leaks.log
    #fi

    set +e
    cd ..
}
341
function gg_sum_ctest_with_model_debug {
    # write the Markdown summary for the debug model-ctest step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Runs ctest with model files in debug mode\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '```\n'
    gg_printf '%s\n' "$(cat "$OUT/${ci}-ctest.log")"
    gg_printf '```\n'
}
351
function gg_sum_ctest_with_model_release {
    # write the Markdown summary for the release model-ctest step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Runs ctest with model files in release mode\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '```\n'
    gg_printf '%s\n' "$(cat "$OUT/${ci}-ctest.log")"
    gg_printf '```\n'
}
361
362# qwen3_0_6b
363
function gg_run_qwen3_0_6b {
    # End-to-end CI for Qwen3 0.6B: download the HF checkpoint, convert to
    # GGUF, quantize to every tested format, then run text generation,
    # perplexity, imatrix and save/load-state sanity checks.
    cd ${SRC}

    # fetch tokenizer/config + safetensors weights from Hugging Face
    gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/config.json
    gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/tokenizer.json
    gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/tokenizer_config.json
   #gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/raw/main/special_tokens_map.json
    gg_wget models-mnt/qwen3/0.6B/ https://huggingface.co/Qwen/Qwen3-0.6B-Base/resolve/main/model.safetensors


    # wikitext-2 serves as the perplexity / imatrix test corpus
    gg_wget models-mnt/wikitext/ https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip
    unzip -o models-mnt/wikitext/wikitext-2-raw-v1.zip -d models-mnt/wikitext/

    # paths are relative to the build directory entered below
    path_models="../models-mnt/qwen3/0.6B"
    path_wiki="../models-mnt/wikitext/wikitext-2-raw"

    rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release

    set -e

    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j$(nproc)                                    ) 2>&1 | tee -a $OUT/${ci}-make.log

    # convert the HF checkpoint to GGUF at f16 and bf16
    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf  --outtype f16
    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-bf16.gguf --outtype bf16

    model_f16="${path_models}/ggml-model-f16.gguf"
    model_bf16="${path_models}/ggml-model-bf16.gguf"
    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
    model_q6_k="${path_models}/ggml-model-q6_k.gguf"

    wiki_test="${path_wiki}/wiki.test.raw"

    # quantize the bf16 model to every tested format
    ./bin/llama-quantize ${model_bf16} ${model_q8_0} q8_0 $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q4_0} q4_0 $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q4_1} q4_1 $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q5_0} q5_0 $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q5_1} q5_1 $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q2_k} q2_k $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q3_k} q3_k $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q4_k} q4_k $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q5_k} q5_k $(nproc)
    ./bin/llama-quantize ${model_bf16} ${model_q6_k} q6_k $(nproc)

    (time ./bin/llama-fit-params --model ${model_f16} 2>&1 | tee -a $OUT/${ci}-fp-f16.log)

    # short deterministic (-s 1234) text generation with each quant
    (time ./bin/llama-completion -no-cnv --model ${model_f16}  -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
    (time ./bin/llama-completion -no-cnv --model ${model_bf16} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log
    (time ./bin/llama-completion -no-cnv --model ${model_q8_0} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
    (time ./bin/llama-completion -no-cnv --model ${model_q4_0} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
    (time ./bin/llama-completion -no-cnv --model ${model_q4_1} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
    (time ./bin/llama-completion -no-cnv --model ${model_q5_0} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
    (time ./bin/llama-completion -no-cnv --model ${model_q5_1} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
    (time ./bin/llama-completion -no-cnv --model ${model_q2_k} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
    (time ./bin/llama-completion -no-cnv --model ${model_q3_k} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
    (time ./bin/llama-completion -no-cnv --model ${model_q4_k} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
    (time ./bin/llama-completion -no-cnv --model ${model_q5_k} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
    (time ./bin/llama-completion -no-cnv --model ${model_q6_k} -ngl 99 -c 1024 -s 1234 -n 64 --ignore-eos -p "I believe the meaning of life is" ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log

    # perplexity over the first 2 chunks of wikitext, appended to the same
    # per-quant logs so check_ppl below can pick up the results
    (time ./bin/llama-perplexity --model ${model_f16}  -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
    if [ -z ${GG_BUILD_NO_BF16} ]; then
        (time ./bin/llama-perplexity --model ${model_bf16} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-bf16.log
    fi
    (time ./bin/llama-perplexity --model ${model_q8_0} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log
    (time ./bin/llama-perplexity --model ${model_q4_0} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_0.log
    (time ./bin/llama-perplexity --model ${model_q4_1} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_1.log
    (time ./bin/llama-perplexity --model ${model_q5_0} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_0.log
    (time ./bin/llama-perplexity --model ${model_q5_1} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_1.log
    (time ./bin/llama-perplexity --model ${model_q2_k} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q2_k.log
    (time ./bin/llama-perplexity --model ${model_q3_k} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q3_k.log
    (time ./bin/llama-perplexity --model ${model_q4_k} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q4_k.log
    (time ./bin/llama-perplexity --model ${model_q5_k} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q5_k.log
    (time ./bin/llama-perplexity --model ${model_q6_k} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-tg-q6_k.log

    # importance matrix computation on the f16 model
    (time ./bin/llama-imatrix --model ${model_f16} -f ${wiki_test} -ngl 99 -c 1024 -b 512 --chunks 2 ) 2>&1 | tee -a $OUT/${ci}-imatrix.log

    # KV-state save/load round-trips, with and without flash attention,
    # partial (-ngl 10) and full (-ngl 99) offload
    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 1024 -fa off --no-op-offload) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 10 -c 1024 -fa on  --no-op-offload) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 1024 -fa off                ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log
    (time ./bin/llama-save-load-state --model ${model_q4_0} -ngl 99 -c 1024 -fa on                 ) 2>&1 | tee -a $OUT/${ci}-save-load-state.log

    # fail ($2's last float, i.e. the final perplexity) if ppl exceeds 20.0
    function check_ppl {
        qnt="$1"
        ppl=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)

        if [ $(echo "$ppl > 20.0" | bc) -eq 1 ]; then
            printf '  - %s @ %s (FAIL: ppl > 20.0)\n' "$qnt" "$ppl"
            return 20
        fi

        printf '  - %s @ %s OK\n' "$qnt" "$ppl"
        return 0
    }

    # the "^[1]" lines hold the per-chunk perplexity printed by llama-perplexity
    check_ppl "f16"  "$(cat $OUT/${ci}-tg-f16.log  | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    if [ -z ${GG_BUILD_NO_BF16} ]; then
        check_ppl "bf16" "$(cat $OUT/${ci}-tg-bf16.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    fi
    check_ppl "q8_0" "$(cat $OUT/${ci}-tg-q8_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    check_ppl "q4_0" "$(cat $OUT/${ci}-tg-q4_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    check_ppl "q4_1" "$(cat $OUT/${ci}-tg-q4_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    check_ppl "q5_0" "$(cat $OUT/${ci}-tg-q5_0.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    check_ppl "q5_1" "$(cat $OUT/${ci}-tg-q5_1.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
   #check_ppl "q2_k" "$(cat $OUT/${ci}-tg-q2_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log # note: ppl > 20.0 for this quant and model
    check_ppl "q3_k" "$(cat $OUT/${ci}-tg-q3_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    check_ppl "q4_k" "$(cat $OUT/${ci}-tg-q4_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    check_ppl "q5_k" "$(cat $OUT/${ci}-tg-q5_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log
    check_ppl "q6_k" "$(cat $OUT/${ci}-tg-q6_k.log | grep "^\[1\]")" | tee -a $OUT/${ci}-ppl.log

    # keep only the final imatrix summary line for the report
    cat $OUT/${ci}-imatrix.log | grep "Final" >> $OUT/${ci}-imatrix-sum.log

    set +e
}
485
function gg_sum_qwen3_0_6b {
    # write the Markdown summary for the Qwen3 0.6B step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Qwen3 0.6B:\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '- perplexity:\n%s\n' "$(cat "$OUT/${ci}-ppl.log")"
    gg_printf '- imatrix:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-imatrix-sum.log")"
    gg_printf '- f16:\n```\n%s\n```\n'  "$(cat "$OUT/${ci}-tg-f16.log")"
    if [ -z "${GG_BUILD_NO_BF16}" ]; then
        gg_printf '- bf16:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-bf16.log")"
    fi
    gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q8_0.log")"
    gg_printf '- q4_0:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q4_0.log")"
    gg_printf '- q4_1:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q4_1.log")"
    gg_printf '- q5_0:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q5_0.log")"
    gg_printf '- q5_1:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q5_1.log")"
    gg_printf '- q2_k:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q2_k.log")"
    gg_printf '- q3_k:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q3_k.log")"
    gg_printf '- q4_k:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q4_k.log")"
    gg_printf '- q5_k:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q5_k.log")"
    gg_printf '- q6_k:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q6_k.log")"
    gg_printf '- save-load-state: \n```\n%s\n```\n' "$(cat "$OUT/${ci}-save-load-state.log")"
}
509
510# bge-small
511
function gg_run_embd_bge_small {
    # Embedding CI for BGE-small (BERT): download, convert to GGUF,
    # quantize to q8_0 and run llama-embedding with both f16 and q8_0.
    cd ${SRC}

    # fetch tokenizer/config + pytorch weights from Hugging Face
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/config.json
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/tokenizer.json
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/tokenizer_config.json
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/special_tokens_map.json
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/resolve/main/pytorch_model.bin
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/sentence_bert_config.json
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/vocab.txt
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/modules.json
    gg_wget models-mnt/bge-small/ https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/config.json

    gg_wget models-mnt/bge-small/1_Pooling https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/1_Pooling/config.json

    # path is relative to the build directory entered below
    path_models="../models-mnt/bge-small"

    rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release

    set -e

    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j$(nproc)                                    ) 2>&1 | tee -a $OUT/${ci}-make.log

    # convert to f16 GGUF and quantize to q8_0
    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

    model_f16="${path_models}/ggml-model-f16.gguf"
    model_q8_0="${path_models}/ggml-model-q8_0.gguf"

    ./bin/llama-quantize ${model_f16} ${model_q8_0} q8_0

    (time ./bin/llama-fit-params --model ${model_f16} 2>&1 | tee -a $OUT/${ci}-fp-f16.log)

    # run the embedding tool with both precisions
    (time ./bin/llama-embedding --model ${model_f16}  -p "I believe the meaning of life is" -ngl 99 -c 0 --no-op-offload) 2>&1 | tee -a $OUT/${ci}-tg-f16.log
    (time ./bin/llama-embedding --model ${model_q8_0} -p "I believe the meaning of life is" -ngl 99 -c 0 --no-op-offload) 2>&1 | tee -a $OUT/${ci}-tg-q8_0.log

    set +e
}
550
function gg_sum_embd_bge_small {
    # write the Markdown summary for the BGE-small embedding step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'BGE Small (BERT):\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '- f16: \n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-f16.log")"
    gg_printf '- q8_0:\n```\n%s\n```\n' "$(cat "$OUT/${ci}-tg-q8_0.log")"
}
559
560# rerank_tiny
561
function gg_run_rerank_tiny {
    # Reranker CI for jina-reranker-v1-tiny-en: download, convert to GGUF,
    # run llama-embedding in rank pooling mode and validate the scores.
    cd ${SRC}

    # fetch tokenizer/config + pytorch weights from Hugging Face
    gg_wget models-mnt/rerank-tiny/ https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/raw/main/config.json
    gg_wget models-mnt/rerank-tiny/ https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/raw/main/tokenizer.json
    gg_wget models-mnt/rerank-tiny/ https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/raw/main/tokenizer_config.json
    gg_wget models-mnt/rerank-tiny/ https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/raw/main/special_tokens_map.json
    gg_wget models-mnt/rerank-tiny/ https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/resolve/main/pytorch_model.bin
    gg_wget models-mnt/rerank-tiny/ https://huggingface.co/jinaai/jina-reranker-v1-tiny-en/raw/main/vocab.json

    # path is relative to the build directory entered below
    path_models="../models-mnt/rerank-tiny"

    rm -rf build-ci-release && mkdir build-ci-release && cd build-ci-release

    set -e

    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j$(nproc)                                    ) 2>&1 | tee -a $OUT/${ci}-make.log

    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

    model_f16="${path_models}/ggml-model-f16.gguf"

    (time ./bin/llama-fit-params --model ${model_f16} 2>&1 | tee -a $OUT/${ci}-fp-f16.log)

    # for this model, the SEP token is "</s>"
    (time ./bin/llama-embedding --model ${model_f16} -p "what is panda?\thi\nwhat is panda?\tit's a bear\nwhat is panda?\tThe giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China." -ngl 99 -c 0 --pooling rank --embd-normalize -1 --no-op-offload --verbose-prompt) 2>&1 | tee -a $OUT/${ci}-rk-f16.log

    # sample output
    # rerank score 0:    0.029
    # rerank score 1:    0.029
    # rerank score 2:    0.135

    # check that the score is in the range [$3, $4]
    function check_score {
        qnt="$1"
        # last float on the matched line is the score itself
        score=$(echo "$2" | grep -oE "[0-9]+\.[0-9]+" | tail -n 1)

        if [ $(echo "$score < $3" | bc) -eq 1 ] || [ $(echo "$score > $4" | bc) -eq 1 ]; then
            printf '  - %s @ %s (FAIL: score not in range [%s, %s])\n' "$qnt" "$score" "$3" "$4"
            return 20
        fi

        printf '  - %s @ %s OK\n' "$qnt" "$score"
        return 0
    }

    # the unrelated answers must score low, the relevant one noticeably higher
    check_score "rerank score 0" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 0")" "0.00" "0.05" | tee -a $OUT/${ci}-rk-f16.log
    check_score "rerank score 1" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 1")" "0.00" "0.05" | tee -a $OUT/${ci}-rk-f16.log
    check_score "rerank score 2" "$(cat $OUT/${ci}-rk-f16.log | grep "rerank score 2")" "0.10" "0.30" | tee -a $OUT/${ci}-rk-f16.log

    set +e
}
615
function gg_sum_rerank_tiny {
    # write the Markdown summary for the tiny reranker step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Rerank Tiny (Jina):\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '- f16: \n```\n%s\n```\n' "$(cat "$OUT/${ci}-rk-f16.log")"
}
623
function gg_check_build_requirements {
    # verify that cmake, make and ctest are available; return non-zero when
    # anything is missing so that callers (which run under 'set -e') abort
    # immediately instead of failing later in the middle of the build
    local missing=0

    if ! command -v cmake &> /dev/null; then
        gg_printf 'cmake not found, please install'
        missing=1
    fi

    if ! command -v make &> /dev/null; then
        gg_printf 'make not found, please install'
        missing=1
    fi

    if ! command -v ctest &> /dev/null; then
        gg_printf 'ctest not found, please install'
        missing=1
    fi

    return $missing
}
637
function gg_run_test_backend_ops_cpu {
    # run the full test-backend-ops suite on the CPU backend using the
    # release build produced by gg_run_ctest_release
    cd "${SRC}"

    cd build-ci-release

    set -e

    (time ./bin/test-backend-ops -b CPU ) 2>&1 | tee -a "$OUT/${ci}-test-backend-ops-cpu.log"

    set +e
}
649
function gg_sum_test_backend_ops_cpu {
    # write the Markdown summary for the CPU backend-ops step
    gg_printf '### %s\n\n' "${ci}"

    gg_printf 'Runs test-backend-ops for CPU backend\n'
    gg_printf '- status: %s\n' "$(cat "$OUT/${ci}.exit")"
    gg_printf '```\n'
    gg_printf '%s\n' "$(cat "$OUT/${ci}-test-backend-ops-cpu.log")"
    gg_printf '```\n'
    gg_printf '\n'
}
660
661## main
662
export LLAMA_LOG_PREFIX=1
export LLAMA_LOG_TIMESTAMPS=1

if [ -z "${GG_BUILD_LOW_PERF}" ]; then
    # Create symlink: ./llama.cpp/models-mnt -> $MNT/models
    # ':?' guards the rm -rf against an unset/empty SRC
    rm -rf "${SRC:?}/models-mnt"
    mnt_models=${MNT}/models
    mkdir -p "${mnt_models}"
    ln -sfn "${mnt_models}" "${SRC}/models-mnt"

    # Create a fresh python3 venv and enter it
    if ! python3 -m venv "$MNT/venv"; then
        echo "Error: Failed to create Python virtual environment at $MNT/venv."
        exit 1
    fi
    source "$MNT/venv/bin/activate"

    pip install -r "${SRC}/requirements.txt" --disable-pip-version-check
    pip install --editable gguf-py --disable-pip-version-check
fi

# gg_run ORs each step's exit code into ret; later steps are skipped
# as soon as one fails
ret=0

test $ret -eq 0 && gg_run ctest_debug
test $ret -eq 0 && gg_run ctest_release

if [ -n "${GG_BUILD_HIGH_PERF}" ]; then
    test $ret -eq 0 && gg_run test_backend_ops_cpu
fi

if [ -z "${GG_BUILD_LOW_PERF}" ]; then
    test $ret -eq 0 && gg_run embd_bge_small
    test $ret -eq 0 && gg_run rerank_tiny

    if [ -z "${GG_BUILD_CLOUD}" ] || [ -n "${GG_BUILD_EXTRA_TESTS_0}" ]; then
        test $ret -eq 0 && gg_run test_scripts
    fi

    test $ret -eq 0 && gg_run qwen3_0_6b

    test $ret -eq 0 && gg_run ctest_with_model_debug
    test $ret -eq 0 && gg_run ctest_with_model_release
fi

# print the accumulated summary and propagate the overall status
cat "$OUT/README.md"

exit $ret