#!/usr/bin/env bash

set -e

cd "$(dirname "$0")/.." || exit

# Specify the model you want to use here. llama-server loads GGUF-format models,
# so the default points at a GGUF quantization of Llama 2 13B Chat:
MODEL="${MODEL:-./models/llama-2-13b-chat.Q5_K_M.gguf}"
# Note: PROMPT_TEMPLATE is defined for convenience but is not passed to the server below.
PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat-system.txt}"

# Adjust to the number of CPU cores you want to use.
N_THREAD="${N_THREAD:-12}"

# Note: you can also override the generation options by setting GEN_OPTIONS in the
# environment, or by appending extra flags on the command line (see the examples below):
GEN_OPTIONS="${GEN_OPTIONS:---ctx-size 4096 --batch-size 1024}"
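# A minimal example of overriding the defaults above for a single run. The script
# path and the model filename here are placeholders (assumptions, not part of this
# repo); adjust them to wherever this file and your model actually live:
#
#   MODEL=./models/your-model.Q4_K_M.gguf \
#   N_THREAD=8 \
#   GEN_OPTIONS="--ctx-size 2048 --batch-size 512" \
#   ./examples/server-llama2-13B.sh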

# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
./llama-server $GEN_OPTIONS \
  --model "$MODEL" \
  --threads "$N_THREAD" \
  --rope-freq-scale 1.0 \
  "$@"

# I used this flag to test the model with MPS (Apple's Metal backend), but omitted
# it from the general-purpose invocation above. To use it, specify it on the
# command line:
#   -ngl 1