# SRPM for building from source and packaging an RPM for RPM-based distros.
# https://docs.fedoraproject.org/en-US/quick-docs/creating-rpm-packages
# Built and maintained by John Boero - boeroboy@gmail.com
# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal

# Notes for llama.cpp:
# 1. Tags are currently based on hash - which will not sort asciibetically.
#    We need to declare standard versioning if people want to sort latest releases.
# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
# 3. NVIDIA's developer repo must be enabled with nvcc, cublas, clblas, etc. installed
#    (see the example commands after these notes).
#    Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
# 4. OpenCL/CLBLAST support simply requires the ICD loader and basic OpenCL libraries.
#    It is up to the user to install the correct vendor-specific support.
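#
# For example, enabling NVIDIA's repo on Fedora 37 might look like this
# (a sketch, assuming dnf with dnf-plugins-core; adjust the URL per release):
#   sudo dnf config-manager --add-repo \
#     https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
#   sudo dnf install cuda-toolkit
#
# To build from this spec (assuming it is saved as llama.cpp-cuda.srpm.spec
# and an ~/rpmbuild tree exists, e.g. created with rpmdev-setuptree):
#   spectool -g -R llama.cpp-cuda.srpm.spec   # fetch Source0 into SOURCES
#   rpmbuild -ba llama.cpp-cuda.srpm.spec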

Name:           llama.cpp-cuda
Version:        %( date "+%%Y%%m%%d" )
Release:        1%{?dist}
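# Version is the build date (YYYYMMDD): upstream tags are hash-based and do
# not sort, so a date keeps successive builds upgradeable (see note 1 above).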
Summary:        CUDA Inference of LLaMA model in C/C++
License:        MIT
Source0:        https://github.com/ggml-org/llama.cpp/archive/refs/heads/master.tar.gz
BuildRequires:  coreutils make gcc-c++ git cuda-toolkit
Requires:       cuda-toolkit
URL:            https://github.com/ggml-org/llama.cpp

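# Binary-only package: skip the debuginfo subpackage, and do not derive
# SOURCE_DATE_EPOCH from the (empty) changelog section below.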
%define debug_package %{nil}
%define source_date_epoch_from_changelog 0

%description
CUDA inference of Meta's LLaMA 2 models using default options.

%prep
%setup -n llama.cpp-master

%build
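# GGML_CUDA=1 selects the CUDA backend; a bare -j runs unlimited parallel jobs.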
make -j GGML_CUDA=1

%install
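# Binaries are renamed llama-cuda-* so they do not collide with the CPU-only
# llama.cpp package's binaries.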
mkdir -p %{buildroot}%{_bindir}/
cp -p llama-cli %{buildroot}%{_bindir}/llama-cuda-cli
cp -p llama-completion %{buildroot}%{_bindir}/llama-cuda-completion
cp -p llama-server %{buildroot}%{_bindir}/llama-cuda-server
cp -p llama-simple %{buildroot}%{_bindir}/llama-cuda-simple

mkdir -p %{buildroot}/usr/lib/systemd/system
%{__cat} <<'EOF' > %{buildroot}/usr/lib/systemd/system/llamacuda.service
[Unit]
Description=Llama.cpp server (CUDA build)
After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.target

[Service]
Type=simple
EnvironmentFile=/etc/sysconfig/llama
ExecStart=/usr/bin/llama-cuda-server $LLAMA_ARGS
ExecReload=/bin/kill -s HUP $MAINPID
Restart=no

[Install]
WantedBy=default.target
EOF

mkdir -p %{buildroot}/etc/sysconfig
%{__cat} <<EOF > %{buildroot}/etc/sysconfig/llama
LLAMA_ARGS="-m /opt/llama2/ggml-model-f32.bin"
EOF
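# After installation, point LLAMA_ARGS at a real model (the path above is a
# placeholder), then:  systemctl enable --now llamacuda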

%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*

%files
%{_bindir}/llama-cuda-cli
%{_bindir}/llama-cuda-completion
%{_bindir}/llama-cuda-server
%{_bindir}/llama-cuda-simple
/usr/lib/systemd/system/llamacuda.service
%config /etc/sysconfig/llama

%pre

%post

%preun
%postun

%changelog