# The flake interface to llama.cpp's Nix expressions. The flake is used as a
# more discoverable entry point, as well as a way to pin the dependencies and
# expose default outputs, including the outputs built by the CI.

# For more serious applications involving some kind of customization you may
# want to consider consuming the overlay, or instantiating `llamaPackages`
# directly:
#
# ```nix
# pkgs.callPackage ${llama-cpp-root}/.devops/nix/scope.nix { }
# ```

# Cf. https://jade.fyi/blog/flakes-arent-real/ for a more detailed exposition
# of the relation between Nix and Nix flakes.
{
  description = "Port of Facebook's LLaMA model in C/C++";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-parts.url = "github:hercules-ci/flake-parts";
  };

  # There's an optional binary cache available. The details are below, but they're commented out.
  #
  # Why? Because being prompted to accept them on every single Nix command run is a terrible
  # experience. Plus, on a default Nix install you get warnings about not being a trusted
  # user if you *do* say yes to the prompts.
  #
  # This experience makes having `nixConfig` in a flake a persistent UX problem.
  #
  # To make use of the binary cache, please add the relevant settings to your `nix.conf`.
  # It's located at `/etc/nix/nix.conf` on non-NixOS systems. On NixOS, adjust the `nix.settings`
  # option in your NixOS configuration to add `extra-substituters` and `extra-trusted-public-keys`,
  # using the values shown below; sketches of both forms follow the block.
  #
  # ```
  # nixConfig = {
  #   extra-substituters = [
  #     # A development cache for nixpkgs imported with `config.cudaSupport = true`.
  #     # Populated by https://hercules-ci.com/github/SomeoneSerge/nixpkgs-cuda-ci.
  #     # This lets one skip building e.g. the CUDA-enabled openmpi.
  #     # TODO: Replace once nix-community obtains an official one.
  #     "https://cuda-maintainers.cachix.org"
  #   ];
  #
  #   # Verify these are the same keys as published on
  #   # - https://app.cachix.org/cache/cuda-maintainers
  #   extra-trusted-public-keys = [
  #     "cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E="
  #   ];
  # };
  # ```
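  #
  # The same settings in plain `nix.conf` syntax (values are whitespace-separated;
  # this requires being a trusted user or editing the system-wide file):
  #
  # ```
  # extra-substituters = https://cuda-maintainers.cachix.org
  # extra-trusted-public-keys = cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E=
  # ```
  #
  # ...and as a NixOS module option:
  #
  # ```nix
  # nix.settings = {
  #   extra-substituters = [ "https://cuda-maintainers.cachix.org" ];
  #   extra-trusted-public-keys = [
  #     "cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E="
  #   ];
  # };
  # ```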

  # For inspection, use `nix flake show github:ggml-org/llama.cpp` or the nix repl:
  #
  # ```bash
  # ❯ nix repl
  # nix-repl> :lf github:ggml-org/llama.cpp
  # Added 13 variables.
  # nix-repl> outputs.apps.x86_64-linux.quantize
  # { program = "/nix/store/00000000000000000000000000000000-llama.cpp/bin/llama-quantize"; type = "app"; }
  # ```
  outputs =
    { self, flake-parts, ... }@inputs:
    let
      # We could include the git revisions in the package names but those would
      # needlessly trigger rebuilds:
      # llamaVersion = self.dirtyShortRev or self.shortRev;

      # Nix already uses cryptographic hashes for versioning, so we'll just fix
      # the fake semver for now:
      llamaVersion = "0.0.0";
    in
    flake-parts.lib.mkFlake { inherit inputs; }

      {

        imports = [
          .devops/nix/nixpkgs-instances.nix
          .devops/nix/apps.nix
          .devops/nix/devshells.nix
          .devops/nix/jetson-support.nix
        ];

        # An overlay can be used to have more granular control over llama-cpp's
        # dependencies and configuration than that offered by the `.override`
        # mechanism. Cf. https://nixos.org/manual/nixpkgs/stable/#chap-overlays.
        #
        # E.g. in a flake:
        # ```
        # { nixpkgs, llama-cpp, ... }:
        # let
        #   pkgs = import nixpkgs {
        #     overlays = [ (llama-cpp.overlays.default) ];
        #     system = "aarch64-linux";
        #     config.allowUnfree = true;
        #     config.cudaSupport = true;
        #     config.cudaCapabilities = [ "7.2" ];
        #     config.cudaEnableForwardCompat = false;
        #   };
        # in
        # {
        #   packages.aarch64-linux.llamaJetsonXavier = pkgs.llamaPackages.llama-cpp;
        # }
        # ```
        #
        # Cf. https://nixos.org/manual/nix/unstable/command-ref/new-cli/nix3-flake.html?highlight=flake#flake-format
        flake.overlays.default = (
          final: prev: {
            llamaPackages = final.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
            inherit (final.llamaPackages) llama-cpp;
          }
        );

        systems = [
          "aarch64-darwin"
          "aarch64-linux"
          "x86_64-darwin" # x86_64-darwin isn't tested (and likely isn't relevant)
          "x86_64-linux"
        ];

        perSystem =
          {
            config,
            lib,
            system,
            pkgs,
            pkgsCuda,
            pkgsRocm,
            ...
          }:
          {
            # For standardised reproducible formatting with `nix fmt`
            formatter = pkgs.nixfmt-rfc-style;

            # Unlike `.#packages`, legacyPackages may contain values of
            # arbitrary types (including nested attrsets) and may even throw
            # exceptions. This attribute isn't recursed into by `nix flake
            # show` either.
            #
            # You can add arbitrary scripts to `.devops/nix/scope.nix` and
            # access them as `nix build .#llamaPackages.${scriptName}` using
            # the same path you would with an overlay.
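            #
            # For instance, a sketch of such a script entry (the `hello-llama`
            # name is hypothetical; `scope.nix` is assumed to expose a
            # `lib.makeScope` scope whose `callPackage` falls back to nixpkgs):
            #
            # ```nix
            # # inside the scope attrset in .devops/nix/scope.nix:
            # hello-llama = self.callPackage (
            #   { writeShellScriptBin }:
            #   writeShellScriptBin "hello-llama" "echo hello from llamaPackages"
            # ) { };
            # ```
            #
            # ...buildable as `nix build .#llamaPackages.hello-llama`.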
            legacyPackages = {
              llamaPackages = pkgs.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
              llamaPackagesWindows = pkgs.pkgsCross.mingwW64.callPackage .devops/nix/scope.nix {
                inherit llamaVersion;
              };
              llamaPackagesCuda = pkgsCuda.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
              llamaPackagesRocm = pkgsRocm.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
            };

            # We don't use the overlay here so as to avoid making too many instances of nixpkgs,
            # cf. https://zimbatm.com/notes/1000-instances-of-nixpkgs
            packages =
              {
                default = config.legacyPackages.llamaPackages.llama-cpp;
                vulkan = config.packages.default.override { useVulkan = true; };
                windows = config.legacyPackages.llamaPackagesWindows.llama-cpp;
                python-scripts = config.legacyPackages.llamaPackages.python-scripts;
              }
              // lib.optionalAttrs pkgs.stdenv.isLinux {
                cuda = config.legacyPackages.llamaPackagesCuda.llama-cpp;

                mpi-cpu = config.packages.default.override { useMpi = true; };
                # Base the MPI+CUDA variant on the CUDA package set, not the default
                # (CPU) one, so that `mpi-cuda` actually differs from `mpi-cpu`:
                mpi-cuda = config.legacyPackages.llamaPackagesCuda.llama-cpp.override { useMpi = true; };
              }
              // lib.optionalAttrs (system == "x86_64-linux") {
                rocm = config.legacyPackages.llamaPackagesRocm.llama-cpp;
              };
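
            # The variants above are buildable by attribute, e.g.
            #
            # ```bash
            # nix build .#vulkan
            # nix build .#cuda   # Linux only
            # ```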

            # Packages exposed in `.#checks` will be built by the CI and by
            # `nix flake check`.
            #
            # We could test all outputs e.g. as `checks = config.packages`.
            #
            # TODO: Build more once https://github.com/ggml-org/llama.cpp/issues/6346 has been addressed
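            #
            # A single check can also be built directly, e.g.:
            #
            # ```bash
            # nix build .#checks.x86_64-linux.default
            # ```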
            checks = {
              inherit (config.packages) default vulkan;
            };
          };
      };
}