about summary refs log tree commit diff
path: root/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt
diff options
context:
space:
mode:
authorMitja Felicijan <mitja.felicijan@gmail.com>2026-02-12 20:57:17 +0100
committerMitja Felicijan <mitja.felicijan@gmail.com>2026-02-12 20:57:17 +0100
commitb333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/requirements/requirements-convert_hf_to_gguf.txt
download llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz
Engage!
Diffstat (limited to 'llama.cpp/requirements/requirements-convert_hf_to_gguf.txt')
-rw-r--r-- llama.cpp/requirements/requirements-convert_hf_to_gguf.txt 9
1 file changed, 9 insertions, 0 deletions
diff --git a/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt b/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt
new file mode 100644
index 0000000..122b478
--- /dev/null
+++ b/llama.cpp/requirements/requirements-convert_hf_to_gguf.txt
@@ -0,0 +1,9 @@
1  -r ./requirements-convert_legacy_llama.txt
2  --extra-index-url https://download.pytorch.org/whl/cpu
3
4  ## Embedding Gemma requires PyTorch 2.6.0 or later
5  torch~=2.6.0; platform_machine != "s390x"
6
7  # torch s390x packages can only be found from nightly builds
8  --extra-index-url https://download.pytorch.org/whl/nightly
9  torch>=0.0.0.dev0; platform_machine == "s390x"