From b333b06772c89d96aacb5490d6a219fba7c09cc6 Mon Sep 17 00:00:00 2001 From: Mitja Felicijan Date: Thu, 12 Feb 2026 20:57:17 +0100 Subject: Engage! --- llama.cpp/requirements/requirements-convert_lora_to_gguf.txt | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 llama.cpp/requirements/requirements-convert_lora_to_gguf.txt (limited to 'llama.cpp/requirements/requirements-convert_lora_to_gguf.txt') diff --git a/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt b/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt new file mode 100644 index 0000000..d091d56 --- /dev/null +++ b/llama.cpp/requirements/requirements-convert_lora_to_gguf.txt @@ -0,0 +1,4 @@ +-r ./requirements-convert_hf_to_gguf.txt +--extra-index-url https://download.pytorch.org/whl/cpu +# torch s390x packages are only available from nightly builds +--extra-index-url https://download.pytorch.org/whl/nightly -- cgit v1.2.3