diff options
Diffstat (limited to 'llama.cpp/gguf-py/examples')
 llama.cpp/gguf-py/examples/reader.py (new, mode 100644) | 49 +++++++++++++++++++
 llama.cpp/gguf-py/examples/writer.py (new, mode 100755) | 39 ++++++++++++++
2 files changed, 88 insertions, 0 deletions
#!/usr/bin/env python3
"""Example: print the key-value metadata and tensor listing of a GGUF file."""
import logging
import sys
from pathlib import Path

logger = logging.getLogger("reader")

# Necessary to load the local gguf package
sys.path.insert(0, str(Path(__file__).parent.parent))

from gguf.gguf_reader import GGUFReader  # noqa: E402


def read_gguf_file(gguf_file_path):
    """
    Reads and prints key-value pairs and tensor information from a GGUF file in an improved format.

    Parameters:
    - gguf_file_path: Path to the GGUF file.
    """

    reader = GGUFReader(gguf_file_path)

    # List all key-value pairs in a columnized format
    print("Key-Value Pairs:")  # noqa: NP100
    # default=0 guards against a file with no metadata keys:
    # max() on an empty sequence raises ValueError.
    max_key_length = max((len(key) for key in reader.fields.keys()), default=0)
    for key, field in reader.fields.items():
        # NOTE(review): only the first data part of each field is printed, so
        # multi-part values (e.g. arrays) appear truncated — confirm intended.
        value = field.parts[field.data[0]]
        print(f"{key:{max_key_length}} : {value}")  # noqa: NP100
    print("----")  # noqa: NP100

    # List all tensors in a fixed-width table
    print("Tensors:")  # noqa: NP100
    tensor_info_format = "{:<30} | Shape: {:<15} | Size: {:<12} | Quantization: {}"
    print(tensor_info_format.format("Tensor Name", "Shape", "Size", "Quantization"))  # noqa: NP100
    print("-" * 80)  # noqa: NP100
    for tensor in reader.tensors:
        shape_str = "x".join(map(str, tensor.shape))
        size_str = str(tensor.n_elements)
        quantization_str = tensor.tensor_type.name
        print(tensor_info_format.format(tensor.name, shape_str, size_str, quantization_str))  # noqa: NP100


if __name__ == '__main__':
    # Without this the root logger stays at WARNING with no handler, and the
    # usage message below would be silently dropped.
    logging.basicConfig(level=logging.INFO)
    if len(sys.argv) < 2:
        logger.info("Usage: reader.py <path_to_gguf_file>")
        sys.exit(1)
    gguf_file_path = sys.argv[1]
    read_gguf_file(gguf_file_path)
#!/usr/bin/env python3
"""Example: build a small GGUF file with a few metadata keys and tensors."""
import sys
from pathlib import Path

import numpy as np

# Necessary to load the local gguf package
sys.path.insert(0, str(Path(__file__).parent.parent))

from gguf import GGUFWriter  # noqa: E402


# Example usage:
def writer_example() -> None:
    # Create a writer targeting a file on disk, with architecture "llama".
    gguf_writer = GGUFWriter("example.gguf", "llama")

    # Metadata: a block count plus a couple of demonstration key-value pairs.
    gguf_writer.add_block_count(12)
    gguf_writer.add_uint32("answer", 42)                # 32-bit integer value
    gguf_writer.add_float32("answer_in_float", 42.0)    # 32-bit float value
    gguf_writer.add_custom_alignment(64)

    # Three float32 vectors of increasing length, each filled with a constant.
    for idx, (length, fill) in enumerate([(32, 100.0), (64, 101.0), (96, 102.0)], start=1):
        gguf_writer.add_tensor(f"tensor{idx}", np.full((length,), fill, dtype=np.float32))

    # Serialize: header first, then the key-value section, then tensor data.
    gguf_writer.write_header_to_file()
    gguf_writer.write_kv_data_to_file()
    gguf_writer.write_tensors_to_file()

    gguf_writer.close()


if __name__ == '__main__':
    writer_example()
