MAKEFLAGS += --no-print-directory

define validate_model_path
	@if [ -z "$(MODEL_PATH)" ]; then \
		echo "Error: MODEL_PATH must be provided either as:"; \
		echo "  1. Environment variable: export MODEL_PATH=/path/to/model"; \
		echo "  2. Command line argument: make $(1) MODEL_PATH=/path/to/model"; \
		exit 1; \
	fi
endef

define validate_embedding_model_path
	@if [ -z "$(EMBEDDING_MODEL_PATH)" ]; then \
		echo "Error: EMBEDDING_MODEL_PATH must be provided either as:"; \
		echo "  1. Environment variable: export EMBEDDING_MODEL_PATH=/path/to/model"; \
		echo "  2. Command line argument: make $(1) EMBEDDING_MODEL_PATH=/path/to/model"; \
		exit 1; \
	fi
endef

define quantize_model
	@CONVERTED_MODEL="$(1)" QUANTIZED_TYPE="$(QUANTIZED_TYPE)" \
	TOKEN_EMBD_TYPE="$(TOKEN_EMBD_TYPE)" OUTPUT_TYPE="$(OUTPUT_TYPE)" \
	./scripts/utils/quantize.sh "$(1)" "$(QUANTIZED_TYPE)" "$(TOKEN_EMBD_TYPE)" "$(OUTPUT_TYPE)"
	@echo "Export the quantized model path to the $(2) variable in your environment"
endef
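
# Illustrative expansion of the macro above (model path and quantization type
# are examples only, not defaults):
#   $(call quantize_model,models/example-f16.gguf,QUANTIZED_MODEL)
# runs roughly:
#   ./scripts/utils/quantize.sh "models/example-f16.gguf" "Q8_0" "" ""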

DEVICE ?= auto

###
### Causal Model targets/recipes
###
causal-convert-model-bf16: OUTTYPE=bf16
causal-convert-model-bf16: causal-convert-model

causal-convert-model-debug: DEBUG=--debug
causal-convert-model-debug: causal-convert-model

causal-convert-model:
	$(call validate_model_path,causal-convert-model)
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(OUTTYPE)" MODEL_PATH="$(MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/causal/convert-model.sh $(DEBUG)
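
# Example usage (model path and outtype are illustrative):
#   make causal-convert-model MODEL_PATH=/path/to/original/model OUTTYPE=f16
# or, using the bf16 convenience target:
#   make causal-convert-model-bf16 MODEL_PATH=/path/to/original/model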

causal-convert-mm-model-bf16: OUTTYPE=bf16
causal-convert-mm-model-bf16: MM_OUTTYPE=f16
causal-convert-mm-model-bf16: causal-convert-mm-model

causal-convert-mm-model:
	$(call validate_model_path,causal-convert-mm-model)
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(OUTTYPE)" MODEL_PATH="$(MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/causal/convert-model.sh

	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(MM_OUTTYPE)" MODEL_PATH="$(MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/causal/convert-model.sh --mmproj
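
# Example usage for a multimodal model (path is illustrative); this converts
# the language model first and then the multimodal projector:
#   make causal-convert-mm-model-bf16 MODEL_PATH=/path/to/original/mm-model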

causal-run-original-model:
	$(call validate_model_path,causal-run-original-model)
	@MODEL_PATH="$(MODEL_PATH)" ./scripts/causal/run-org-model.py --device "$(DEVICE)"

causal-run-converted-model:
	@CONVERTED_MODEL="$(CONVERTED_MODEL)" ./scripts/causal/run-converted-model.sh

causal-verify-logits: causal-run-original-model causal-run-converted-model
	@MODEL_PATH="$(MODEL_PATH)" ./scripts/causal/compare-logits.py
	@MODEL_PATH="$(MODEL_PATH)" ./scripts/utils/check-nmse.py -m $(MODEL_PATH)
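
# Example end-to-end logits verification (paths are illustrative):
#   make causal-verify-logits MODEL_PATH=/path/to/original/model \
#        CONVERTED_MODEL=/path/to/converted-model.gguf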

causal-run-original-embeddings:
	@./scripts/causal/run-casual-gen-embeddings-org.py

causal-run-converted-embeddings:
	@./scripts/causal/run-converted-model-embeddings-logits.sh

causal-verify-embeddings: causal-run-original-embeddings causal-run-converted-embeddings
	@./scripts/causal/compare-embeddings-logits.sh

causal-inspect-original-model:
	@./scripts/utils/inspect-org-model.py

causal-inspect-converted-model:
	@./scripts/utils/inspect-converted-model.sh

causal-start-embedding-server:
	@./scripts/utils/run-embedding-server.sh $(CONVERTED_MODEL)

causal-curl-embedding-endpoint: causal-run-original-embeddings
	@./scripts/utils/curl-embedding-server.sh | ./scripts/causal/compare-embeddings-logits.sh
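
# Example server check (path is illustrative); start the server, then run
# the curl target from a second terminal:
#   make causal-start-embedding-server CONVERTED_MODEL=/path/to/converted-model.gguf
#   make causal-curl-embedding-endpoint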

causal-quantize-Q8_0: QUANTIZED_TYPE = Q8_0
causal-quantize-Q8_0: causal-quantize-model

causal-quantize-Q4_0: QUANTIZED_TYPE = Q4_0
causal-quantize-Q4_0: causal-quantize-model

# For Quantization Aware Trained (QAT) models in Q4_0 we explicitly set the
# token embedding and output types to Q8_0 instead of the default Q6_K.
causal-quantize-qat-Q4_0: QUANTIZED_TYPE = Q4_0
causal-quantize-qat-Q4_0: TOKEN_EMBD_TYPE = Q8_0
causal-quantize-qat-Q4_0: OUTPUT_TYPE = Q8_0
causal-quantize-qat-Q4_0: causal-quantize-model

causal-quantize-model:
	$(call quantize_model,$(CONVERTED_MODEL),QUANTIZED_MODEL)

causal-run-quantized-model:
	@QUANTIZED_MODEL="$(QUANTIZED_MODEL)" ./scripts/causal/run-converted-model.sh $(QUANTIZED_MODEL)
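
# Example quantization flow (paths are illustrative); per the macro's hint,
# export the quantized model path before running it:
#   make causal-quantize-Q8_0 CONVERTED_MODEL=/path/to/converted-model.gguf
#   export QUANTIZED_MODEL=/path/to/quantized-model.gguf
#   make causal-run-quantized-model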

###
### Embedding Model targets/recipes
###

embedding-convert-model-bf16: OUTTYPE=bf16
embedding-convert-model-bf16: embedding-convert-model

embedding-convert-model:
	$(call validate_embedding_model_path,embedding-convert-model)
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(OUTTYPE)" MODEL_PATH="$(EMBEDDING_MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/embedding/convert-model.sh

embedding-convert-model-st:
	$(call validate_embedding_model_path,embedding-convert-model-st)
	@MODEL_NAME="$(MODEL_NAME)" OUTTYPE="$(OUTTYPE)" MODEL_PATH="$(EMBEDDING_MODEL_PATH)" \
	METADATA_OVERRIDE="$(METADATA_OVERRIDE)" \
	./scripts/embedding/convert-model.sh -st

embedding-run-original-model:
	$(call validate_embedding_model_path,embedding-run-original-model)
	@EMBEDDING_MODEL_PATH="$(EMBEDDING_MODEL_PATH)" \
	USE_SENTENCE_TRANSFORMERS="$(USE_SENTENCE_TRANSFORMERS)" \
	./scripts/embedding/run-original-model.py \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)") \
	$(if $(USE_SENTENCE_TRANSFORMERS),--use-sentence-transformers)

embedding-run-original-model-st: USE_SENTENCE_TRANSFORMERS=1
embedding-run-original-model-st: embedding-run-original-model

embedding-run-converted-model:
	@./scripts/embedding/run-converted-model.sh $(CONVERTED_EMBEDDING_MODEL) \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)") \
	$(if $(EMBD_NORMALIZE),--embd-normalize "$(EMBD_NORMALIZE)")
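
# Example usage with a custom prompts file (paths are illustrative):
#   make embedding-run-converted-model \
#        CONVERTED_EMBEDDING_MODEL=/path/to/converted-embedding-model.gguf \
#        PROMPTS_FILE=prompts.txt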

embedding-verify-logits: embedding-run-original-model embedding-run-converted-model
	@./scripts/embedding/compare-embeddings-logits.sh \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)")

embedding-verify-logits-st: embedding-run-original-model-st embedding-run-converted-model
	@./scripts/embedding/compare-embeddings-logits.sh \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)")
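
# Example verification run (paths are illustrative; PROMPTS_FILE is optional):
#   make embedding-verify-logits EMBEDDING_MODEL_PATH=/path/to/original/model \
#        CONVERTED_EMBEDDING_MODEL=/path/to/converted-embedding-model.gguf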

embedding-inspect-original-model:
	$(call validate_embedding_model_path,embedding-inspect-original-model)
	@EMBEDDING_MODEL_PATH="$(EMBEDDING_MODEL_PATH)" ./scripts/utils/inspect-org-model.py -m $(EMBEDDING_MODEL_PATH)

embedding-inspect-converted-model:
	@CONVERTED_EMBEDDING_MODEL="$(CONVERTED_EMBEDDING_MODEL)" ./scripts/utils/inspect-converted-model.sh $(CONVERTED_EMBEDDING_MODEL)

embedding-start-embedding-server:
	@./scripts/utils/run-embedding-server.sh $(CONVERTED_EMBEDDING_MODEL)

embedding-curl-embedding-endpoint:
	@./scripts/utils/curl-embedding-server.sh | ./scripts/embedding/compare-embeddings-logits.sh

embedding-quantize-Q8_0: QUANTIZED_TYPE = Q8_0
embedding-quantize-Q8_0: embedding-quantize-model

embedding-quantize-Q4_0: QUANTIZED_TYPE = Q4_0
embedding-quantize-Q4_0: embedding-quantize-model

# For Quantization Aware Trained (QAT) models in Q4_0 we explicitly set the
# token embedding and output types to Q8_0 instead of the default Q6_K.
embedding-quantize-qat-Q4_0: QUANTIZED_TYPE = Q4_0
embedding-quantize-qat-Q4_0: TOKEN_EMBD_TYPE = Q8_0
embedding-quantize-qat-Q4_0: OUTPUT_TYPE = Q8_0
embedding-quantize-qat-Q4_0: embedding-quantize-model

embedding-quantize-model:
	$(call quantize_model,$(CONVERTED_EMBEDDING_MODEL),QUANTIZED_EMBEDDING_MODEL)

embedding-run-quantized-model:
	@./scripts/embedding/run-converted-model.sh $(QUANTIZED_EMBEDDING_MODEL) \
	$(if $(PROMPTS_FILE),--prompts-file "$(PROMPTS_FILE)")
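
# Example quantization flow (paths are illustrative):
#   make embedding-quantize-Q8_0 CONVERTED_EMBEDDING_MODEL=/path/to/converted-embedding-model.gguf
#   export QUANTIZED_EMBEDDING_MODEL=/path/to/quantized-embedding-model.gguf
#   make embedding-run-quantized-model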

###
### Perplexity targets/recipes
###
perplexity-data-gen:
	CONVERTED_MODEL="$(CONVERTED_MODEL)" ./scripts/utils/perplexity-gen.sh

perplexity-run-full:
	QUANTIZED_MODEL="$(QUANTIZED_MODEL)" LOGITS_FILE="$(LOGITS_FILE)" \
	./scripts/utils/perplexity-run.sh

perplexity-run:
	QUANTIZED_MODEL="$(QUANTIZED_MODEL)" ./scripts/utils/perplexity-run-simple.sh
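
# Illustrative flow (paths are examples; the exact file handoff depends on the
# scripts): generate data with the converted model, then run perplexity:
#   make perplexity-data-gen CONVERTED_MODEL=/path/to/converted-model.gguf
#   make perplexity-run QUANTIZED_MODEL=/path/to/quantized-model.gguf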

###
### HuggingFace targets/recipes
###

hf-create-model:
	@./scripts/utils/hf-create-model.py -m "$(MODEL_NAME)" -ns "$(NAMESPACE)" -b "$(ORIGINAL_BASE_MODEL)"

hf-create-model-dry-run:
	@./scripts/utils/hf-create-model.py -m "$(MODEL_NAME)" -ns "$(NAMESPACE)" -b "$(ORIGINAL_BASE_MODEL)" -d

hf-create-model-embedding:
	@./scripts/utils/hf-create-model.py -m "$(MODEL_NAME)" -ns "$(NAMESPACE)" -b "$(ORIGINAL_BASE_MODEL)" -e

hf-create-model-embedding-dry-run:
	@./scripts/utils/hf-create-model.py -m "$(MODEL_NAME)" -ns "$(NAMESPACE)" -b "$(ORIGINAL_BASE_MODEL)" -e -d

hf-create-model-private:
	@./scripts/utils/hf-create-model.py -m "$(MODEL_NAME)" -ns "$(NAMESPACE)" -b "$(ORIGINAL_BASE_MODEL)" -p

hf-upload-gguf-to-model:
	@./scripts/utils/hf-upload-gguf-model.py -m "$(MODEL_PATH)" -r "$(REPO_ID)" -o "$(NAME_IN_REPO)"

hf-create-collection:
	@./scripts/utils/hf-create-collection.py -n "$(NAME)" -d "$(DESCRIPTION)" -ns "$(NAMESPACE)"

hf-add-model-to-collection:
	@./scripts/utils/hf-add-model-to-collection.py -c "$(COLLECTION)" -m "$(MODEL)"
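
# Illustrative publishing flow (all names and paths are examples only):
#   make hf-create-model MODEL_NAME=Example-1B NAMESPACE=my-namespace \
#        ORIGINAL_BASE_MODEL=some-org/Example-1B
#   make hf-upload-gguf-to-model MODEL_PATH=/path/to/quantized-model.gguf \
#        REPO_ID=my-namespace/Example-1B-GGUF NAME_IN_REPO=example-1b-Q8_0.gguf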

.PHONY: clean
clean:
	@$(RM) -rf data .converted_embedding_model.txt .converted_model.txt .embedding_model_name.txt .model_name.txt