summaryrefslogtreecommitdiff
path: root/llama.cpp/.devops/intel.Dockerfile
diff options
context:
space:
mode:
author: Mitja Felicijan <mitja.felicijan@gmail.com> 2026-02-12 20:57:17 +0100
committer: Mitja Felicijan <mitja.felicijan@gmail.com> 2026-02-12 20:57:17 +0100
commitb333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/.devops/intel.Dockerfile
downloadllmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz
Engage!
Diffstat (limited to 'llama.cpp/.devops/intel.Dockerfile')
-rw-r--r--llama.cpp/.devops/intel.Dockerfile95
1 file changed, 95 insertions, 0 deletions
diff --git a/llama.cpp/.devops/intel.Dockerfile b/llama.cpp/.devops/intel.Dockerfile
new file mode 100644
index 0000000..35ea4ad
--- /dev/null
+++ b/llama.cpp/.devops/intel.Dockerfile
@@ -0,0 +1,95 @@
+ARG ONEAPI_VERSION=2025.2.2-0-devel-ubuntu24.04
+
+## Build Image
+
+FROM intel/deep-learning-essentials:$ONEAPI_VERSION AS build
+
+# Optional FP16 support for the SYCL backend; pass --build-arg GGML_SYCL_F16=ON.
+ARG GGML_SYCL_F16=OFF
+
+# Build-only deps. This stage is discarded, but keep the layer lean anyway:
+# --no-install-recommends (DL3015) and clean the apt lists in-layer (DL3009).
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends git libssl-dev && \
+    rm -rf /var/lib/apt/lists/*
+
+WORKDIR /app
+
+COPY . .
+
+# The exported OPT_SYCL_F16 is visible to cmake only because the if/fi and the
+# cmake invocation run in the same shell of this single RUN.
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" \
+        && export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
+    fi && \
+    echo "Building with dynamic libs" && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_BUILD_TESTS=OFF ${OPT_SYCL_F16} && \
+    cmake --build build --config Release -j$(nproc)
+
+# Collect the shared libraries for the runtime stages; -P keeps symlinks intact.
+RUN mkdir -p /app/lib && \
+    find build -name "*.so*" -exec cp -P {} /app/lib \;
+
+# Assemble the "full" payload: binaries, conversion scripts and their Python deps.
+RUN mkdir -p /app/full \
+    && cp build/bin/* /app/full \
+    && cp *.py /app/full \
+    && cp -r gguf-py /app/full \
+    && cp -r requirements /app/full \
+    && cp requirements.txt /app/full \
+    && cp .devops/tools.sh /app/full/tools.sh
+
+FROM intel/deep-learning-essentials:$ONEAPI_VERSION AS base
+
+# Shared runtime layer: libgomp1 (OpenMP runtime) and curl (healthcheck).
+# Use apt-get, not apt (DL3027), skip recommends, and scrub every apt cache
+# in the same layer so nothing persists in the image.
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends libgomp1 curl \
+    && apt-get autoremove -y \
+    && apt-get clean \
+    && rm -rf /tmp/* /var/tmp/* \
+    && find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete \
+    && find /var/cache -type f -delete
+
+### Full
+FROM base AS full
+
+COPY --from=build /app/lib/ /app
+COPY --from=build /app/full /app
+
+WORKDIR /app
+
+# Python toolchain for the GGUF conversion scripts, isolated in a venv.
+# pip runs with --no-cache-dir (DL3042): its cache lives in /root/.cache,
+# which the /tmp cleanup below would NOT remove, so it would bloat the layer.
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    git \
+    python3 \
+    python3-pip \
+    python3-venv && \
+    python3 -m venv /opt/venv && \
+    . /opt/venv/bin/activate && \
+    pip install --no-cache-dir --upgrade pip setuptools wheel && \
+    pip install --no-cache-dir -r requirements.txt && \
+    apt-get autoremove -y && \
+    apt-get clean && \
+    rm -rf /tmp/* /var/tmp/* && \
+    find /var/cache/apt/archives /var/lib/apt/lists -not -name lock -type f -delete && \
+    find /var/cache -type f -delete
+
+# Put the venv first on PATH so its python/pip are the runtime defaults.
+ENV PATH="/opt/venv/bin:$PATH"
+
+ENTRYPOINT ["/app/tools.sh"]
+
+### Light, CLI only
+FROM base AS light
+
+WORKDIR /app
+
+# Ship only the shared libraries and the CLI binaries -- no Python toolchain.
+COPY --from=build /app/lib/ /app
+COPY --from=build /app/full/llama-cli /app/full/llama-completion /app
+
+ENTRYPOINT ["/app/llama-cli"]
+
+### Server, Server only
+FROM base AS server
+
+# Bind to all interfaces so the port is reachable from outside the container.
+ENV LLAMA_ARG_HOST=0.0.0.0
+
+COPY --from=build /app/lib/ /app
+COPY --from=build /app/full/llama-server /app
+
+WORKDIR /app
+
+# Poll the server's health endpoint. A generous --start-period keeps the
+# orchestrator from killing the container while a large model is still loading.
+HEALTHCHECK --interval=30s --timeout=5s --start-period=5m --retries=3 \
+    CMD curl -f http://localhost:8080/health || exit 1
+
+ENTRYPOINT ["/app/llama-server"]
+