Files
llamacpp-qwen3.5-0.8b/Dockerfile
Luca Sacchi Ricciardi f20f6571c2 project refactored
2026-03-27 14:27:12 +00:00

25 lines
684 B
Docker

# Pin the ROCm base image — :latest is not reproducible across builds.
# NOTE(review): 6.2.4 is a published rocm/dev-ubuntu-22.04 tag; confirm it
# matches the ROCm driver version on the target host.
FROM rocm/dev-ubuntu-22.04:6.2.4

# Runtime GPU configuration for an RDNA3 card (gfx1100, e.g. RX 7900 series).
# These are read by the HIP runtime inside the running container.
ENV HSA_OVERRIDE_GFX_VERSION="11.0.0" \
    AMDGPU_TARGETS="gfx1100" \
    HIP_VISIBLE_DEVICES="0"

# Build toolchain. DEBIAN_FRONTEND is scoped to this RUN instead of being
# baked into the runtime env; --no-install-recommends keeps the layer lean;
# apt lists are removed in the same layer so they never persist in the image.
RUN DEBIAN_FRONTEND=noninteractive apt-get update \
    && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        libcurl4-openssl-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Clone and compile llama.cpp. The ref is parameterized so builds can be
# pinned for reproducibility:
#   docker build --build-arg LLAMA_CPP_REF=<tag-or-branch> .
# Default "master" preserves the original (unpinned) behavior.
ARG LLAMA_CPP_REF=master
RUN git clone --depth 1 --branch "${LLAMA_CPP_REF}" \
        https://github.com/ggerganov/llama.cpp.git .

# Build with native HIP support for the AMD gfx1100 target. Configure and
# compile are one conceptual step, so they share a layer.
RUN cmake -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1100 -DCMAKE_BUILD_TYPE=Release \
    && cmake --build build --config Release -j "$(nproc)"

# Drop root. ROCm device nodes (/dev/kfd, /dev/dri/*) are typically owned by
# the video/render groups, so the service user joins them; render may not
# exist in every base image, hence the guarded usermod.
# NOTE(review): confirm the host maps the devices with matching group IDs at
# `docker run` time (e.g. --device=/dev/kfd --device=/dev/dri).
RUN useradd --system --create-home --uid 10001 llama \
    && usermod -aG video llama \
    && { getent group render >/dev/null && usermod -aG render llama || true; }
USER llama

EXPOSE 8000

# llama-server binds 127.0.0.1:8080 by default, which is unreachable from
# outside the container and contradicts EXPOSE 8000. CMD supplies matching
# defaults while remaining overridable at `docker run <image> <args…>`.
ENTRYPOINT ["/app/build/bin/llama-server"]
CMD ["--host", "0.0.0.0", "--port", "8000"]