Files
llamacpp-qwen3.5-0.8b/Dockerfile
2026-03-27 14:48:52 +00:00

24 lines
673 B
Docker

# syntax=docker/dockerfile:1
# llama.cpp server image built for AMD RDNA3 (gfx1100) GPUs via ROCm/HIP.
#
# TODO(review): pin BASE_TAG to a specific ROCm release (e.g. a 6.x tag) —
# `latest` makes builds non-reproducible.
ARG BASE_TAG=latest
FROM rocm/dev-ubuntu-22.04:${BASE_TAG}

# GPU runtime selection for RDNA3 consumer cards (e.g. RX 7900 series):
# HSA_OVERRIDE_GFX_VERSION maps the card onto the gfx1100 ISA, and
# HIP_VISIBLE_DEVICES restricts the server to the first GPU.
ENV HSA_OVERRIDE_GFX_VERSION="11.0.0" \
    AMDGPU_TARGETS="gfx1100" \
    HIP_VISIBLE_DEVICES="0"

# hipblas-dev and rocblas-dev are required to compile with GGML_HIP=ON.
# curl is installed for the HEALTHCHECK below.
# DEBIAN_FRONTEND is scoped to this RUN so it does not leak into the runtime env.
RUN DEBIAN_FRONTEND=noninteractive apt-get update \
    && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        curl \
        git \
        hipblas-dev \
        libcurl4-openssl-dev \
        rocblas-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Pin the llama.cpp revision for reproducible images; override at build time:
#   docker build --build-arg LLAMACPP_REF=<tag-or-branch> .
ARG LLAMACPP_REF=master
RUN git clone --depth 1 --branch "${LLAMACPP_REF}" \
        https://github.com/ggerganov/llama.cpp.git .

# Configure and build in one logical step; a half-configured tree is never
# worth caching on its own.
RUN cmake -B build -DGGML_HIP=ON -DAMDGPU_TARGETS=gfx1100 -DCMAKE_BUILD_TYPE=Release \
    && cmake --build build --config Release -j "$(nproc)"

# Run as non-root. ROCm GPU device nodes are owned by the video/render groups;
# `render` may not exist in every base image, so guard its addition.
# NOTE(review): at runtime the host's render-group GID may differ — pass
# `--group-add` on `docker run` if GPU access fails.
RUN useradd --system --user-group --home-dir /app llama \
    && usermod -aG video llama \
    && { getent group render >/dev/null && usermod -aG render llama || true; } \
    && chown -R llama:llama /app
USER llama

EXPOSE 8000

HEALTHCHECK --interval=30s --timeout=3s --start-period=30s --retries=3 \
    CMD curl -fsS http://localhost:8000/health || exit 1

ENTRYPOINT ["/app/build/bin/llama-server"]
# Default args make the server reachable: llama-server binds 127.0.0.1:8080 by
# default, which is unreachable from outside the container and disagrees with
# EXPOSE 8000. Override with `docker run <image> <args>`.
CMD ["--host", "0.0.0.0", "--port", "8000"]