2 parents e273eae + f60606d · commit 7409fdc
model_servers/llamacpp_python/cuda/Containerfile
@@ -4,7 +4,7 @@ RUN dnf install -y gcc-toolset-13-gcc gcc-toolset-13-gcc-c++
 USER 1001
 WORKDIR /locallm
 COPY src .
-ENV CMAKE_ARGS="-DLLAMA_CUBLAS=on -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DLLAMA_F16C=OFF"
+ENV CMAKE_ARGS="-DGGML_CUDA=on -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DLLAMA_F16C=OFF"
 ENV FORCE_CMAKE=1
 RUN CC="/opt/rh/gcc-toolset-13/root/usr/bin/gcc" CXX="/opt/rh/gcc-toolset-13/root/usr/bin/g++" pip install --no-cache-dir -r ./requirements.txt
 ENTRYPOINT [ "sh", "run.sh" ]
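
The change replaces the older -DLLAMA_CUBLAS=on CMake flag with -DGGML_CUDA=on, the name newer llama.cpp versions use to enable the CUDA backend; the remaining flags are unchanged. As a rough sketch of the equivalent install step run outside the container (assuming requirements.txt pins llama-cpp-python, which is not shown in this diff):

# Hypothetical local equivalent of the Containerfile build step;
# package name assumed from the llama-cpp-python install convention.
CMAKE_ARGS="-DGGML_CUDA=on -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DLLAMA_F16C=OFF" \
FORCE_CMAKE=1 pip install --no-cache-dir llama-cpp-python

FORCE_CMAKE=1 forces the package to be rebuilt from source so the CUDA-enabled backend is compiled instead of a prebuilt CPU wheel being reused.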