Revert to ipex-llm version of ollama for gpu acceleration
parent aec98f6f30
commit 40313a7364

Dockerfile (16 lines changed)
@@ -10,6 +10,8 @@ RUN apt update && \
     gnupg \
     wget \
     curl \
+    python3 \
+    python3-pip \
     ocl-icd-libopencl1

 # Intel GPU compute user-space drivers
@@ -34,21 +36,21 @@ RUN wget -qO - https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P
     tee /etc/apt/sources.list.d/oneAPI.list && \
     apt update && \
     apt install --no-install-recommends -q -y \
-    intel-oneapi-runtime-libs
+    intel-basekit

 # Required oneAPI environment variables
 ENV USE_XETLA=OFF
 ENV SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
 ENV SYCL_CACHE_PERSISTENT=1

-# Ollama
-RUN curl -fsSL https://ollama.com/install.sh | sh
+COPY _init.sh /usr/share/lib/init_workspace.sh
+COPY _run.sh /usr/share/lib/run_workspace.sh

+# Ollama via ipex-llm
+RUN pip3 install --pre --upgrade ipex-llm[cpp]
+
 ENV OLLAMA_NUM_GPU=999
 ENV OLLAMA_HOST=0.0.0.0:11434

-# https://github.com/ollama/ollama/issues/1590
-ENV OLLAMA_INTEL_GPU=1
-
-ENTRYPOINT ["/usr/local/bin/ollama", "serve"]
+ENTRYPOINT ["/bin/bash", "/usr/share/lib/run_workspace.sh"]
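Note: the contents of _init.sh and _run.sh are not part of this diff. A minimal sketch of what _run.sh plausibly does, following the ipex-llm ollama quickstart linked in the References below (the /opt/ollama working directory and the setvars.sh path are assumptions, not the committed script):

    #!/bin/bash
    # Hypothetical _run.sh sketch, not the committed script.
    # Load the oneAPI runtime installed by intel-basekit (assumed default path).
    source /opt/intel/oneapi/setvars.sh

    # init-ollama ships with the ipex-llm[cpp] pip package; it symlinks the
    # ipex-llm build of ollama into the current directory.
    mkdir -p /opt/ollama
    cd /opt/ollama
    init-ollama

    # OLLAMA_NUM_GPU=999 and OLLAMA_HOST are already set as ENVs in the Dockerfile.
    exec ./ollama serve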
@@ -30,6 +30,9 @@ $ docker-compose -f docker-compose-wsl2.yml up

 Then launch your web browser to http://localhost:3000 to launch the web ui. Create a local OpenWeb UI credential, then click the settings icon in the top right of the screen, then select 'Models', then click 'Show', then download a model like 'llama3.1:8b-instruct-q8_0' for Intel ARC A770 16GB VRAM

+# Known issues
+* Little effort has been made to prune the packages pulled into the Ollama docker image for Intel GPU
+
 # References
-* https://github.com/ollama/ollama/issues/1590
-* https://github.com/ollama/ollama/pull/3278
+* https://dgpu-docs.intel.com/driver/client/overview.html
+* https://ipex-llm.readthedocs.io/en/latest/doc/LLM/Quickstart/ollama_quickstart.html
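As a quick check that the rebuilt image actually reaches the GPU, something like the following should work once the container is up (the container name ollama-intel is an assumption; sycl-ls is installed by intel-basekit but needs the oneAPI environment sourced first):

    # Hypothetical smoke test; container name and published port are assumptions.
    # 1. Confirm the SYCL runtime sees the Intel GPU inside the container --
    #    expect a level_zero GPU entry (e.g. the Arc A770).
    docker exec -it ollama-intel bash -c \
      'source /opt/intel/oneapi/setvars.sh && sycl-ls'

    # 2. Exercise the ollama server over its published port.
    curl http://localhost:11434/api/generate \
      -d '{"model": "llama3.1:8b-instruct-q8_0", "prompt": "Hello", "stream": false}'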