DRAFT ARTICLE
Based on https://github.com/simonlui/Docker_IPEX_ComfyUI
and https://github.com/comfyanonymous/ComfyUI
Build and run the original Docker image, which is based on Ubuntu 22.04
git clone https://github.com/simonlui/Docker_IPEX_ComfyUI.git
cd Docker_IPEX_ComfyUI
vi Dockerfile
docker build -t ipex-arc-comfy:latest -f Dockerfile . --no-cache
docker rm comfy-server
docker run -it --device /dev/dri --device /dev/accel --name comfy-server --network=host --security-opt=label=disable \
    -v /docker/confyui/app:/ComfyUI:Z \
    -v /docker/confyui/deps:/deps \
    -v /docker/confyui/huggingface:/root/.cache/huggingface \
    ipex-arc-comfy:latest
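Before building, it is worth confirming that the host actually exposes the GPU device nodes that the --device flags pass through. A quick check, assuming an Intel GPU whose i915 or xe driver is loaded on the host and that pciutils is installed:

# Render/card nodes consumed by --device /dev/dri
ls -l /dev/dri
# Accelerator nodes consumed by --device /dev/accel (may not exist on older kernels)
ls -l /dev/accel 2>/dev/null || echo "no /dev/accel nodes on this host"
# Which kernel driver is bound to the GPU
lspci -k | grep -iA3 -E 'vga|display'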
Modifications to make it work with an Ubuntu 24.04 base image
Dockerfile
# SPDX-License-Identifier: Apache-2.0
ARG UBUNTU_VERSION=24.04
FROM ubuntu:${UBUNTU_VERSION} AS oneapi-lib-installer
# Make sure RUN pipelines fail the build if any command in them fails.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# Install prerequisites to install oneAPI runtime libraries.
# hadolint ignore=DL3008
RUN apt-get update && \
apt-get install -y --no-install-recommends --fix-missing \
ca-certificates \
gnupg2 \
gpg-agent \
unzip \
wget
# hadolint ignore=DL4006
RUN wget --progress=dot:giga -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
| gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \
echo 'deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main' \
| tee /etc/apt/sources.list.d/oneAPI.list
# intel-oneapi-compiler-shared-common provides `sycl-ls` and other utilities; the DPC++ package provides the compiler itself.
RUN apt-get update && \
apt-get install -y --no-install-recommends --fix-missing \
intel-oneapi-dpcpp-cpp-2024.2 \
intel-oneapi-compiler-shared-common-2024.2 \
intel-oneapi-runtime-dpcpp-cpp-2024 \
intel-oneapi-runtime-mkl-2024 && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Add the Intel graphics driver package repository. This depends on the host having a working GPU driver, since the GPU is passed through from the host at run time.
# If you are behind a proxy, prefix the wget and apt-get commands in this file with https_proxy=<your proxy URL>.
# hadolint ignore=DL4006
RUN wget --progress=dot:giga -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg
# hadolint ignore=DL4006
RUN echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu noble client" | \
tee /etc/apt/sources.list.d/intel.gpu.noble.list
ARG UBUNTU_VERSION=24.04
FROM ubuntu:${UBUNTU_VERSION}
# Copy all the files from the oneAPI runtime libraries image into the actual final image.
RUN mkdir -p /opt/intel
COPY --from=oneapi-lib-installer /opt/intel/ /opt/intel/
COPY --from=oneapi-lib-installer /usr/share/keyrings/intel-graphics.gpg /usr/share/keyrings/intel-graphics.gpg
COPY --from=oneapi-lib-installer /etc/apt/sources.list.d/intel.gpu.noble.list /etc/apt/sources.list.d/intel.gpu.noble.list
# Set apt install to not be interactive for some packages that require it.
ENV DEBIAN_FRONTEND=noninteractive
# Set oneAPI library environment variable
ENV LD_LIBRARY_PATH=/opt/intel/oneapi/redist/lib:/opt/intel/oneapi/redist/lib/intel64:$LD_LIBRARY_PATH
# Install certificate authorities and other base tooling needed for secure downloads and later package installs.
# hadolint ignore=DL3008
RUN apt-get update && \
apt-get install -y --no-install-recommends --fix-missing \
build-essential \
ca-certificates \
fonts-noto \
git \
gnupg2 \
gpg-agent \
software-properties-common \
wget && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Choose the memory allocator to preload (tcmalloc by default; jemalloc optional). Level-Zero and OpenCL are installed below.
ARG ALLOCATOR=tcmalloc
ENV ALLOCATOR=${ALLOCATOR}
ARG ALLOCATOR_PACKAGE=libgoogle-perftools-dev
ARG ALLOCATOR_LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc.so
# Note: shell variable assignments inside a RUN do not persist to later Dockerfile instructions,
# so to use jemalloc override the build args directly, e.g.:
#   --build-arg ALLOCATOR=jemalloc --build-arg ALLOCATOR_PACKAGE=libjemalloc-dev \
#   --build-arg ALLOCATOR_LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so
# Install Level-Zero and OpenCL backends.
RUN apt-get update && \
apt-get install -y --no-install-recommends --fix-missing \
libze-intel-gpu1 intel-ocloc-dev intel-ocloc && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install Python and associated packages. Ubuntu 24.04 already ships Python 3.12 as the default python3, so no extra PPA is needed.
ARG PYTHON=python3.12
# hadolint ignore=DL3008
RUN apt-get update && \
apt-get install -y --no-install-recommends --fix-missing \
python3 \
python3-dev \
python3-venv \
python3-pip \
python3-setuptools && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Symlink Python so it is the default python/python3.
RUN ln -sf "$(which ${PYTHON})" /usr/local/bin/python && \
ln -sf "$(which ${PYTHON})" /usr/local/bin/python3 && \
ln -sf "$(which ${PYTHON})" /usr/bin/python && \
ln -sf "$(which ${PYTHON})" /usr/bin/python3
# Install ComfyUI/PyTorch runtime dependencies.
# hadolint ignore=DL3008
RUN apt-get update && \
apt-get install -y --no-install-recommends --fix-missing \
${ALLOCATOR_PACKAGE} \
libgl1 \
libglib2.0-0 \
libgomp1 \
level-zero \
ocl-icd-libopencl1 \
numactl && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install the latest Intel Compute Runtime and graphics compiler releases from GitHub; these supersede the versions installed from the apt repository above.
RUN mkdir neo
WORKDIR /neo
# The matching dbgsym .ddeb packages from the release page are skipped; dpkg -i -- *.deb would not install them anyway.
RUN wget --progress=dot:giga https://github.com/intel/intel-graphics-compiler/releases/download/v2.11.7/intel-igc-core-2_2.11.7+19146_amd64.deb && \
wget --progress=dot:giga https://github.com/intel/intel-graphics-compiler/releases/download/v2.11.7/intel-igc-opencl-2_2.11.7+19146_amd64.deb && \
wget --progress=dot:giga https://github.com/intel/compute-runtime/releases/download/25.18.33578.6/intel-ocloc_25.18.33578.6-0_amd64.deb && \
wget --progress=dot:giga https://github.com/intel/compute-runtime/releases/download/25.18.33578.6/intel-opencl-icd_25.18.33578.6-0_amd64.deb && \
wget --progress=dot:giga https://github.com/intel/compute-runtime/releases/download/25.18.33578.6/libigdgmm12_22.7.0_amd64.deb && \
wget --progress=dot:giga https://github.com/intel/compute-runtime/releases/download/25.18.33578.6/libze-intel-gpu1_25.18.33578.6-0_amd64.deb && \
dpkg -i -- *.deb && \
rm -- *.deb
WORKDIR /
# Make sure everything is up to date.
# hadolint ignore=DL3008
RUN apt-get update && \
apt-get upgrade -y --no-install-recommends --fix-missing && \
apt-get autoremove -y && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Remove linux-libc-dev for security reasons without disturbing anything else.
RUN dpkg -r --force-depends linux-libc-dev
# Copy the startup script to the /bin/ folder and make executable.
COPY startup.sh /bin/
RUN chmod 755 /bin/startup.sh
# Volumes that can be used by the image when making containers.
VOLUME [ "/deps" ]
VOLUME [ "/ComfyUI" ]
VOLUME [ "/models" ]
VOLUME [ "/root/.cache/huggingface" ]
# Set the location of the Python virtual environment and make sure LD_PRELOAD points at the chosen allocator.
ENV VENVDir=/deps/venv
ENV LD_PRELOAD=${ALLOCATOR_LD_PRELOAD}
# Enable Level Zero system management
# See https://spec.oneapi.io/level-zero/latest/sysman/PROG.html
ENV ZES_ENABLE_SYSMAN=1
# Force 100% available VRAM size for compute-runtime.
# See https://github.com/intel/compute-runtime/issues/586
ENV NEOReadDebugKeys=1
ENV ClDeviceGlobalMemSizeAvailablePercent=100
# Enable double precision (FP64) emulation. Turned on by default so torch.compile works for various kernels. Turn this off if you do not use torch.compile and
# need attention slicing to work around the 4GB single-allocation limit on Intel Xe and older GPUs.
# See https://github.com/intel/compute-runtime/blob/master/opencl/doc/FAQ.md#feature-double-precision-emulation-fp64
ENV OverrideDefaultFP64Settings=1
ENV IGC_EnableDPEmulation=1
# Enable SYCL variables for cache reuse and single threaded mode.
# See https://github.com/intel/llvm/blob/sycl/sycl/doc/EnvironmentVariables.md
ENV SYCL_CACHE_PERSISTENT=1
ENV SYCL_PI_LEVEL_ZERO_SINGLE_THREAD_MODE=1
# Turn this on for Intel Xe GPUs that do not have XMX cores, which includes iGPUs from Ice Lake through Meteor Lake.
ENV BIGDL_LLM_XMX_DISABLED=1
# Linux-only setting that speeds up compute workload submission by letting workloads run concurrently on a single hardware queue. Left off by default: it was
# introduced only recently with the Xe graphics driver (which still has to be enabled manually), development has focused on the Data Center GPU Max Series, and
# on Intel Arc cards Intel mainly recommends it for LLM workloads. Requires kernel 6.2 or newer.
# See https://www.intel.com/content/www/us/en/developer/articles/guide/level-zero-immediate-command-lists.html
#ENV SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
# Only use this if something in Intel's low-level libraries is not working; see https://github.com/intel/xetla/tree/main for details on what it affects.
#ENV USE_XETLA=OFF
# Set this variable for better training performance, in case training is run in the container.
# See https://github.com/intel/intel-extension-for-pytorch/issues/296#issuecomment-1461118993
ENV IPEX_XPU_ONEDNN_LAYOUT=1
# Set to false to launch ComfyUI on the CPU; XPU is the default.
ARG UseXPU=true
ENV UseXPU=${UseXPU}
# Set to true if ipexrun is to be used to launch ComfyUI (enabled by default in this build).
ARG UseIPEXRUN=true
ENV UseIPEXRUN=${UseIPEXRUN}
# Set to the arguments you want to pass to ipexrun.
# Example for CPU: --multi-task-manager 'taskset' --memory-allocator ${ALLOCATOR}
# Example for XPU: --convert-fp64-to-fp32
ARG IPEXRUNArgs="--convert-fp64-to-fp32"
ENV IPEXRUNArgs=${IPEXRUNArgs}
# Pass ComfyUI arguments in as an environment variable so startup.sh can forward them to ComfyUI.
ARG ComfyArgs=""
ENV ComfyArgs=${ComfyArgs}
# Set location and entrypoint of the image to the ComfyUI directory and the startup script.
WORKDIR /ComfyUI
ENTRYPOINT [ "startup.sh" ]
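With this Dockerfile the build and run commands stay essentially the same as for the Ubuntu 22.04 image; the build args it declares (UBUNTU_VERSION, ALLOCATOR, PYTHON, UseXPU, UseIPEXRUN, IPEXRUNArgs, ComfyArgs) can be overridden at build time. A minimal sketch, reusing the host paths from the run command above and adding the /models volume this Dockerfile declares (the image tag, host paths and ComfyArgs value are only examples):

docker build -t ipex-arc-comfy:noble -f Dockerfile . \
    --build-arg UBUNTU_VERSION=24.04 \
    --build-arg ComfyArgs="--listen"
docker run -it --device /dev/dri --device /dev/accel --name comfy-server-noble \
    --network=host --security-opt=label=disable \
    -v /docker/confyui/app:/ComfyUI:Z \
    -v /docker/confyui/deps:/deps \
    -v /docker/confyui/models:/models \
    -v /docker/confyui/huggingface:/root/.cache/huggingface \
    ipex-arc-comfy:noble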
startup.sh
#!/bin/sh
CONTAINER_ALREADY_STARTED=/tmp/CONTAINER_ALREADY_STARTED_PLACEHOLDER
# Set up the Python virtual environment and clone the repository on the first launch of the container.
if [ ! -e "$CONTAINER_ALREADY_STARTED" ]
then
echo "First time launching container, setting things up."
python3 -m venv "$VENVDir"
# Clone the repository if the mounted directory is not already a git checkout.
git rev-parse --git-dir > /dev/null 2>&1 || git clone https://github.com/comfyanonymous/ComfyUI.git .
git config core.filemode false
FirstLaunch=true
# Make a file in /tmp/ to indicate the first launch step has been executed.
touch "$CONTAINER_ALREADY_STARTED"
fi
# Activate the virtual environment to use for ComfyUI
if [ -f "$VENVDir"/bin/activate ]
then
echo "Activating python venv."
. "$VENVDir"/bin/activate
. /opt/intel/oneapi/setvars.sh
else
echo "Error: Cannot activate python venv. Check installation. Exiting immediately."
exit 1
fi
# Install pip requirements if launching for the first time.
if [ "$FirstLaunch" = "true" ]
then
echo "Installing ComfyUI Python dependencies."
python -m pip install torch==2.3.1+cxx11.abi torchvision==0.18.1+cxx11.abi torchaudio==2.3.1+cxx11.abi intel-extension-for-pytorch==2.3.110+xpu oneccl_bind_pt==2.3.100+xpu --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
pip install --pre pytorch-triton-xpu==3.1.0+91b14bf559 --index-url https://download.pytorch.org/whl/nightly/xpu
pip install -r requirements.txt
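# Optional diagnostic (an extra check, assuming the wheels above installed cleanly):
# confirm the XPU builds of PyTorch/IPEX import and can see the GPU. Failure here does not stop the launch.
python -c "import torch, intel_extension_for_pytorch as ipex; print('torch', torch.__version__, 'ipex', ipex.__version__, 'xpu available:', torch.xpu.is_available())" || echo "Warning: XPU sanity check failed."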
fi
# Launch ComfyUI, through ipexrun if requested. The argument variables are meant to be word-split by the shell here, so the shellcheck warning is disabled.
if [ "$UseIPEXRUN" = "true" ] && [ "$UseXPU" = "true" ]
then
echo "Using ipexrun xpu to launch ComfyUI."
# shellcheck disable=SC2086
exec ipexrun xpu $IPEXRUNArgs main.py $ComfyArgs
elif [ "$UseIPEXRUN" = "true" ] && [ "$UseXPU" = "false" ]
then
echo "Using ipexrun cpu to launch ComfyUI."
# shellcheck disable=SC2086
exec ipexrun $IPEXRUNArgs main.py $ComfyArgs
else
echo "No command to use ipexrun to launch ComfyUI. Launching normally."
# shellcheck disable=SC2086
python3 main.py $ComfyArgs
fi
This does not work yet. The container starts, but launching through ipexrun fails:

Using ipexrun xpu to launch ComfyUI.
/usr/bin/startup.sh: 41: exec: ipexrun: not found
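Two things worth checking from here (suggestions, not a verified fix): whether intel-extension-for-pytorch actually installed an ipexrun entry point into the venv that lives on the /deps bind mount, and whether ComfyUI starts at all when ipexrun is bypassed. For example, from the host:

# The venv lives on the bind mount, so its entry points can be inspected from the host.
ls /docker/confyui/deps/venv/bin/ | grep -i ipexrun || echo "ipexrun was not installed into the venv"
# Relaunch without ipexrun; startup.sh then falls back to plain python.
docker rm comfy-server
docker run -it --device /dev/dri --device /dev/accel --name comfy-server --network=host --security-opt=label=disable \
    -e UseIPEXRUN=false \
    -v /docker/confyui/app:/ComfyUI:Z \
    -v /docker/confyui/deps:/deps \
    -v /docker/confyui/huggingface:/root/.cache/huggingface \
    ipex-arc-comfy:latest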