DRAFT
vi Dockerfile
vi docker-compose.yml
docker compose up -d
root@server1:~/webui-ollama-cpu# docker ps -a
CONTAINER ID   IMAGE                                COMMAND                  CREATED              STATUS                                 PORTS                                         NAMES
da8a3c39db4b   ghcr.io/open-webui/open-webui:main   "bash start.sh"          About a minute ago   Up About a minute (healthy)            0.0.0.0:3000->8080/tcp, [::]:3000->8080/tcp   open-webui
9bcdc1c79735   sdnext-ipex:latest                   "startup.sh -f --use…"   About a minute ago   Up About a minute (health: starting)   0.0.0.0:7860->7860/tcp, [::]:7860->7860/tcp   sdnext-ipex
632585ea92db   ollama/ollama:latest                 "/bin/ollama serve"      About a minute ago   Up About a minute                      11434/tcp                                     ollama
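With all three containers up, a quick sanity check from the host (assuming the default ports shown above) might look like the following; the status endpoint is the same one the SD.Next healthcheck polls:

curl -s http://localhost:3000 >/dev/null && echo "Open WebUI answers"
curl -s http://localhost:7860/sdapi/v1/status
docker exec ollama ollama list   # lists the models pulled into Ollama so far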
Open WebUI + Ollama




Example of running the DeepSeek-V2 model on CPU (GIF playback speed is 1x)
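For context, the model in the clip has to be pulled into the Ollama container first, either through Open WebUI's model manager or on the command line. A pull along these lines should work, though the exact tag on the Ollama library may differ:

docker exec -it ollama ollama pull deepseek-v2

Once the pull finishes, the model should show up in Open WebUI's model selector.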

SD.Next






Prompt: a cat is sitting on a bench in a park, orange cat, two crutches near bench, benches, park background, animals in the streets
Parameters: Steps: 20 | Size: 1024x1024 | Seed: 4213737610 | CFG scale: 6 | Model: TempestV0.1-Artistic | Model hash: 9ca260b31e | App: SD.Next | Version: 12ebadc | Operations: txt2img | Pipeline: StableDiffusionXLPipeline
Time: 2m 26.65s | pipeline 138.58 decode 6.00 move 3.15 prompt 1.96 | GPU 11098 MB 9% | RAM 4.79 GB 4%

Prompt: a cat is sitting on a bench in a park, orange cat, two crutches near bench, benches, park background, animals in the streets
Parameters: Steps: 20 | Size: 1024x1024 | Seed: 824884706 | CFG scale: 6 | Model: dreamshaperXL_v21TurboDPMSDE | Model hash: 4496b36d48 | App: SD.Next | Version: 12ebadc | Operations: txt2img | Pipeline: StableDiffusionXLPipeline
Time: 2m 24.87s | pipeline 137.31 decode 5.87 move 2.45 prompt 1.58 | GPU 11098 MB 9% | RAM 4.8 GB 4%
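Both renders above were done through the SD.Next web UI, but the same request can be sent to its A1111-compatible API (the same /sdapi/v1 family the Dockerfile healthcheck probes). A minimal sketch against the published port:

curl -s -X POST http://localhost:7860/sdapi/v1/txt2img \
  -H "Content-Type: application/json" \
  -d '{
    "prompt": "a cat is sitting on a bench in a park, orange cat, two crutches near bench, benches, park background, animals in the streets",
    "steps": 20,
    "width": 1024,
    "height": 1024,
    "cfg_scale": 6
  }'

The response should come back as JSON with the generated image base64-encoded in the images array.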

Dockerfile
# SD.Next IPEX Dockerfile
# docs: <https://github.com/vladmandic/sdnext/wiki/Docker>
# base image
FROM ubuntu:noble
# metadata
LABEL org.opencontainers.image.vendor="SD.Next"
LABEL org.opencontainers.image.authors="disty0"
LABEL org.opencontainers.image.url="https://github.com/vladmandic/sdnext/"
LABEL org.opencontainers.image.documentation="https://github.com/vladmandic/sdnext/wiki/Docker"
LABEL org.opencontainers.image.source="https://github.com/vladmandic/sdnext/"
LABEL org.opencontainers.image.licenses="AGPL-3.0"
LABEL org.opencontainers.image.title="SD.Next IPEX"
LABEL org.opencontainers.image.description="SD.Next: Advanced Implementation of Stable Diffusion and other Diffusion-based generative image models"
LABEL org.opencontainers.image.base.name="https://hub.docker.com/_/ubuntu:noble"
LABEL org.opencontainers.image.version="latest"
# essentials (curl is used by the HEALTHCHECK at the end of this file)
RUN apt-get update && \
apt-get install -y --no-install-recommends --fix-missing \
software-properties-common \
build-essential \
ca-certificates \
curl \
wget \
gpg \
git
# intel compute runtime
#RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
#RUN echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu noble client" | tee /etc/apt/sources.list.d/intel-gpu-noble.list
RUN apt-get update
RUN apt-get install -y --no-install-recommends --fix-missing \
intel-opencl-icd \
libze-intel-gpu1 \
libze1
# required by pytorch / ipex
RUN apt-get install -y --no-install-recommends --fix-missing \
libgl1 \
libglib2.0-0 \
libgomp1
# python3.12
RUN apt-get install -y --no-install-recommends --fix-missing \
python3 \
python3-dev \
python3-venv \
python3-pip
# jemalloc is not required but it is highly recommended (also used with optional ipexrun)
RUN apt-get install -y --no-install-recommends --fix-missing libjemalloc-dev
ENV LD_PRELOAD=libjemalloc.so.2
# cleanup
RUN /usr/sbin/ldconfig
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
# stop pip and uv from caching
ENV PIP_NO_CACHE_DIR=true
ENV UV_NO_CACHE=true
# set paths to use with sdnext
ENV SD_DOCKER=true
ENV SD_DATADIR="/mnt/data"
ENV SD_MODELSDIR="/mnt/models"
ENV venv_dir="/mnt/python/venv"
# paths used by sdnext can be a volume if necessary
#VOLUME [ "/app" ]
#VOLUME [ "/mnt/data" ]
#VOLUME [ "/mnt/models" ]
#VOLUME [ "/mnt/python" ]
#VOLUME [ "/root/.cache/huggingface" ]
# intel specific environment variables
#ENV IPEX_SDPA_SLICE_TRIGGER_RATE=1
#ENV IPEX_ATTENTION_SLICE_RATE=0.5
#ENV IPEX_FORCE_ATTENTION_SLICE=-1
#ENV IPEXRUN=False
# git clone and run sdnext
RUN echo '#!/bin/bash\ngit status || git clone https://github.com/vladmandic/sdnext.git .\n/app/webui.sh "$@"' | tee /bin/startup.sh
RUN chmod 755 /bin/startup.sh
# actually run sdnext
WORKDIR /app
ENTRYPOINT [ "startup.sh", "-f", "--use-ipex", "--uv", "--listen", "--debug", "--api-log", "--log", "sdnext.log" ]
# expose port
EXPOSE 7860
# healthcheck function
HEALTHCHECK --interval=60s --timeout=10s --start-period=60s --retries=3 CMD curl --fail http://localhost:7860/sdapi/v1/status || exit 1
# stop signal
STOPSIGNAL SIGINT
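The image can also be built ahead of docker compose up, either directly with docker build or through the compose service below; both assume the commands are run from the directory that holds the Dockerfile:

docker build -t sdnext-ipex:latest .
docker compose build sdnext-ipex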
docker-compose.yml
services:
  ollama:
    container_name: ollama
    image: ollama/ollama:${OLLAMA_DOCKER_TAG-latest}
    pull_policy: always
    restart: unless-stopped
    volumes:
      - /docker/ollama:/root/.ollama
    tty: true

  open-webui:
    image: ghcr.io/open-webui/open-webui:${WEBUI_DOCKER_TAG-main}
    container_name: open-webui
    volumes:
      - /docker/webui:/app/backend/data
    depends_on:
      - ollama
    ports:
      - ${OPEN_WEBUI_PORT-3000}:8080
    environment:
      - 'OLLAMA_BASE_URL=http://ollama:11434'
      - 'ENABLE_OPENAI_API=False'
      - 'ENABLE_OLLAMA_API=True'
      - 'WEBUI_SECRET_KEY='
      - 'ENABLE_IMAGE_GENERATION=True'
      - IMAGE_GENERATION_ENGINE=automatic1111
      - IMAGE_GENERATION_MODEL=dreamshaper_8
      - IMAGE_SIZE=400x400
      - IMAGE_STEPS=8
      - AUTOMATIC1111_BASE_URL=http://sdnext-ipex:7860/
      - AUTOMATIC1111_CFG_SCALE=2
      - AUTOMATIC1111_SAMPLER=DPM++ SDE
      - AUTOMATIC1111_SCHEDULER=Karras
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped

  sdnext-ipex:
    build:
      dockerfile: Dockerfile
    image: sdnext-ipex:latest
    container_name: sdnext-ipex
    restart: unless-stopped
    devices:
      - /dev/dri:/dev/dri
    ports:
      - 7860:7860
    volumes:
      - /docker/sdnext-app-volume:/app
      - /docker/sdnext-mnt-volume:/mnt
      - /docker/sdnext-huggingface-volume:/root/.cache/huggingface
      - /docker/sdnext-python-volume:/usr/local/lib/python3.10
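The compose file reads three optional variables and falls back to the defaults baked into the ${VAR-default} expressions. To pin them explicitly, a minimal .env next to docker-compose.yml could simply restate those defaults:

OLLAMA_DOCKER_TAG=latest
WEBUI_DOCKER_TAG=main
OPEN_WEBUI_PORT=3000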