# Clone the project and adjust docker-compose.yml (model volume path, GPU settings) before starting the stack
git clone https://github.com/Leon-Sander/Local-Multimodal-AI-Chat.git
cd Local-Multimodal-AI-Chat/
vi docker-compose.yml
docker compose up -d

# Pull a model into the running Ollama container (gemma3:12b in this example)
docker exec -it local-multimodal-ai-chat-ollama-1 /usr/bin/ollama pull gemma3:12b
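
Once the pull finishes, a quick sanity check can look like the sketch below; it assumes the default container name and the port mappings from the compose file shown further down:

# List the pulled models and probe the exposed services
docker exec -it local-multimodal-ai-chat-ollama-1 /usr/bin/ollama list    # gemma3:12b should appear
curl http://localhost:11434/api/tags                                      # Ollama REST API on port 11434
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8501            # Streamlit UI, expect 200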


root@server1:~/Local-Multimodal-AI-Chat# cat docker-compose.yml
services:
  app:
    build:
      context: .
      dockerfile: docker/Dockerfile
    volumes:
      - ./:/app
    ports:
      - "8501:8501"
    environment:
      - PYTHONUNBUFFERED=1
    command: /bin/sh -c "python3 database_operations.py && streamlit run app.py --server.fileWatcherType poll"
    depends_on:
      - ollama

  ollama:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - /path/to/ollama/models:/root/.ollama # Update with the path to your ollama model storage
    environment:
      - OLLAMA_LOAD_TIMEOUT=15m # increase if you get a Timeout Error

    # Uncomment the deploy block below if you want Ollama to use an NVIDIA GPU
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities: [gpu]
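
After changing the model volume path or enabling the GPU block, the edited file can be validated and re-applied as sketched below; the nvidia-smi check is only meaningful when the GPU reservation is uncommented and the NVIDIA Container Toolkit is installed on the host:

docker compose config -q        # validate the edited YAML; exits non-zero on errors
docker compose up -d --build    # recreate the containers with the new settings
docker compose logs -f app      # watch database_operations.py and Streamlit start

# Optional GPU check inside the Ollama container (requires the deploy block above)
docker exec -it local-multimodal-ai-chat-ollama-1 nvidia-smi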
