-
Notifications
You must be signed in to change notification settings - Fork 18
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
84 lines (68 loc) · 2.49 KB
/
docker-compose.yml
File metadata and controls
84 lines (68 loc) · 2.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
version: '3.4'  # NOTE(review): obsolete under Compose v2 (ignored with a warning); kept for older tooling
# docker-compose stack: Open WebUI front-end + Ollama (DeepSeek) + MCPO tool
# server, with NVIDIA GPU acceleration for the Ollama service.
#
# /////////// LINUX ONLY ////////////
# Steps to run on LINUX with GPU acceleration (needs ROOT access, e.g. sudo docker compose up)
# 1. Install Docker and the NVIDIA Container Toolkit:
#    https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
#    sudo apt-get install -y nvidia-container-toolkit
#    sudo nvidia-ctk runtime configure --runtime=docker
#    sudo systemctl restart docker
# 2. Test whether GPU acceleration is available:
#    First run:  docker pull nvidia/cuda:12.8.1-runtime-ubuntu24.04
#    For CUDA 12.8: sudo docker run --rm --gpus all nvidia/cuda:12.8.1-runtime-ubuntu24.04 nvidia-smi
#    For CUDA 12.2: sudo docker run --rm --gpus all nvidia/cuda:11.6.2-base-ubuntu20.04 nvidia-smi
# 3. Finally run: sudo docker compose up
#
# COMMON ERROR:
# After sleep/hibernation the GPU may not be available to Docker containers.
# 1. Check whether the GPU is available:
#    nvidia-smi
# 2. If the GPU is still not available, reload the nvidia_uvm module:
#    sudo rmmod nvidia_uvm && sudo modprobe nvidia_uvm
# 3. Otherwise, restart your PC :D
# //////////////////////////////////////////////////////////////////

services:
  # Web UI; talks to the Ollama API over the internal network.
  openwebui:
    image: ghcr.io/open-webui/open-webui:v0.6.5
    restart: unless-stopped
    depends_on:
      - ollamadeepseek
      - mcpo
    ports:
      - '8333:8080'  # host:container — UI reachable at http://localhost:8333
    volumes:
      - './open-webui:/app/backend/data'
    environment:
      OLLAMA_BASE_URL: 'http://ollamadeepseek:11434'

  # MCPO tool server, built from ./mcpo/Dockerfile.
  mcpo:
    build:
      context: mcpo
      dockerfile: Dockerfile
    ports:
      - '8334:8000'

  # Ollama runtime (DeepSeek), built from ./Dockerfile, GPU-accelerated.
  ollamadeepseek:
    build:
      context: .
      dockerfile: Dockerfile
    # user: root  # only for Linux
    restart: unless-stopped
    ports:
      - '11434:11434'
    volumes:
      - './ollama:/root/.ollama'
    environment:
      OLLAMA_HOST: '0.0.0.0'
      OLLAMA_PORT: '11434'  # quoted so Compose passes a string, not a YAML int
      # Bare value = Compose passes through the host's OLLAMA_MODELS (unset if
      # absent, so Ollama falls back to its default models dir) — TODO confirm
      # that passthrough, not an explicit empty value, is the intent here.
      OLLAMA_MODELS:
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: all,utility
    # Legacy nvidia runtime selector; kept alongside the Compose-spec GPU
    # device reservation below so both old and new Docker setups work.
    runtime: nvidia
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]