-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathdocker-compose.yaml
More file actions
58 lines (56 loc) · 1.52 KB
/
docker-compose.yaml
File metadata and controls
58 lines (56 loc) · 1.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
services:
  # Federated LocalAI head node: serves the API on 8080 and coordinates
  # p2p workers via the shared P2PTOKEN.
  local-ai-head:
    image: localai/localai:latest-aio-gpu-nvidia-cuda-12
    # >- folds the lines into one single-line command string (no trailing newline).
    command: >-
      --models-path /models --p2ptoken ${P2PTOKEN} --federated
      --context-size 2048 --threads 10 --localai-config-dir /configuration
    environment:
      - DEBUG=true
    volumes:
      - ./models:/models
      - ./tmp/generated/images:/tmp/generated/images
      - ./tmp/audio:/tmp/audio
      - ./configuration:/configuration
    ports:
      # Quoted: colon-separated digit scalars can be mistyped by YAML 1.1 parsers.
      - "8080:8080"
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all host NVIDIA GPUs for this container.
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: unless-stopped

  # Worker node: runs the llama.cpp RPC backend and joins the head's p2p
  # network using the same P2PTOKEN.
  local-ai-worker:
    image: localai/localai:latest-aio-gpu-nvidia-cuda-12
    # NOTE(review): the "worker p2p-llama-cpp-rpc" subcommand comes AFTER the
    # flags here (token order preserved from the original folded scalar) —
    # confirm the LocalAI CLI accepts flags before the subcommand.
    command: >-
      --models-path /models
      --context-size 2048 --threads 6 --localai-config-dir /configuration
      worker p2p-llama-cpp-rpc --token ${P2PTOKEN}
    environment:
      - DEBUG=true
    volumes:
      - ./models:/models
      - ./tmp/generated/images:/tmp/generated/images
      - ./tmp/audio:/tmp/audio
      - ./configuration:/configuration
    ports:
      - "8090:8090"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    restart: unless-stopped
    depends_on:
      - local-ai-head

  # Application container built from the local Dockerfile; talks to the head
  # node over the Compose network by service name.
  helper-bot:
    build: .
    depends_on:
      - local-ai-head
    ports:
      - "8085:8085"
    environment:
      # Both variables point at the head's API; presumably the app reads one
      # or the other — TODO confirm which is actually consumed.
      - AI_BASEURL=http://local-ai-head:8080
      - AI_ENDPOINT=http://local-ai-head:8080
    restart: unless-stopped