-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcompose.yml
More file actions
138 lines (133 loc) · 5.4 KB
/
compose.yml
File metadata and controls
138 lines (133 loc) · 5.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
---
# Production compose file. Uses pre-built image from ghcr.io.
# For local development with build-from-source, use: docker compose -f compose.dev.yml up
#
# Startup order is enforced via healthcheck + condition: service_healthy:
# sluice starts first -> tun2proxy waits for sluice /healthz -> openclaw waits for tun2proxy TUN device.
#
# MCP wiring (one-time setup): OpenClaw connects to sluice's MCP gateway at
# http://sluice:3000/mcp via Streamable HTTP. Configure this once in OpenClaw:
#
#   docker exec openclaw openclaw mcp set sluice '{"url":"http://sluice:3000/mcp"}'
#
# For this to work, OpenClaw must be able to resolve the "sluice" hostname.
# Because OpenClaw shares tun2proxy's network namespace and routes DNS through
# the TUN, Docker's embedded DNS (127.0.0.11) is not reachable. We pin
# sluice's IP on the internal network to 172.30.0.2 and give tun2proxy an
# extra_hosts entry pointing "sluice" at that IP. The shared /etc/hosts file
# makes OpenClaw resolve sluice correctly. The SOCKS5 proxy's SelfBypass list
# auto-allows connections to sluice:3000 so MCP traffic is not double-checked
# by policy.
services:
  sluice:
    image: ghcr.io/nnemirovsky/sluice:latest
    restart: unless-stopped
    environment:
      - TELEGRAM_BOT_TOKEN=${TELEGRAM_BOT_TOKEN}
      - TELEGRAM_CHAT_ID=${TELEGRAM_CHAT_ID}
      - SLUICE_AGENT_CONTAINER=openclaw
      # Set SLUICE_API_TOKEN to enable the REST API on :3000/api/*.
      # All /api/* endpoints require this bearer token for auth.
      # If unset, the API returns 403 on all /api/* routes.
      - SLUICE_API_TOKEN=${SLUICE_API_TOKEN:-}
    group_add:
      - "${DOCKER_GID:-0}"
    volumes:
      - ./config.toml:/etc/sluice/config.toml:ro
      - sluice-data:/home/sluice/data
      - sluice-vault:/home/sluice/.sluice
      - sluice-audit:/var/log/sluice
      - sluice-ca:/home/sluice/ca
      - /var/run/docker.sock:/var/run/docker.sock
    healthcheck:
      test: ["CMD", "wget", "-qO-", "http://localhost:3000/healthz"]
      interval: 10s
      timeout: 3s
      retries: 3
      start_period: 5s
    networks:
      internal:
        # Pinned so tun2proxy's extra_hosts entry (below) stays valid.
        ipv4_address: 172.30.0.2
      external: {}

  tun2proxy:
    image: ghcr.io/tun2proxy/tun2proxy-alpine:latest
    restart: unless-stopped
    cap_add: [NET_ADMIN]
    devices:
      - /dev/net/tun:/dev/net/tun
    command: ["--proxy", "socks5://sluice:1080", "--bypass", "127.0.0.0/8", "--bypass", "::1/128"]
    # OpenClaw uses network_mode: "service:tun2proxy", sharing this
    # container's network namespace. The caddy sidecar handles external
    # access because tun0's default route breaks Docker port forwarding
    # (routing asymmetry: replies exit via tun0 instead of eth0).
    # extra_hosts adds a /etc/hosts entry so OpenClaw (which shares this
    # network namespace) can resolve "sluice" to its pinned internal IP.
    # Without this, DNS goes through the TUN and fails to resolve Docker
    # service names.
    extra_hosts:
      - "sluice:172.30.0.2"
    healthcheck:
      # Healthy once the TUN device exists; openclaw gates its start on this.
      test: ["CMD-SHELL", "ip link show tun0 || exit 1"]
      interval: 10s
      timeout: 3s
      retries: 3
      start_period: 5s
    networks: [internal]
    depends_on:
      sluice:
        condition: service_healthy

  openclaw:
    image: ghcr.io/openclaw/openclaw:latest
    restart: unless-stopped
    container_name: openclaw
    network_mode: "service:tun2proxy"
    # Source sluice-injected env vars before starting openclaw so all child
    # processes (e.g. gemini --acp) inherit phantom tokens in their env.
    entrypoint: ["/bin/sh", "-c", "[ -f \"$HOME/.openclaw/.env\" ] && set -a && . \"$HOME/.openclaw/.env\" && set +a; exec docker-entrypoint.sh node openclaw.mjs gateway --allow-unconfigured"]
    environment:
      - HOME=/home/node
      - OPENCLAW_GATEWAY_TOKEN=${OPENCLAW_GATEWAY_TOKEN:-}
      # Trust the sluice MITM CA for the major TLS stacks (OpenSSL-based
      # tools, Python requests, and Node.js respectively).
      - SSL_CERT_FILE=/usr/local/share/ca-certificates/sluice/sluice-ca.crt
      - REQUESTS_CA_BUNDLE=/usr/local/share/ca-certificates/sluice/sluice-ca.crt
      - NODE_EXTRA_CA_CERTS=/usr/local/share/ca-certificates/sluice/sluice-ca.crt
      # npm user-global directory for persistent CLI tool installs (e.g. gemini-cli).
      # The openclaw-home volume is mounted at /home/node so these survive restarts.
      - NPM_CONFIG_PREFIX=/home/node/.npm-global
      - PATH=/home/node/.npm-global/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
      # Persistent V8 compile cache to speed up Node.js CLI cold starts.
      - NODE_COMPILE_CACHE=/home/node/.node-compile-cache
    volumes:
      - openclaw-home:/home/node
      - sluice-ca:/usr/local/share/ca-certificates/sluice:ro
    depends_on:
      tun2proxy:
        condition: service_healthy

  # Reverse proxy for external HTTPS access to the OpenClaw Control UI.
  # Terminates TLS with a Cloudflare Origin certificate and forwards to
  # the gateway in tun2proxy's network namespace. This sidesteps the
  # routing asymmetry that breaks Docker's built-in port forwarding when
  # tun2proxy's TUN device captures reply packets.
  caddy:
    image: caddy:2-alpine
    restart: unless-stopped
    ports:
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - /etc/cloudflare:/etc/cloudflare:ro
    networks: [internal, external]
    depends_on:
      tun2proxy:
        condition: service_healthy

networks:
  internal:
    # No outbound route from this network; only caddy bridges to external.
    internal: true
    ipam:
      config:
        - subnet: 172.30.0.0/24
  external: {}

volumes:
  sluice-data:
  sluice-vault:
  sluice-audit:
  sluice-ca:
  openclaw-home: