The following configuration files provide complete working examples for deploying the sandbox in production environments. These are reference implementations that combine all security layers into deployable configurations.
Docker Compose is suitable for single-host deployments or development environments. This configuration sets up the egress proxy, authentication service, session store, and container manager as interconnected services.
File: docker-compose.production.yml
# Single-host production sandbox stack.
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and is
# ignored; it is kept only for compatibility with legacy Compose v1 tooling.
version: '3.8'

services:
  # Egress Proxy — the only path out of the sandbox network.
  egress-proxy:
    image: envoyproxy/envoy:v1.28-latest
    container_name: egress-proxy
    volumes:
      - ./envoy.yaml:/etc/envoy/envoy.yaml:ro
      - ./jwt-keys:/etc/envoy/keys:ro
    ports:
      - '8443:8443'
    networks:
      - egress-network
    restart: unless-stopped
    logging:
      driver: 'json-file'
      options:
        max-size: '10m'
        max-file: '3'

  # Auth Service — issues/validates the JWTs checked by the proxy.
  auth-service:
    build: ./auth-service
    container_name: auth-service
    environment:
      - REDIS_HOST=redis
      - JWT_PRIVATE_KEY_PATH=/keys/private-key.pem
    volumes:
      - ./jwt-keys:/keys:ro
    depends_on:
      - redis
    networks:
      - backend-network
    restart: unless-stopped

  # Redis (Session Store) — capped at 256 MB with LRU eviction.
  redis:
    image: redis:7-alpine
    container_name: redis
    command: redis-server --maxmemory 256mb --maxmemory-policy allkeys-lru
    networks:
      - backend-network
    volumes:
      - redis-data:/data
    restart: unless-stopped

  # Container Manager API — spawns sandbox containers on the host.
  container-manager:
    build: ./container-manager
    container_name: container-manager
    # SECURITY: privileged mode plus the Docker socket is root-equivalent
    # access to the host. Needed to spawn sibling containers; keep this
    # image minimal and audited.
    privileged: true # Needed to spawn containers
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./jwt-keys:/keys:ro
      - /srv/sandbox:/srv/sandbox
    environment:
      - AUTH_SERVICE_URL=http://auth-service:5000
      - REDIS_HOST=redis
    depends_on:
      - auth-service
      - redis
      - egress-proxy
    networks:
      - backend-network
      - egress-network
    ports:
      - '8080:8080'
    restart: unless-stopped

networks:
  # internal: true — containers on this network have no route to the
  # outside world; only the egress-network reaches the proxy.
  backend-network:
    driver: bridge
    internal: true
  egress-network:
    driver: bridge

volumes:
  redis-data:

For production deployments at scale, use this Kubernetes configuration. It includes gVisor RuntimeClass, NetworkPolicy for traffic isolation, PodSecurityPolicy for hardening, and all necessary ConfigMaps and Secrets.
File: k8s-sandbox-stack.yaml
---
# Dedicated namespace holding every resource of the sandbox stack.
apiVersion: v1
kind: Namespace
metadata:
  name: sandbox-system
---
# gVisor RuntimeClass: pods declaring `runtimeClassName: gvisor` execute
# under the runsc user-space kernel rather than the host kernel.
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: gvisor
handler: runsc
---
# Envoy egress-proxy configuration, mounted into the proxy pods at
# /etc/envoy by the Deployment below.
apiVersion: v1
kind: ConfigMap
metadata:
  name: envoy-config
  namespace: sandbox-system
data:
  envoy.yaml: |
    # (Complete envoy.yaml from Layer 3)
    static_resources:
      listeners:
        - name: egress_listener
          address:
            socket_address:
              address: 0.0.0.0
              port_value: 8443
    # ... (rest of config)
---
# JWT signing/verification key pair consumed by the proxy and auth service.
# The values below are truncated placeholders — substitute the output of
# `base64 -w0 < key.pem` before applying; prefer an external secret manager
# over committing real keys to version control.
apiVersion: v1
kind: Secret
metadata:
  name: jwt-keys
  namespace: sandbox-system
type: Opaque
data:
  public-key.pem: LS0tLS1CRUdJTi... # base64 encoded
  private-key.pem: LS0tLS1CRUdJTi... # base64 encoded
---
# Egress proxy Deployment: three Envoy replicas fronted by the
# egress-proxy Service defined below.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: egress-proxy
  namespace: sandbox-system
spec:
  replicas: 3
  selector:
    matchLabels:
      app: egress-proxy
  template:
    metadata:
      labels:
        app: egress-proxy
    spec:
      containers:
        - name: envoy
          image: envoyproxy/envoy:v1.28-latest
          ports:
            - containerPort: 8443
              name: proxy
          volumeMounts:
            # envoy-config ConfigMap provides /etc/envoy/envoy.yaml; the
            # key material is nested read-only under /etc/envoy/keys.
            - name: config
              mountPath: /etc/envoy
            - name: keys
              mountPath: /etc/envoy/keys
              readOnly: true
          resources:
            requests:
              memory: '256Mi'
              cpu: '500m'
            limits:
              memory: '512Mi'
              cpu: '1000m'
      volumes:
        - name: config
          configMap:
            name: envoy-config
        - name: keys
          secret:
            secretName: jwt-keys
---
# Cluster-internal Service in front of the egress-proxy replicas; sandbox
# pods reach it as egress-proxy.sandbox-system.svc.cluster.local:8443.
apiVersion: v1
kind: Service
metadata:
  name: egress-proxy
  namespace: sandbox-system
spec:
  selector:
    app: egress-proxy
  ports:
    - port: 8443
      targetPort: 8443
      name: proxy
  type: ClusterIP
---
# Egress lockdown for sandbox pods: listing `Egress` in policyTypes makes
# everything not explicitly allowed below denied. Only DNS and the egress
# proxy are reachable.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: sandbox-egress-policy
  namespace: sandbox-system
spec:
  podSelector:
    matchLabels:
      sandbox: 'true'
  policyTypes:
    - Egress
  egress:
    # DNS to kube-system. FIX(review): the original matched a `name` label,
    # which Kubernetes does not set on namespaces by default — DNS would be
    # silently blocked. The well-known `kubernetes.io/metadata.name` label
    # is set automatically on every namespace (v1.21+).
    - to:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system
      ports:
        - protocol: UDP
          port: 53
        # DNS falls back to TCP for truncated/large responses.
        - protocol: TCP
          port: 53
    # Everything else must go through the egress proxy.
    - to:
        - podSelector:
            matchLabels:
              app: egress-proxy
      ports:
        - protocol: TCP
          port: 8443
---
# PodSecurityPolicy hardening for sandbox pods.
# NOTE(review): PSP is a cluster-scoped resource, so the original
# `metadata.namespace` field was invalid and has been removed. PSP itself
# was removed in Kubernetes v1.25 — on v1.25+ use Pod Security Admission
# (`pod-security.kubernetes.io/enforce: restricted` on the namespace) or a
# policy engine such as Kyverno/Gatekeeper instead.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: sandbox-restricted
spec:
  privileged: false
  allowPrivilegeEscalation: false
  # Drop everything, then allow back only the minimal capability set the
  # sandbox image needs.
  requiredDropCapabilities:
    - ALL
  allowedCapabilities:
    - CHOWN
    - SETUID
    - SETGID
    - NET_BIND_SERVICE
  # FIX(review): `persistentVolumeClaim` added — the example sandbox Pod
  # mounts a PVC for user outputs and would otherwise be rejected by this
  # policy.
  volumes:
    - 'configMap'
    - 'emptyDir'
    - 'secret'
    - 'persistentVolumeClaim'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'MustRunAsNonRoot'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: true
---
# Example sandboxed workload for user_123, combining every layer: gVisor
# runtime, non-root execution, read-only root filesystem, and proxy-only
# egress enforced by sandbox-egress-policy.
apiVersion: v1
kind: Pod
metadata:
  name: user-sandbox-001
  namespace: sandbox-system
  labels:
    # Matched by the NetworkPolicy above to lock down egress.
    sandbox: 'true'
    user-id: 'user_123'
spec:
  # Execute under the gVisor (runsc) user-space kernel.
  runtimeClassName: gvisor
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    fsGroup: 1000
  containers:
    - name: sandbox
      image: your-sandbox-image:latest
      env:
        # The pod name doubles as the sandbox/container identifier.
        - name: CONTAINER_ID
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        # Route all outbound HTTP(S) through the egress proxy Service.
        - name: HTTP_PROXY
          value: 'http://egress-proxy.sandbox-system.svc.cluster.local:8443'
        - name: HTTPS_PROXY
          value: 'http://egress-proxy.sandbox-system.svc.cluster.local:8443'
      resources:
        requests:
          memory: '1Gi'
          cpu: '500m'
        limits:
          memory: '4Gi'
          cpu: '2000m'
      securityContext:
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
        runAsNonRoot: true
        runAsUser: 1000
        # Drop all capabilities, re-adding only the trio the PSP permits.
        capabilities:
          drop:
            - ALL
          add:
            - CHOWN
            - SETUID
            - SETGID
      volumeMounts:
        # Writable scratch space — the root filesystem is read-only.
        - name: tmp
          mountPath: /tmp
        - name: user-outputs
          mountPath: /mnt/outputs
        - name: skills
          mountPath: /mnt/skills
          readOnly: true
  volumes:
    - name: tmp
      emptyDir:
        sizeLimit: 1Gi
    - name: user-outputs
      persistentVolumeClaim:
        claimName: user-123-outputs
    - name: skills
      configMap:
        name: sandbox-skills