Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
141 changes: 141 additions & 0 deletions services/cloud-agent-next/Dockerfile.dind
Original file line number Diff line number Diff line change
@@ -0,0 +1,141 @@
ARG SANDBOX_VERSION="0.8.9"

# Stage that only exists to copy the Cloudflare Sandbox SDK server (and the
# musl runtime libraries it links against) into the final image.
FROM docker.io/cloudflare/sandbox:${SANDBOX_VERSION}-musl AS cloudflare-sandbox

FROM docker:dind-rootless

USER root

# Build arguments for metadata (all optional with defaults)
ARG BUILD_DATE=""
ARG VCS_REF=""
ARG KILOCODE_CLI_VERSION="7.1.23"

# Record build provenance in standard OCI labels so the metadata ARGs above
# are actually consumed (they were previously declared but never used).
LABEL org.opencontainers.image.created="${BUILD_DATE}" \
      org.opencontainers.image.revision="${VCS_REF}"

# Cloudflare Containers run without root privileges, so Docker must run in
# rootless mode. The Sandbox SDK server is copied into this image so the
# Durable Object can still control the container while dockerd runs as a child
# process. bash and its readline dependency are copied too because the
# Alpine-based dind image ships only busybox sh by default.
COPY --from=cloudflare-sandbox /container-server/sandbox /sandbox
COPY --from=cloudflare-sandbox /usr/lib/libstdc++.so.6 /usr/lib/libstdc++.so.6
COPY --from=cloudflare-sandbox /usr/lib/libgcc_s.so.1 /usr/lib/libgcc_s.so.1
COPY --from=cloudflare-sandbox /bin/bash /bin/bash
COPY --from=cloudflare-sandbox /usr/lib/libreadline.so.8 /usr/lib/libreadline.so.8
COPY --from=cloudflare-sandbox /usr/lib/libreadline.so.8.2 /usr/lib/libreadline.so.8.2

# Base tooling for the outer sandbox: git/git-lfs for workspace clones,
# nodejs/npm for the global installs further down, openssh-client for
# git-over-SSH remotes, curl/wget/jq/tar for release downloads below.
RUN apk add --no-cache \
bash \
curl \
git \
git-lfs \
jq \
nodejs \
npm \
openssh-client \
tar \
wget

# Install GitHub CLI from the official release. Alpine packages can lag the
# Debian package used in Dockerfile, so pin the upstream binary archive here.
# NOTE(review): archive is pinned to linux_amd64 — assumes this image is only
# ever built for x86_64; confirm before adding arm64 builders.
RUN GH_VERSION="2.82.1" \
&& wget -q -O /tmp/gh.tar.gz "https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_amd64.tar.gz" \
&& tar -xzf /tmp/gh.tar.gz -C /tmp \
&& cp "/tmp/gh_${GH_VERSION}_linux_amd64/bin/gh" /usr/local/bin/gh \
&& chmod +x /usr/local/bin/gh \
&& rm -rf /tmp/gh.tar.gz "/tmp/gh_${GH_VERSION}_linux_amd64"

# Install GitLab CLI from the official Linux amd64 binary archive.
# The glab tarball extracts its binary to ./bin/glab, hence /tmp/bin below.
RUN GLAB_VERSION="1.80.4" \
&& wget -q -O /tmp/glab.tar.gz "https://gitlab.com/gitlab-org/cli/-/releases/v${GLAB_VERSION}/downloads/glab_${GLAB_VERSION}_linux_amd64.tar.gz" \
&& tar -xzf /tmp/glab.tar.gz -C /tmp \
&& cp /tmp/bin/glab /usr/local/bin/glab \
&& chmod +x /usr/local/bin/glab \
&& rm -rf /tmp/glab.tar.gz /tmp/bin

# Tools used by the outer sandbox. Kilo itself is still installed globally for
# the existing wrapper path; the platform package bundle under /opt/kilo-agent
# is intended for mounting or copying into inner dev containers.
RUN npm install -g bun pnpm @devcontainers/cli @kilocode/cli@${KILOCODE_CLI_VERSION}

# Vendor both libc flavours of the Kilo CLI under /opt/kilo-agent so the
# launcher script below can pick the right binary inside an inner dev
# container (glibc distros vs Alpine/musl). `npm pack` downloads the platform
# tarballs without installing them; source maps are stripped to keep the
# layer small.
RUN mkdir -p /opt/kilo-agent/bin \
/opt/kilo-agent/cli-linux-x64 \
/opt/kilo-agent/cli-linux-x64-musl \
&& npm pack \
"@kilocode/cli-linux-x64@${KILOCODE_CLI_VERSION}" \
"@kilocode/cli-linux-x64-musl@${KILOCODE_CLI_VERSION}" \
--pack-destination /tmp \
&& tar -xzf "/tmp/kilocode-cli-linux-x64-${KILOCODE_CLI_VERSION}.tgz" \
-C /opt/kilo-agent/cli-linux-x64 --strip-components=1 \
&& tar -xzf "/tmp/kilocode-cli-linux-x64-musl-${KILOCODE_CLI_VERSION}.tgz" \
-C /opt/kilo-agent/cli-linux-x64-musl --strip-components=1 \
&& rm -f /tmp/kilocode-cli-linux-x64-*.tgz \
&& rm -f /tmp/kilocode-cli-linux-x64-musl-*.tgz \
&& rm -f /opt/kilo-agent/cli-linux-x64/bin/*.map \
&& rm -f /opt/kilo-agent/cli-linux-x64-musl/bin/*.map \
&& chmod +x /opt/kilo-agent/cli-linux-x64/bin/kilo \
&& chmod +x /opt/kilo-agent/cli-linux-x64-musl/bin/kilo

# Arch/libc dispatch launcher: the heredoc body below becomes
# /opt/kilo-agent/bin/kilo verbatim (quoted 'EOF' prevents build-time
# expansion), so no comments are added inside it.
# NOTE(review): only x86_64 is handled; arm64 dev containers hit the
# "Unsupported devcontainer platform" branch — confirm that is intended.
RUN cat > /opt/kilo-agent/bin/kilo <<'EOF' \
&& chmod +x /opt/kilo-agent/bin/kilo
#!/bin/sh
set -eu

root="${KILO_AGENT_ROOT:-$(CDPATH= cd -- "$(dirname -- "$0")/.." && pwd)}"
arch="$(uname -m)"

if ldd --version 2>&1 | grep -qi musl; then
libc="musl"
else
libc="glibc"
fi

case "$arch:$libc" in
x86_64:glibc) exec "$root/cli-linux-x64/bin/kilo" "$@" ;;
x86_64:musl) exec "$root/cli-linux-x64-musl/bin/kilo" "$@" ;;
*) echo "Unsupported devcontainer platform: $arch/$libc" >&2; exit 1 ;;
esac
EOF

# === Build wrapper bundle inside container ===
# This mirrors Dockerfile but builds on Alpine, matching the DIND base image.
# /opt/kilo-cloud/ is the canonical location so the wrapper bundle can be
# bind-mounted read-only into a dev container; /usr/local/bin/ keeps symlinks
# so existing outer-sandbox callers (`bun /usr/local/bin/kilocode-wrapper.js`)
# continue to work unchanged.
COPY wrapper /tmp/wrapper-build/wrapper
COPY src/shared /tmp/wrapper-build/src/shared

# Bundle the two wrapper entry points into single minified files with bun,
# then remove the build tree so it does not bloat the image layer.
RUN mkdir -p /opt/kilo-cloud \
&& cd /tmp/wrapper-build/wrapper \
&& bun install --production \
&& bun build src/main.ts --outfile=/opt/kilo-cloud/kilocode-wrapper.js --target=bun --minify \
&& bun build src/restore-session.ts --outfile=/opt/kilo-cloud/kilo-restore-session.js --target=bun --minify \
&& ln -sf /opt/kilo-cloud/kilocode-wrapper.js /usr/local/bin/kilocode-wrapper.js \
&& ln -sf /opt/kilo-cloud/kilo-restore-session.js /usr/local/bin/kilo-restore-session.js \
&& rm -rf /tmp/wrapper-build

# Boot script for rootless Docker-in-Docker.
#
# `dockerd-entrypoint.sh` is the upstream rootless wrapper from the
# `docker:dind-rootless` image. It performs the rootlesskit/user namespace
# setup (mounting cgroup v2, /run, etc.) before exec'ing dockerd. Calling
# `dockerd` directly skips that setup and breaks rootless mode.
#
# `--iptables=false --ip6tables=false` disables in-container iptables setup
# because nf_tables / ip_tables modules are not available inside Cloudflare
# Containers. Inner Docker commands must therefore use `--network=host`.
#
# `--exec-opt native.cgroupdriver=cgroupfs` is required in production: the
# default systemd cgroup driver fails when starting inner containers with
# `unable to start unit ... No such process`. The cgroupfs driver avoids
# the systemd dependency. See cloudflare/sandbox-sdk#662.
#
# The readiness loop is bounded (~60s) and watches the dockerd pid so the
# container fails fast instead of hanging forever when dockerd crashes
# during startup (the previous `until docker version` loop never gave up).
RUN printf '#!/bin/sh\n\
set -eu\n\
dockerd-entrypoint.sh dockerd --iptables=false --ip6tables=false --exec-opt native.cgroupdriver=cgroupfs &\n\
dockerd_pid=$!\n\
i=0\n\
until docker version >/dev/null 2>&1; do\n\
  if ! kill -0 "$dockerd_pid" 2>/dev/null; then echo "dockerd exited during startup" >&2; exit 1; fi\n\
  i=$((i + 1))\n\
  if [ "$i" -ge 300 ]; then echo "timed out waiting for dockerd" >&2; exit 1; fi\n\
  sleep 0.2\n\
done\n\
echo "Docker is ready"\n\
wait\n' > /home/rootless/boot-docker-for-dind.sh \
&& chmod +x /home/rootless/boot-docker-for-dind.sh \
&& chown rootless:rootless /home/rootless/boot-docker-for-dind.sh

ENTRYPOINT ["/sandbox"]
CMD ["/home/rootless/boot-docker-for-dind.sh"]
6 changes: 4 additions & 2 deletions services/cloud-agent-next/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,11 @@
"preinstall": "npx only-allow pnpm",
"deploy": "wrangler deploy",
"predev": "pnpm run build:wrapper",
"dev": "wrangler dev --env 'dev'",
"dev": "scripts/dev-with-docker-proxy.sh --env dev",
"dev:no-proxy": "wrangler dev --env 'dev'",
"dev:docker-proxy": "node scripts/docker-privileged-proxy.mjs",
"prestart": "pnpm run build:wrapper",
"start": "wrangler dev",
"start": "scripts/dev-with-docker-proxy.sh",
"types": "wrangler types",
"lint": "pnpm -w exec oxlint --config .oxlintrc.json services/cloud-agent-next/src services/cloud-agent-next/wrapper/src",
"format": "oxfmt src",
Expand Down
39 changes: 39 additions & 0 deletions services/cloud-agent-next/scripts/dev-with-docker-proxy.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
#!/bin/sh
# Run `wrangler dev` with a local Docker socket proxy that injects
# HostConfig.Privileged=true for SandboxSmall (Docker-in-Docker).
#
# See scripts/docker-privileged-proxy.mjs for context.
# All arguments to this script are forwarded to `wrangler dev` unchanged.

set -eu

script_dir="$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)"
service_dir="$(CDPATH= cd -- "$script_dir/.." && pwd)"

# Unix-domain sockets have a ~104-byte path limit on macOS, so we cannot put
# the socket under the worktree's .wrangler/ directory. Derive a short stable
# path under $TMPDIR keyed on the service directory so multiple worktrees can
# coexist.
#
# `shasum` ships with macOS but is absent on minimal Linux distros; fall back
# to coreutils `sha1sum` (same digest) so the script is portable.
if command -v shasum >/dev/null 2>&1; then
  hash="$(printf '%s' "$service_dir" | shasum | cut -c1-10)"
else
  hash="$(printf '%s' "$service_dir" | sha1sum | cut -c1-10)"
fi
socket="${DOCKER_PROXY_SOCKET:-${TMPDIR:-/tmp}/cloud-agent-dind-${hash}.sock}"
# Collapse runs of slashes ($TMPDIR often carries a trailing slash on macOS).
socket="$(printf '%s' "$socket" | sed 's:/\{1,\}:/:g')"
export DOCKER_PROXY_SOCKET="$socket"

node "$script_dir/docker-privileged-proxy.mjs" &
proxy=$!
# Best-effort cleanup. NOTE(review): `exec` below replaces this shell, so the
# EXIT trap never fires on the happy path and the proxy outlives wrangler —
# confirm that is the intended lifecycle (the socket path is stable, so a
# later run reuses/replaces it).
trap 'kill $proxy 2>/dev/null || true' EXIT INT TERM

# Wait up to ~10s for the proxy to create its listening socket.
i=0
while [ $i -lt 100 ]; do
  [ -S "$socket" ] && break
  sleep 0.1
  i=$((i + 1))
done

if [ ! -S "$socket" ]; then
  echo "Docker proxy socket not found at $socket." >&2
  exit 1
fi

DOCKER_HOST="unix://$socket" exec wrangler dev "$@"
117 changes: 117 additions & 0 deletions services/cloud-agent-next/scripts/docker-privileged-proxy.mjs
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
// Docker socket proxy that injects HostConfig.Privileged=true into
// `POST /containers/create` requests.
//
// Why this exists
// ---------------
// Cloudflare Containers run our `SandboxSmall` image (Docker-in-Docker)
// privileged in production, but local `wrangler dev` has no supported way
// to set Docker container create options like `HostConfig.Privileged=true`.
// Without that, rootless dockerd inside the Sandbox container fails to set
// up its mounts and `/var/run/docker.sock` never appears.
//
// Workaround: run a small Unix-socket proxy on the developer machine that
// forwards Docker API calls to the host's real Docker socket and rewrites
// `POST /containers/create` bodies to set `HostConfig.Privileged=true`.
// `pnpm dev` then runs Wrangler with `DOCKER_HOST` pointed at this proxy.
//
// This matches the workaround documented in cloudflare/sandbox-sdk#662 and
// the `sandbox-dind-repro` reference repository.

import { execFileSync } from 'node:child_process';
import fs from 'node:fs';
import net from 'node:net';
import os from 'node:os';
import path from 'node:path';

// Strip a leading `unix://` scheme from a Docker host string. Bare socket
// paths — and null/undefined — pass through untouched.
function normalizeSocket(socket) {
  const scheme = 'unix://';
  if (socket != null && socket.startsWith(scheme)) {
    return socket.slice(scheme.length);
  }
  return socket;
}

// Resolve the Docker daemon socket from the active `docker context`.
// Returns undefined when the docker CLI is missing or the inspect fails,
// letting the caller fall back to the conventional /var/run/docker.sock.
function getDockerContextSocket() {
  const inspectArgs = ['context', 'inspect', '--format', '{{.Endpoints.docker.Host}}'];
  try {
    const host = execFileSync('docker', inspectArgs, {
      encoding: 'utf8',
      stdio: ['ignore', 'pipe', 'ignore'],
    });
    return normalizeSocket(host.trim());
  } catch {
    return undefined;
  }
}

// Socket paths: where we listen, and the real Docker daemon we forward to.
const listenPath =
  process.env.DOCKER_PROXY_SOCKET ?? path.join(process.cwd(), '.wrangler/docker-privileged.sock');
const targetPath =
  normalizeSocket(process.env.DOCKER_SOCKET) ?? getDockerContextSocket() ?? '/var/run/docker.sock';

// Remove any stale socket left by a previous run before listening.
fs.mkdirSync(path.dirname(listenPath), { recursive: true });
try {
  fs.unlinkSync(listenPath);
} catch (error) {
  if (error.code !== 'ENOENT') throw error;
}

const server = net.createServer(client => {
  // Only the FIRST request on a connection is inspected/patched; after that
  // the connection degrades to a dumb pipe. A keep-alive client that sends a
  // later `/containers/create` on the same connection bypasses the patch —
  // acceptable for wrangler dev, which opens fresh connections per request.
  let buffered = Buffer.alloc(0);
  let patched = false;
  const upstream = net.createConnection(targetPath);

  upstream.on('data', chunk => client.write(chunk));
  upstream.on('error', error => {
    console.error(`Docker upstream error: ${error.message}`);
    client.destroy();
  });
  client.on('error', () => upstream.destroy());
  client.on('end', () => upstream.end());
  upstream.on('end', () => client.end());

  client.on('data', chunk => {
    if (patched) {
      upstream.write(chunk);
      return;
    }

    buffered = Buffer.concat([buffered, chunk]);
    const headerEnd = buffered.indexOf('\r\n\r\n');
    // Headers not complete yet — keep buffering.
    if (headerEnd === -1) return;

    const header = buffered.slice(0, headerEnd).toString('utf8');
    const bodyStart = headerEnd + 4;
    const match = header.match(/^POST\s+\S*\/containers\/create(?:\?|\s)/);
    const contentLength = header.match(/\r\nContent-Length:\s*(\d+)/i);

    // Anything that is not a container-create with a known length (e.g. a
    // chunked body) is forwarded untouched.
    if (!match || !contentLength) {
      patched = true;
      upstream.write(buffered);
      return;
    }

    const length = Number(contentLength[1]);
    // Body not fully buffered yet — wait for more data.
    if (buffered.length < bodyStart + length) return;

    const body = buffered.slice(bodyStart, bodyStart + length).toString('utf8');
    const rest = buffered.slice(bodyStart + length);
    let payload;
    try {
      payload = JSON.parse(body);
    } catch {
      // Malformed JSON: forward as-is and let the daemon reject it, rather
      // than crashing the whole proxy with an uncaught exception in this
      // 'data' handler.
      patched = true;
      upstream.write(buffered);
      return;
    }
    // The actual privilege injection, with Content-Length fixed up to match.
    payload.HostConfig = { ...payload.HostConfig, Privileged: true };
    const nextBody = Buffer.from(JSON.stringify(payload));
    const nextHeader = header.replace(/(\r\nContent-Length:\s*)\d+/i, `$1${nextBody.length}`);

    patched = true;
    upstream.write(Buffer.concat([Buffer.from(`${nextHeader}\r\n\r\n`), nextBody, rest]));
  });
});

server.on('error', error => {
  // e.g. EADDRINUSE when another dev session already owns the socket path;
  // without this handler the error is uncaught and the stack trace is noisy.
  console.error(`Docker privileged proxy failed to listen: ${error.message}`);
  process.exit(1);
});

server.listen(listenPath, () => {
  // Restrict the socket to the current user; chmod on a socket path is not
  // supported on Windows.
  if (os.platform() !== 'win32') fs.chmodSync(listenPath, 0o600);
  console.log(`Docker privileged proxy listening on ${listenPath}`);
  console.log(`Forwarding to ${targetPath}`);
});

process.on('SIGINT', () => server.close(() => process.exit(0)));
process.on('SIGTERM', () => server.close(() => process.exit(0)));
23 changes: 23 additions & 0 deletions services/cloud-agent-next/src/execution/orchestrator.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,14 @@ import type {
} from '../types.js';
import type { CloudAgentSession } from '../persistence/CloudAgentSession.js';
import type { ExecutionPlan, ExecutionResult } from './types.js';
import type { DevContainerHandle } from '../kilo/devcontainer.js';
import { ExecutionError } from './errors.js';
import { SessionService, type PreparedSession } from '../session-service.js';
import { logger } from '../logger.js';
import { logSandboxOperationTimeout } from '../sandbox-timeout-logging.js';
import { updateGitRemoteToken } from '../workspace.js';
import { WrapperClient, type WrapperPromptOptions } from '../kilo/wrapper-client.js';
import { bringUpDevContainer, KILO_CLI_VERSION } from '../kilo/devcontainer.js';
import { withDORetry } from '../utils/do-retry.js';
import { normalizeAgentMode } from '../schema.js';
import { buildImagePromptParts, downloadImagePromptParts } from './image-prompt-parts.js';
Expand Down Expand Up @@ -128,11 +130,15 @@ export class ExecutionOrchestrator {
let wrapperClient: WrapperClient;
let kiloSessionId: string;
try {
const devcontainer = await this.ensureDevContainerHandleIfNeeded(prepared, plan);
const fixedPort = plan.workspace.existingMetadata?.devcontainer?.wrapperPort;
const result = await WrapperClient.ensureWrapper(sandbox, prepared.session, {
agentSessionId: sessionId,
userId,
workspacePath: prepared.context.workspacePath,
sessionId: wrapper.kiloSessionId,
devcontainer,
fixedPort: devcontainer ? fixedPort : undefined,
});
wrapperClient = result.client;
kiloSessionId = result.sessionId;
Expand Down Expand Up @@ -219,6 +225,23 @@ export class ExecutionOrchestrator {
// Private Helpers
// ---------------------------------------------------------------------------

/**
 * Bring up the session's dev container when (and only when) the workspace
 * metadata recorded one on a previous run; otherwise resolve to undefined
 * so the wrapper runs directly in the outer sandbox.
 */
private async ensureDevContainerHandleIfNeeded(
prepared: PreparedSession,
plan: ExecutionPlan
): Promise<DevContainerHandle | undefined> {
const dc = plan.workspace.existingMetadata?.devcontainer;
if (dc === undefined) {
return undefined;
}

// Reuse the previously recorded paths/port so the container comes back up
// exactly as it was provisioned.
const options = {
workspacePath: dc.workspacePath,
sessionHome: prepared.context.sessionHome,
agentSessionId: plan.sessionId,
wrapperPort: dc.wrapperPort,
kiloCliVersion: KILO_CLI_VERSION,
configPath: dc.configPath,
};
return bringUpDevContainer(prepared.session, options);
}

/**
* Prepare workspace based on the workspace plan.
* Handles three paths: resume, fast path (fully prepared), and full init.
Expand Down
6 changes: 6 additions & 0 deletions services/cloud-agent-next/src/execution/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,12 @@ export type ExistingSessionMetadata = {
branchName: string;
sandboxId?: string;
sessionHome?: string;
devcontainer?: {
workspacePath: string;
innerWorkspaceFolder: string;
wrapperPort: number;
configPath: string;
};
upstreamBranch?: string;
appendSystemPrompt?: string;
/** GitHub repo (for token updates) */
Expand Down
Loading