From 7756d07d283b97014b011ee68c6d8b3787bc16f5 Mon Sep 17 00:00:00 2001
From: Zvonimir
Date: Tue, 5 May 2026 14:23:24 +0200
Subject: [PATCH 1/7] feat(cli): introduce token-store + scope-gated kafka routes (slice 06 phase 1)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

ADR-0003: extend the bearer-token model to per-token scopes without
breaking the legacy single-line file format. Backward-compat is
non-negotiable: every existing deployment carries a scope-less token on
disk; that token must continue to grant access to every API surface.

Changes:

- New `packages/cli/src/token-store.ts`: pure parser + serializer for the
  extended file format `<token>[\t<scopes>[\t<name>[\t<createdAt>]]]`.
  Legacy lines (no TAB) parse to scopes='*' (full access). Comments and
  blank lines round-trip byte-identically. Skips malformed lines with a
  warning sink rather than crashing.

- `auth.ts` now exports `loadTokenStore` (structured map) and
  `verifyTokenScope` (pure scope checker; '*' grants any scope). Existing
  `loadTokens` is kept as a Set-returning wrapper so the 13 existing call
  sites (api-client, lifecycle, every route module) compile and behave
  identically for legacy tokens.

- `daemon/routes/kafka.ts`: per-route scope gates wired in:
      POST   /api/kafka/endpoint         → kafka:endpoint:write
      POST   /api/kafka/endpoint/verify  → kafka:endpoint:write
      DELETE /api/kafka/endpoint/<uri>   → kafka:endpoint:write
      GET    /api/kafka/endpoint         → kafka:endpoint:read
      GET    /api/kafka/endpoint/<uri>   → kafka:endpoint:read
  Scope mismatch returns 403 (NOT 401) and omits WWW-Authenticate.

- `RequestContext` carries a `tokenStore` alongside the legacy
  `validTokens` Set. Lifecycle wires both from a single `loadTokenStore()`
  call; per-agent tokens are added with scopes='*'.

- `kafka-route-verify` test ctx mock updated to include a wildcard
  `tokenStore` so existing route assertions are unaffected by the scope
  check (scope behavior is covered separately in
  `daemon-auth-scopes.test.ts`).

Tests:

- New `daemon-auth-scopes.test.ts` (THE backward-compat test):
  - legacy scope-less file accepted on every kafka route (200/500, never
    401/403)
  - read-scoped token: 200 on GET, 403 on POST/DELETE
  - write-scoped token: 200 on POST, 403 on GET
- New `token-store.test.ts` (30 cases): legacy/scoped/mixed/malformed
  parsing, round-trip determinism, lookup helpers.
- New `auth-scopes.test.ts`: `verifyTokenScope` "'*' grants any"
  semantics, exact-match for explicit lists, fail-closed on unknown.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 packages/cli/src/auth.ts                     | 153 +++++--
 packages/cli/src/daemon/handle-request.ts    |   9 +-
 packages/cli/src/daemon/lifecycle.ts         |  24 +-
 packages/cli/src/daemon/routes/context.ts    |   9 +
 packages/cli/src/daemon/routes/kafka.ts      |  38 +-
 packages/cli/src/token-store.ts              | 384 +++++++++++++++++
 packages/cli/test/auth-scopes.test.ts        |  81 ++++
 packages/cli/test/daemon-auth-scopes.test.ts | 414 +++++++++++++++++++
 packages/cli/test/kafka-route-verify.test.ts |  23 +-
 packages/cli/test/token-store.test.ts        | 331 +++++++++++++++
 10 files changed, 1429 insertions(+), 37 deletions(-)
 create mode 100644 packages/cli/src/token-store.ts
 create mode 100644 packages/cli/test/auth-scopes.test.ts
 create mode 100644 packages/cli/test/daemon-auth-scopes.test.ts
 create mode 100644 packages/cli/test/token-store.test.ts

diff --git a/packages/cli/src/auth.ts b/packages/cli/src/auth.ts
index 2927858e1..f6cc1cdf3 100644
--- a/packages/cli/src/auth.ts
+++ b/packages/cli/src/auth.ts
@@ -3,14 +3,47 @@
  *
  * Uses bearer tokens stored on disk.
Tokens are auto-generated on first start. * Any interface that needs auth calls `verifyToken(token)` against the loaded set. + * + * Slice 06 — added scoped tokens via the `token-store` deep module. The + * auth file format is extended to allow per-token scopes (see ADR-0003); + * legacy scope-less lines continue to grant full access. The new + * `loadTokenStore` returns the structured map; `loadTokens` is a thin + * wrapper that returns just the set of full-token strings, so the 13 + * existing call sites keep compiling and behaving identically for legacy + * tokens. */ import { randomBytes } from 'node:crypto'; -import { readFile, writeFile, mkdir, chmod } from 'node:fs/promises'; +import { readFile, mkdir, chmod, writeFile } from 'node:fs/promises'; import { join, dirname } from 'node:path'; import { existsSync } from 'node:fs'; import type { IncomingMessage, ServerResponse } from 'node:http'; import { dkgDir } from './config.js'; +import { + parseTokenFile, + serializeTokenStore, + lookupTokenRecord, + setTokenRecord, + type ParsedTokenFile, + type TokenStore, + type TokenRecord, + type Scope, +} from './token-store.js'; + +// Re-export the deep-module types + helpers so call sites only import from +// `auth.ts`. Keeps the seam between the deep parser/serializer module and +// the I/O-bearing auth surface visible in one place. +export type { TokenStore, TokenRecord, Scope } from './token-store.js'; +export { + lookupTokenRecord, + toPublicRecord, + tokenPrefix, + setTokenRecord, + deleteTokenRecord, + serializeTokenStore, + parseTokenFile, + type PublicTokenRecord, +} from './token-store.js'; // --------------------------------------------------------------------------- // Types @@ -27,7 +60,12 @@ export interface AuthConfig { // Token file management // --------------------------------------------------------------------------- -function tokenFilePath(): string { +/** + * Resolve the on-disk auth-token path. Goes through `dkgDir()` so test + * harnesses can redirect via `DKG_HOME=/tmp/...` without touching the + * production location. + */ +export function tokenFilePath(): string { return join(dkgDir(), 'auth.token'); } @@ -36,42 +74,83 @@ function generateToken(): string { } /** - * Load tokens from disk + config. Auto-generates a token file if none exists. - * Returns the set of valid tokens. + * Load the structured token store from disk + config. Auto-generates a + * token file (legacy single-line format → scopes = `'*'`) if none exists. + * + * Config-defined tokens (from `dkg.config.yaml`) are inserted as + * scope-less = root tokens — they are typically operator-supplied + * preshared secrets, and slice 06's contract is "legacy = full access". + * + * Reads the file with `parseTokenFile`, which skips malformed lines + * with a warning rather than crashing. The warning sink defaults to + * `console.warn` — the daemon can route these through `Logger` later. 
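+ *
+ * A sketch of an accepted file, with invented token values (`\t` marks a
+ * literal TAB; real tokens are 43 chars of base64url):
+ *
+ *   # DKG node API token — treat this like a password
+ *   q1w2e3r4t5y6u7i8                               ← legacy line, scopes '*'
+ *   z9x8c7v6b5n4m3a2\tkafka:endpoint:read\tcatchup ← scoped + named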
 */
-export async function loadTokens(authConfig?: AuthConfig): Promise<Set<string>> {
-  const tokens = new Set<string>();
+export async function loadTokenStore(authConfig?: AuthConfig): Promise<TokenStore> {
+  const filePath = tokenFilePath();

-  // Add any config-defined tokens
-  if (authConfig?.tokens) {
-    for (const t of authConfig.tokens) {
-      if (t.length > 0) tokens.add(t);
-    }
-  }
+  let parsed: ParsedTokenFile = { store: new Map(), preserved: [] };

-  // Load or generate the file-based token
-  const filePath = tokenFilePath();
   if (existsSync(filePath)) {
     try {
       const raw = await readFile(filePath, 'utf-8');
-      for (const line of raw.split('\n')) {
-        const t = line.trim();
-        if (t.length > 0 && !t.startsWith('#')) tokens.add(t);
-      }
+      parsed = parseTokenFile(raw, {
+        onWarning: (msg) => console.warn(`[auth] ${msg}`),
+      });
     } catch {
-      // Unreadable — generate a fresh one
+      // Unreadable — fall through to auto-generate, same as the pre-slice-06
+      // behavior. Don't lock the operator out of a fresh restart.
+    }
+  }
+
+  // Insert config tokens AFTER file tokens so a config token can't
+  // accidentally clobber a file-only one (the parser already de-dupes
+  // file lines by prefix).
+  if (authConfig?.tokens) {
+    for (const t of authConfig.tokens) {
+      if (t.length === 0) continue;
+      const existing = lookupTokenRecord(t, parsed.store);
+      if (existing) continue;
+      const record: TokenRecord = {
+        prefix: t.length >= 8 ? t.slice(0, 8) : t,
+        fullToken: t,
+        scopes: '*',
+      };
+      setTokenRecord(parsed.store, record);
     }
   }

-  if (tokens.size === 0) {
+  // Auto-generate on first run. Legacy single-line format so a
+  // downgrade to a pre-slice-06 daemon still reads it correctly.
+  if (parsed.store.size === 0) {
     const token = generateToken();
-    tokens.add(token);
+    const record: TokenRecord = {
+      prefix: token.slice(0, 8),
+      fullToken: token,
+      scopes: '*',
+    };
+    setTokenRecord(parsed.store, record);
+    parsed.preserved.push({
+      text: '# DKG node API token — treat this like a password',
+      index: 0,
+    });
     await mkdir(dirname(filePath), { recursive: true });
-    await writeFile(filePath, `# DKG node API token — treat this like a password\n${token}\n`, { mode: 0o600 });
+    const out = serializeTokenStore(parsed);
+    await writeFile(filePath, out, { mode: 0o600 });
     await chmod(filePath, 0o600);
   }

-  return tokens;
+  return parsed.store;
+}
+
+/**
+ * Backward-compat wrapper. Returns the set of full-token strings — the
+ * pre-slice-06 shape of `loadTokens`. Used by 13 call sites (the daemon
+ * lifecycle, every route module, the api-client). They continue to work
+ * unchanged for legacy tokens.
+ */
+export async function loadTokens(authConfig?: AuthConfig): Promise<Set<string>> {
+  const store = await loadTokenStore(authConfig);
+  return new Set([...store.values()].map((r) => r.fullToken));
 }

 // ---------------------------------------------------------------------------
@@ -87,6 +166,29 @@ export function verifyToken(token: string | undefined, validTokens: Set<string>)
   return validTokens.has(token);
 }

+/**
+ * Verify a bearer token has the requested scope.
+ *
+ * - `'*'` (root) grants any scope.
+ * - Explicit scope arrays are exact-match (no globbing).
+ * - Unknown / unrecognized tokens fail closed (false).
+ *
+ * Pure function — every input is explicit, no global state. Callers are
+ * expected to send the appropriate 403 (NOT 401: the token IS valid; the
+ * scope is wrong).
+ */
+export function verifyTokenScope(
+  token: string | undefined,
+  requiredScope: Scope,
+  store: TokenStore,
+): boolean {
+  if (!token) return false;
+  const record = lookupTokenRecord(token, store);
+  if (!record) return false;
+  if (record.scopes === '*') return true;
+  return record.scopes.includes(requiredScope);
+}
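+
+// A behavior sketch pinned by test/auth-scopes.test.ts (comment only;
+// the callable surface is just `verifyTokenScope`):
+//
+//   verifyTokenScope(tok, 'kafka:endpoint:read', store)
+//     → true   when the record carries scopes === '*' (legacy / root)
+//     → true   when the scope list includes it (exact match, no globs)
+//     → false  when the token is unknown or the scope is absent (fail closed)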
+ */ +export function verifyTokenScope( + token: string | undefined, + requiredScope: Scope, + store: TokenStore, +): boolean { + if (!token) return false; + const record = lookupTokenRecord(token, store); + if (!record) return false; + if (record.scopes === '*') return true; + return record.scopes.includes(requiredScope); +} + /** * Extract a bearer token from an HTTP Authorization header value. * Accepts: "Bearer " or just "". @@ -128,6 +230,11 @@ function isPublicPath(pathname: string): boolean { * * Usage in the server handler: * if (!httpAuthGuard(req, res, authEnabled, validTokens)) return; + * + * Note: scope checks live PER ROUTE (see `daemon/routes/kafka.ts`). The + * guard only enforces "valid token present" — pushing per-route scope + * knowledge into the guard would force it to know every route's required + * scope, which is exactly the smell ADR-0003 calls out. */ export function httpAuthGuard( req: IncomingMessage, diff --git a/packages/cli/src/daemon/handle-request.ts b/packages/cli/src/daemon/handle-request.ts index 07f07dd3a..6be520bee 100644 --- a/packages/cli/src/daemon/handle-request.ts +++ b/packages/cli/src/daemon/handle-request.ts @@ -101,7 +101,7 @@ import { } from '../config.js'; import { createPublisherControlFromStore, startPublisherRuntimeIfEnabled, type PublisherRuntime } from '../publisher-runner.js'; import { createCatchupRunner, type CatchupJobResult, type CatchupRunner } from '../catchup-runner.js'; -import { loadTokens, httpAuthGuard, extractBearerToken } from '../auth.js'; +import { loadTokens, httpAuthGuard, extractBearerToken, type TokenStore } from '../auth.js'; import { ExtractionPipelineRegistry } from '@origintrail-official/dkg-core'; import { MarkItDownConverter, isMarkItDownAvailable, extractFromMarkdown, extractWithLlm } from '../extraction/index.js'; import { @@ -356,6 +356,12 @@ export async function handleRequest( vectorStore: VectorStore, embeddingProvider: EmbeddingProvider | null, validTokens: Set, + // Slice 06 — structured token store; the kafka routes (and the + // upcoming auth-admin routes) read scopes off this. `validTokens` + // is kept in the signature unchanged because the rest of the + // codebase uses the Set-shape; the store is the supplementary + // surface the scope-aware routes consume. + tokenStore: TokenStore, // API socket identity — passed in from the outer daemon closure so // `manifestSelfClient()` can build a self-pointing URL from trusted // server state instead of request headers (SSRF defence). 
@@ -394,6 +400,7 @@ export async function handleRequest( vectorStore, embeddingProvider, validTokens, + tokenStore, apiHost, apiPortRef, url, diff --git a/packages/cli/src/daemon/lifecycle.ts b/packages/cli/src/daemon/lifecycle.ts index 1de531d10..1b128d62a 100644 --- a/packages/cli/src/daemon/lifecycle.ts +++ b/packages/cli/src/daemon/lifecycle.ts @@ -103,7 +103,7 @@ import { } from '../config.js'; import { createPublisherControlFromStore, startPublisherRuntimeIfEnabled, type PublisherRuntime } from '../publisher-runner.js'; import { createCatchupRunner, type CatchupJobResult, type CatchupRunner } from '../catchup-runner.js'; -import { loadTokens, httpAuthGuard } from '../auth.js'; +import { loadTokens, loadTokenStore, httpAuthGuard, setTokenRecord, type TokenRecord } from '../auth.js'; import { ExtractionPipelineRegistry } from '@origintrail-official/dkg-core'; import { MarkItDownConverter, isMarkItDownAvailable, extractFromMarkdown, extractWithLlm } from '../extraction/index.js'; import { @@ -1364,16 +1364,27 @@ export async function runDaemonInner( // --- Authentication --- const authEnabled = config.auth?.enabled !== false; - const validTokens = await loadTokens(config.auth); + // Slice 06 — load the structured store first; the Set view is derived + // from it. Per-agent tokens are added to BOTH so the existing + // httpAuthGuard (Set-based) continues to accept them, and the new + // scope-aware routes see them as full-access (`scopes: '*'`). + const tokenStore = await loadTokenStore(config.auth); + for (const a of agent.listLocalAgents()) { + const record: TokenRecord = { + prefix: a.authToken.length >= 8 ? a.authToken.slice(0, 8) : a.authToken, + fullToken: a.authToken, + // Per-agent tokens are not user-mintable and have always granted + // unscoped access. Treat them as `'*'` for slice 06's scope check. + scopes: '*', + }; + setTokenRecord(tokenStore, record); + } + const validTokens = new Set([...tokenStore.values()].map((r) => r.fullToken)); const bridgeAuthToken = (await loadBridgeAuthToken()) ?? (validTokens.size > 0 ? (validTokens.values().next().value as string) : undefined); - // Register per-agent Bearer tokens so the auth guard accepts them - for (const a of agent.listLocalAgents()) { - validTokens.add(a.authToken); - } if (authEnabled) { log( @@ -1625,6 +1636,7 @@ export async function runDaemonInner( vectorStore, embeddingProvider, validTokens, + tokenStore, apiHost, apiPortRef, ); diff --git a/packages/cli/src/daemon/routes/context.ts b/packages/cli/src/daemon/routes/context.ts index 930edd9e9..cfa2f4059 100644 --- a/packages/cli/src/daemon/routes/context.ts +++ b/packages/cli/src/daemon/routes/context.ts @@ -21,6 +21,7 @@ import type { ExtractionStatusRecord } from '../../extraction-status.js'; import type { FileStore } from '../../file-store.js'; import type { VectorStore, EmbeddingProvider } from '../../vector-store.js'; import type { CatchupTracker } from '../types.js'; +import type { TokenStore } from '../../auth.js'; export interface RequestContext { req: IncomingMessage; @@ -45,6 +46,14 @@ export interface RequestContext { vectorStore: VectorStore; embeddingProvider: EmbeddingProvider | null; validTokens: Set; + /** + * Slice 06 — structured token store backing `validTokens`. Routes that + * need scope checks (e.g. kafka, auth admin) read from here via + * `verifyTokenScope`. Legacy tokens carry scope `'*'` and pass every + * scope check, preserving backward compat for the routes that have + * not yet been gated. 
+ */ + tokenStore: TokenStore; // API socket identity — trusted server-side state for manifestSelfClient // SSRF defence. apiHost: string; diff --git a/packages/cli/src/daemon/routes/kafka.ts b/packages/cli/src/daemon/routes/kafka.ts index 87335f0f4..a4a6d0148 100644 --- a/packages/cli/src/daemon/routes/kafka.ts +++ b/packages/cli/src/daemon/routes/kafka.ts @@ -11,6 +11,7 @@ import { type KafkaEndpointRequestBody, type KafkaEndpointVerifyRequestBody, } from '../parsers/kafka-request.js'; +import { verifyTokenScope, type Scope } from '../../auth.js'; import type { RequestContext } from './context.js'; import { getKafkaEndpoint, @@ -37,31 +38,58 @@ const VALID_LIST_STATUSES: ReadonlySet = new Set([ 'all', ]); +/** + * Per-route scope guard. Returns true when the caller's bearer token + * carries `scope` (or wildcard `*`); when false, a 403 (NOT 401) has + * already been written to the response — caller must early-return. + * + * 403 distinguishes "valid token, wrong permission" from 401's "no / + * invalid token". The httpAuthGuard at the top of the request pipeline + * already enforces 401; by the time a kafka handler runs we know the + * token is valid, so any rejection here is a scope mismatch. + * + * No `WWW-Authenticate` header — that header is reserved for 401 + * responses by RFC 7235; sending it on a 403 confuses clients about + * whether to re-prompt for credentials. + */ +function requireScope(ctx: RequestContext, scope: Scope): boolean { + if (verifyTokenScope(ctx.requestToken, scope, ctx.tokenStore)) return true; + jsonResponse(ctx.res, 403, { + error: `Token lacks required scope: ${scope}`, + }); + return false; +} + export async function handleKafkaRoutes(ctx: RequestContext): Promise { const { req, path } = ctx; - // POST /api/kafka/endpoint — register + // POST /api/kafka/endpoint — register (write) if (req.method === 'POST' && path === ENDPOINT_BASE_PATH) { + if (!requireScope(ctx, 'kafka:endpoint:write')) return; return handleRegister(ctx); } - // POST /api/kafka/endpoint/verify — re-verify + // POST /api/kafka/endpoint/verify — re-verify (write — mutates the KA) if (req.method === 'POST' && path === `${ENDPOINT_BASE_PATH}/verify`) { + if (!requireScope(ctx, 'kafka:endpoint:write')) return; return handleVerify(ctx); } - // GET /api/kafka/endpoint?contextGraphId=X[&status=...] — list + // GET /api/kafka/endpoint?contextGraphId=X[&status=...] — list (read) if (req.method === 'GET' && path === ENDPOINT_BASE_PATH) { + if (!requireScope(ctx, 'kafka:endpoint:read')) return; return handleList(ctx); } - // GET /api/kafka/endpoint/ — single fetch + // GET /api/kafka/endpoint/ — single fetch (read) if (req.method === 'GET' && path.startsWith(`${ENDPOINT_BASE_PATH}/`)) { + if (!requireScope(ctx, 'kafka:endpoint:read')) return; return handleGetByUri(ctx); } - // DELETE /api/kafka/endpoint/ — soft-revoke + // DELETE /api/kafka/endpoint/ — soft-revoke (write) if (req.method === 'DELETE' && path.startsWith(`${ENDPOINT_BASE_PATH}/`)) { + if (!requireScope(ctx, 'kafka:endpoint:write')) return; return handleRevoke(ctx); } } diff --git a/packages/cli/src/token-store.ts b/packages/cli/src/token-store.ts new file mode 100644 index 000000000..2225335df --- /dev/null +++ b/packages/cli/src/token-store.ts @@ -0,0 +1,384 @@ +/** + * Pure parser + serializer for the DKG auth token file. 
+ * + * File format (extension over the legacy single-line format — backward + * compat is non-negotiable, see ADR-0003): + * + * # legacy → scopes = '*' + * # scoped + * # scoped + name + * # full record + * # comment # preserved on round-trip + * # blank line preserved + * + * Deep-module discipline: no I/O, no global state. The I/O lives in + * `auth.ts` (which calls `parseTokenFile` on the on-disk content). + */ + +// --------------------------------------------------------------------------- +// Types +// --------------------------------------------------------------------------- + +/** + * A scope is a free-form string (e.g. `kafka:endpoint:write`). The literal + * string `'*'` is the wildcard "full access" marker — used for legacy + * scope-less tokens AND for explicitly-root tokens minted via the API. + */ +export type Scope = string; + +/** + * Records survive a round-trip: parsing then serializing the same input + * yields the same bytes for any input that already carries full records + * (token + scopes + name + createdAt). For inputs missing optional fields + * (legacy scope-less, or scope-only-no-name lines), the serializer emits + * the canonical field count for what's present — see `serializeTokenStore`. + */ +export interface TokenRecord { + /** First 8 chars of the full token. Used for list/revoke API surfaces. */ + prefix: string; + /** The on-disk secret. NEVER returned in list/get responses. */ + fullToken: string; + /** Either `'*'` (full access) or a list of explicit scope strings. */ + scopes: Scope[] | '*'; + /** Optional human-readable label. */ + name?: string; + /** ISO-8601 timestamp the token was minted. */ + createdAt?: string; +} + +/** + * Map keyed by token prefix. We key by prefix (not by full token) because + * the prefix is the public identifier (used by list/revoke) — full-token + * lookup goes through `lookupTokenRecord`. + * + * Insertion order is preserved (JS Maps do this), and the serializer + * relies on it for round-trip determinism. + */ +export type TokenStore = Map; + +/** + * Comments + blank lines that lived in the source file. Stored alongside + * the records (with their position relative to the records) so the + * serializer can re-emit them in the right place. Without this, a + * round-trip would silently strip `# comment` headers from operator-edited + * files. + * + * Position semantics: `index` is the record-position the line appears + * BEFORE. An `index` equal to `records.length` means "after the last + * record" (trailing comments / blank lines). + */ +export interface PreservedLine { + /** Verbatim line content (no trailing newline). */ + text: string; + /** Insertion position relative to records. */ + index: number; +} + +/** + * Output of `parseTokenFile`. The serializer takes the same shape so a + * full round-trip stays byte-identical for well-formed inputs. + */ +export interface ParsedTokenFile { + store: TokenStore; + preserved: PreservedLine[]; +} + +/** + * The first 8 chars of a token are the public identifier. Kept short so + * it fits in a CLI table, kept consistent across the codebase by going + * through this single helper. + * + * If a token is shorter than 8 chars (only happens in malformed test + * fixtures — production tokens are 43 chars of base64url) we use the + * whole thing. + */ +export function tokenPrefix(token: string): string { + return token.length >= 8 ? 
token.slice(0, 8) : token; +} + +// --------------------------------------------------------------------------- +// Parser +// --------------------------------------------------------------------------- + +export interface ParseTokenFileOptions { + /** + * Optional sink for warnings (malformed lines, duplicate prefixes). We + * route through a callback rather than `console.warn` so the daemon can + * forward them to its `Logger`. Defaults to silent. + */ + onWarning?: (msg: string) => void; +} + +/** + * Parse a token-file blob. Skips malformed lines with a warning instead of + * throwing — a single bad line should never lock an operator out. + * + * Lines starting with `#` are comments. Blank lines are preserved. Both + * are recorded as `PreservedLine` entries so the serializer can re-emit + * them in their original positions. + */ +export function parseTokenFile( + raw: string, + opts: ParseTokenFileOptions = {}, +): ParsedTokenFile { + const onWarning = opts.onWarning ?? (() => { /* silent */ }); + + const store: TokenStore = new Map(); + const preserved: PreservedLine[] = []; + + // Splitting on `\n` and stripping a trailing `\r` keeps round-trip + // semantics correct on both LF and CRLF inputs without forcing one or + // the other on the serializer (we always emit LF — see + // `serializeTokenStore`'s contract). A trailing `\n` produces a final + // empty entry which we skip below. + const lines = raw.split('\n'); + + // Drop a single trailing empty entry produced by a file that ends in + // `\n` — this lets the round-trip be byte-identical instead of growing + // an extra blank line on every save. + if (lines.length > 0 && lines[lines.length - 1] === '') { + lines.pop(); + } + + for (const rawLine of lines) { + const line = rawLine.endsWith('\r') ? rawLine.slice(0, -1) : rawLine; + + // Comments + blank lines: preserve verbatim, skip parsing. + if (line.length === 0 || line.startsWith('#')) { + preserved.push({ text: line, index: store.size }); + continue; + } + + // We tolerate leading/trailing whitespace around the WHOLE line for + // legacy compatibility (the old parser called `.trim()`), but we do + // NOT trim per-field — TAB-separated fields can legitimately contain + // leading spaces in the `name` field. Inner whitespace is the + // operator's business. + // + // The fields: + // [0] token (required) + // [1] scopes (comma-separated; empty → `'*'`? NO — empty is a + // parse error; '*' must be explicit. Legacy lines have NO field + // 1 at all, which IS the '*' case.) + // [2] name (optional, free-form) + // [3] createdAt (optional, ISO-8601) + // + // Lines with more than 4 fields are malformed. Lines with empty + // token field are malformed. + const fields = line.split('\t'); + const token = fields[0]?.trim() ?? ''; + if (token.length === 0) { + onWarning(`token-store: skipping malformed line (empty token): ${truncateForLog(line)}`); + continue; + } + + let scopes: Scope[] | '*'; + if (fields.length === 1) { + // Legacy: token-only line. Full access. + scopes = '*'; + } else { + const scopesField = fields[1] ?? 
''; + const parsed = parseScopesField(scopesField); + if (parsed === null) { + onWarning( + `token-store: skipping malformed line (bad scopes field "${truncateForLog(scopesField)}"): token=${tokenPrefix(token)}`, + ); + continue; + } + scopes = parsed; + } + + if (fields.length > 4) { + onWarning( + `token-store: skipping malformed line (too many tab-separated fields, expected ≤4 got ${fields.length}): token=${tokenPrefix(token)}`, + ); + continue; + } + + const name = fields[2] && fields[2].length > 0 ? fields[2] : undefined; + const createdAt = fields[3] && fields[3].length > 0 ? fields[3] : undefined; + + const prefix = tokenPrefix(token); + + if (store.has(prefix)) { + onWarning( + `token-store: duplicate token prefix ${prefix} — keeping first occurrence`, + ); + continue; + } + + const record: TokenRecord = { prefix, fullToken: token, scopes }; + if (name !== undefined) record.name = name; + if (createdAt !== undefined) record.createdAt = createdAt; + store.set(prefix, record); + } + + return { store, preserved }; +} + +/** + * Parse a single scopes field. Returns `'*'` when the literal `*` is + * supplied, an array of trimmed non-empty scopes for a comma list, or + * `null` to signal a malformed field (caller skips the whole line). + * + * An empty string is treated as malformed because legacy "no scopes → + * full access" comes from a missing field 1, not an empty one. + */ +function parseScopesField(raw: string): Scope[] | '*' | null { + const value = raw.trim(); + if (value.length === 0) return null; + if (value === '*') return '*'; + const parts = value.split(',').map((s) => s.trim()).filter((s) => s.length > 0); + if (parts.length === 0) return null; + // Reject obviously-malformed scope strings. We allow `:` (the slice 06 + // verb-namespace separator), letters/digits/dash/dot/underscore. Reject + // whitespace and tabs and the wildcard mixed with other entries. + for (const p of parts) { + if (!/^[A-Za-z0-9_.:\-*]+$/.test(p)) return null; + } + // A list containing `*` is meaningless — `*` must stand alone. Reject. + if (parts.includes('*')) return null; + return parts; +} + +function truncateForLog(s: string): string { + return s.length > 80 ? s.slice(0, 77) + '...' : s; +} + +// --------------------------------------------------------------------------- +// Serializer +// --------------------------------------------------------------------------- + +/** + * Serialize a token store back to file content. Round-trip-safe for inputs + * produced by `parseTokenFile`: parsing the output of `serializeTokenStore` + * applied to a `ParsedTokenFile` yields the same `ParsedTokenFile`. + * + * Field-count discipline: + * - legacy `'*'` records with no name/createdAt → token-only line + * - explicit-`'*'` records with name → token + `*` + name + * - scoped records → token + scopes + (name?) + (createdAt?), trailing + * empty fields suppressed + * + * Comments and blank lines from `preserved` are re-emitted at their + * recorded positions. + */ +export function serializeTokenStore(parsed: ParsedTokenFile): string { + const { store, preserved } = parsed; + const records = [...store.values()]; + + const lines: string[] = []; + + // Pre-bucket preserved lines by the record-index they precede. Multiple + // entries with the same `index` keep their original order. + const buckets = new Map(); + for (const p of preserved) { + const bucket = buckets.get(p.index) ?? 
[]; + bucket.push(p); + buckets.set(p.index, bucket); + } + + for (let i = 0; i < records.length; i++) { + const before = buckets.get(i); + if (before) { + for (const p of before) lines.push(p.text); + } + lines.push(formatRecord(records[i]!)); + } + + // Trailing preserved lines (after the last record). + const trailing = buckets.get(records.length); + if (trailing) { + for (const p of trailing) lines.push(p.text); + } + + return lines.length === 0 ? '' : lines.join('\n') + '\n'; +} + +function formatRecord(r: TokenRecord): string { + const fields: string[] = [r.fullToken]; + + // Legacy single-field shape: only used when scopes is '*' AND there's no + // name/createdAt. Any record with a name or timestamp must emit at + // least the scopes field so the parser knows the trailing fields are + // metadata, not part of a malformed token. + if (r.scopes === '*' && r.name === undefined && r.createdAt === undefined) { + return fields[0]!; + } + + fields.push(r.scopes === '*' ? '*' : r.scopes.join(',')); + + if (r.name !== undefined || r.createdAt !== undefined) { + fields.push(r.name ?? ''); + } + if (r.createdAt !== undefined) { + fields.push(r.createdAt); + } + + return fields.join('\t'); +} + +// --------------------------------------------------------------------------- +// Lookup helpers +// --------------------------------------------------------------------------- + +/** + * Return the record for a given full token, or undefined. Done as a + * sequential scan because the map is keyed by prefix and prefix collisions + * are possible (extremely rare for 32-byte base64url tokens — birthday + * collision on 8 chars of base64url is ~1 in 2^48 — but possible). On a + * collision we still want to find the record whose `fullToken` matches. + * + * For the common case (no collision) the prefix lookup hits in O(1) and + * we compare one full token. + */ +export function lookupTokenRecord( + fullToken: string | undefined, + store: TokenStore, +): TokenRecord | undefined { + if (!fullToken) return undefined; + const prefix = tokenPrefix(fullToken); + const candidate = store.get(prefix); + if (candidate && candidate.fullToken === fullToken) return candidate; + // Collision fallback: linear scan. + for (const r of store.values()) { + if (r.fullToken === fullToken) return r; + } + return undefined; +} + +/** + * Add a record. Returns the same store (mutated). If a prefix collision + * occurs (vanishingly unlikely with random 43-char tokens, but possible + * in tests / for short fixtures), the new record replaces the old one + * and the caller is responsible for any rollback. Mint flows MUST + * detect collisions before calling this — the API gives them the + * `lookupTokenRecord` helper for that. + */ +export function setTokenRecord(store: TokenStore, record: TokenRecord): TokenStore { + store.set(record.prefix, record); + return store; +} + +/** Remove a record by prefix. Returns true when an entry was deleted. */ +export function deleteTokenRecord(store: TokenStore, prefix: string): boolean { + return store.delete(prefix); +} + +/** + * Sanitized public view of a record — drops `fullToken`. List/get APIs + * MUST go through this to avoid leaking secrets in subsequent responses. 
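+ *
+ * A shape sketch with invented values:
+ *
+ *   toPublicRecord({ prefix: 'abc12345', fullToken: '<secret>', scopes: '*' })
+ *     → { prefix: 'abc12345', scopes: '*' }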
+ */ +export interface PublicTokenRecord { + prefix: string; + scopes: Scope[] | '*'; + name?: string; + createdAt?: string; +} + +export function toPublicRecord(r: TokenRecord): PublicTokenRecord { + const out: PublicTokenRecord = { prefix: r.prefix, scopes: r.scopes }; + if (r.name !== undefined) out.name = r.name; + if (r.createdAt !== undefined) out.createdAt = r.createdAt; + return out; +} diff --git a/packages/cli/test/auth-scopes.test.ts b/packages/cli/test/auth-scopes.test.ts new file mode 100644 index 000000000..5b468a4a2 --- /dev/null +++ b/packages/cli/test/auth-scopes.test.ts @@ -0,0 +1,81 @@ +/** + * Slice 06 — unit tests for `verifyTokenScope`. + * + * Pure function — no I/O, no fixtures. Property: `'*'` grants every + * scope; explicit lists grant exactly the listed scopes; unknown tokens + * fail closed. + */ +import { describe, expect, it } from 'vitest'; +import { verifyTokenScope, type TokenStore } from '../src/auth.js'; +import type { TokenRecord } from '../src/token-store.js'; + +function buildStore(records: TokenRecord[]): TokenStore { + const store = new Map(); + for (const r of records) store.set(r.prefix, r); + return store; +} + +describe('verifyTokenScope', () => { + it('grants any scope when the record is wildcard "*"', () => { + const store = buildStore([ + { prefix: 'rooty-tk', fullToken: 'rooty-tkABCDEF', scopes: '*' }, + ]); + expect(verifyTokenScope('rooty-tkABCDEF', 'kafka:endpoint:read', store)).toBe(true); + expect(verifyTokenScope('rooty-tkABCDEF', 'kafka:endpoint:write', store)).toBe(true); + expect(verifyTokenScope('rooty-tkABCDEF', 'arbitrary:scope:we-invent', store)).toBe(true); + }); + + it('grants only listed scopes for a scoped record', () => { + const store = buildStore([ + { + prefix: 'reader-1', + fullToken: 'reader-1XXXXXX', + scopes: ['kafka:endpoint:read'], + }, + ]); + expect(verifyTokenScope('reader-1XXXXXX', 'kafka:endpoint:read', store)).toBe(true); + expect(verifyTokenScope('reader-1XXXXXX', 'kafka:endpoint:write', store)).toBe(false); + expect(verifyTokenScope('reader-1XXXXXX', 'anything-else', store)).toBe(false); + }); + + it('returns false for unknown / undefined / empty tokens', () => { + const store = buildStore([ + { prefix: 'rooty-tk', fullToken: 'rooty-tkABCDEF', scopes: '*' }, + ]); + expect(verifyTokenScope(undefined, 'any', store)).toBe(false); + expect(verifyTokenScope('', 'any', store)).toBe(false); + expect(verifyTokenScope('not-in-store', 'any', store)).toBe(false); + }); + + it('does not perform glob/wildcard matching on non-"*" lists', () => { + // ADR-0003: no CG-bound scopes, no wildcards within lists. A scope + // string is exact-match against the required scope. + const store = buildStore([ + { prefix: 'glob-bad', fullToken: 'glob-badZZZZZZ', scopes: ['kafka:*'] }, + ]); + expect(verifyTokenScope('glob-badZZZZZZ', 'kafka:endpoint:read', store)).toBe(false); + // The literal "kafka:*" does match its own string, but that's a + // pathological scope name no caller should ever ask for. Documenting + // for future maintainers, not endorsing. 
+ expect(verifyTokenScope('glob-badZZZZZZ', 'kafka:*', store)).toBe(true); + }); + + it('handles multiple scoped records in the same store independently', () => { + const store = buildStore([ + { + prefix: 'reader-x', + fullToken: 'reader-xAAAAAA', + scopes: ['kafka:endpoint:read'], + }, + { + prefix: 'writer-y', + fullToken: 'writer-yBBBBBB', + scopes: ['kafka:endpoint:write'], + }, + ]); + expect(verifyTokenScope('reader-xAAAAAA', 'kafka:endpoint:read', store)).toBe(true); + expect(verifyTokenScope('reader-xAAAAAA', 'kafka:endpoint:write', store)).toBe(false); + expect(verifyTokenScope('writer-yBBBBBB', 'kafka:endpoint:write', store)).toBe(true); + expect(verifyTokenScope('writer-yBBBBBB', 'kafka:endpoint:read', store)).toBe(false); + }); +}); diff --git a/packages/cli/test/daemon-auth-scopes.test.ts b/packages/cli/test/daemon-auth-scopes.test.ts new file mode 100644 index 000000000..e5ef560cb --- /dev/null +++ b/packages/cli/test/daemon-auth-scopes.test.ts @@ -0,0 +1,414 @@ +/** + * Slice 06 — auth scopes integration tests. + * + * The single most important test in this file is the backward-compat one: + * a legacy scope-less token (the format every existing deployment carries + * on disk) must continue to grant access to every kafka route. If that + * test ever goes red, every existing daemon's API surface silently locks + * up after a slice 06 upgrade — non-negotiable backward compat. + * + * The test uses a REAL on-disk token file (no mocks of token-store) so + * the legacy file format is exercised end to end through the same parser + * the daemon uses at startup. + * + * Mock surface: a stub `agent` with the same shape `handleKafkaRoutes` + * consumes — `query`, `update`, `publish`, `resolveAgentAddress`. No real + * chain or storage. The regression we are guarding is in the + * route-handler scope-check + token-store wiring, not the V10 publisher. + */ +import { Readable } from 'node:stream'; +import { mkdtemp, rm, writeFile } from 'node:fs/promises'; +import { join } from 'node:path'; +import { tmpdir } from 'node:os'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { handleKafkaRoutes } from '../src/daemon/routes/kafka.js'; +import { + loadTokenStore, + verifyTokenScope, + type TokenStore, +} from '../src/auth.js'; +import type { RequestContext } from '../src/daemon/routes/context.js'; + +const VALID_OWNER = '0xabcdefabcdefabcdefabcdefabcdefabcdefabcd'; +const VALID_URI = + `urn:dkg:kafka-endpoint:${VALID_OWNER}:33b58f60595c766739f72b29e4ee417888d1a46af8339a4b5bdb1c3a5692f652`; + +function buildMockReq( + method: string, + path: string, + body?: unknown, + authHeader?: string, +): Readable & { method: string; url: string; headers: Record } { + const stream = new Readable(); + if (body !== undefined) { + stream.push(typeof body === 'string' ? 
body : JSON.stringify(body)); + } + stream.push(null); + Object.assign(stream, { + method, + url: path, + headers: { authorization: authHeader }, + }); + return stream as Readable & { + method: string; + url: string; + headers: Record; + }; +} + +interface CapturedResponse { + status: number; + body: unknown; + headers: Record; +} + +function buildMockRes(): { res: any; captured: CapturedResponse } { + const captured: CapturedResponse = { status: 0, body: undefined, headers: {} }; + const res: any = { + headersSent: false, + writableEnded: false, + writeHead(status: number, headers?: Record) { + captured.status = status; + if (headers) Object.assign(captured.headers, headers); + return res; + }, + end(payload?: string) { + res.writableEnded = true; + if (payload !== undefined) { + try { + captured.body = JSON.parse(payload); + } catch { + captured.body = payload; + } + } + }, + }; + return { res, captured }; +} + +function buildAgent(opts: { + endpointBindings?: Array>; + listBindings?: Array>; +} = {}): any { + return { + query: vi.fn(async (_sparql: string, _opts?: { contextGraphId?: string }) => { + // Heuristic: list-shape vs single-shape. Tests only need "non-empty" + // bindings to exercise route-success paths; precise SPARQL discrimination + // is the kafka package's concern, exercised by package tests. + if (opts.endpointBindings) return { bindings: opts.endpointBindings }; + if (opts.listBindings) return { bindings: opts.listBindings }; + return { bindings: [] }; + }), + update: vi.fn(async () => ({})), + publish: vi.fn(async () => ({})), + resolveAgentAddress: vi.fn(() => VALID_OWNER), + }; +} + +function buildCtx( + req: any, + res: any, + agent: any, + store: TokenStore, + token: string | undefined, +): RequestContext { + const url = new URL(`http://localhost${req.url}`); + return { + req, + res, + agent, + path: url.pathname, + url, + requestAgentAddress: VALID_OWNER, + requestToken: token, + validTokens: new Set([...store.values()].map((r) => r.fullToken)), + tokenStore: store, + } as unknown as RequestContext; +} + +// ─────────────────────────────────────────────────────────────────────────── +// Backward compat — non-negotiable +// ─────────────────────────────────────────────────────────────────────────── + +describe('slice 06 — backward compat: legacy scope-less token', () => { + let dkgHome: string; + + beforeEach(async () => { + dkgHome = await mkdtemp(join(tmpdir(), 'dkg-auth-bwc-')); + process.env.DKG_HOME = dkgHome; + }); + + afterEach(async () => { + delete process.env.DKG_HOME; + await rm(dkgHome, { recursive: true, force: true }); + }); + + it('a legacy scope-less token file grants access to ALL kafka routes', async () => { + // Write a real legacy-format token file (single line, no TAB). + const legacyToken = 'legacy-no-scopes-token'; + await writeFile( + join(dkgHome, 'auth.token'), + `# DKG node API token — treat this like a password\n${legacyToken}\n`, + ); + + const store = await loadTokenStore(); + + // The token MUST verify against every conceivable scope (full access). + expect(verifyTokenScope(legacyToken, 'kafka:endpoint:read', store)).toBe(true); + expect(verifyTokenScope(legacyToken, 'kafka:endpoint:write', store)).toBe(true); + expect(verifyTokenScope(legacyToken, 'any-other-scope:we-dream-up', store)).toBe(true); + + // Hit each kafka route with the legacy token — every one must accept it + // and reach the route body (i.e. no 401/403 from the scope guard). 
+ + const agent = buildAgent({ + endpointBindings: [ + { + endpoint: `<${VALID_URI}>`, + broker: '"k.example:9092"', + topic: '"orders"', + messageFormat: '"application/json"', + publisher: ``, + endpointUrl: '', + issued: '"2026-05-04T12:34:56.000Z"^^', + }, + ], + }); + + // GET list — kafka:endpoint:read + { + const req = buildMockReq( + 'GET', + `/api/kafka/endpoint?contextGraphId=cg1`, + undefined, + `Bearer ${legacyToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, legacyToken)); + expect([200, 500]).toContain(captured.status); // 500 acceptable for stubbed query, NOT 401/403 + expect(captured.status).not.toBe(401); + expect(captured.status).not.toBe(403); + } + + // GET single — kafka:endpoint:read + { + const req = buildMockReq( + 'GET', + `/api/kafka/endpoint/${encodeURIComponent(VALID_URI)}?contextGraphId=cg1`, + undefined, + `Bearer ${legacyToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, legacyToken)); + expect(captured.status).not.toBe(401); + expect(captured.status).not.toBe(403); + } + + // POST register — kafka:endpoint:write + { + const req = buildMockReq( + 'POST', + `/api/kafka/endpoint`, + { + contextGraphId: 'cg1', + broker: 'k.example:9092', + topic: 'orders', + messageFormat: 'application/json', + }, + `Bearer ${legacyToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, legacyToken)); + expect(captured.status).not.toBe(401); + expect(captured.status).not.toBe(403); + } + + // POST verify — kafka:endpoint:write + { + const req = buildMockReq( + 'POST', + `/api/kafka/endpoint/verify`, + { + contextGraphId: 'cg1', + uri: VALID_URI, + securityProtocol: 'PLAINTEXT', + }, + `Bearer ${legacyToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, legacyToken)); + expect(captured.status).not.toBe(401); + expect(captured.status).not.toBe(403); + } + + // DELETE revoke — kafka:endpoint:write + { + const req = buildMockReq( + 'DELETE', + `/api/kafka/endpoint/${encodeURIComponent(VALID_URI)}?contextGraphId=cg1`, + undefined, + `Bearer ${legacyToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, legacyToken)); + expect(captured.status).not.toBe(401); + expect(captured.status).not.toBe(403); + } + }); +}); + +// ─────────────────────────────────────────────────────────────────────────── +// Per-route scope enforcement +// ─────────────────────────────────────────────────────────────────────────── + +describe('slice 06 — per-route scope enforcement', () => { + let dkgHome: string; + + beforeEach(async () => { + dkgHome = await mkdtemp(join(tmpdir(), 'dkg-auth-scope-')); + process.env.DKG_HOME = dkgHome; + }); + + afterEach(async () => { + delete process.env.DKG_HOME; + await rm(dkgHome, { recursive: true, force: true }); + }); + + it('a read-scoped token is accepted on GET and rejected (403) on POST register', async () => { + const readToken = 'read-only-token-aaaaaaaaaaaaaaaaaaaaaaaaaaaa'; + await writeFile( + join(dkgHome, 'auth.token'), + `${readToken}\tkafka:endpoint:read\n`, + ); + const store = await loadTokenStore(); + + expect(verifyTokenScope(readToken, 'kafka:endpoint:read', store)).toBe(true); + expect(verifyTokenScope(readToken, 'kafka:endpoint:write', store)).toBe(false); + + const agent = buildAgent({ + listBindings: [], // empty list is fine for the 
contract test + }); + + // GET — must NOT 403 + { + const req = buildMockReq( + 'GET', + `/api/kafka/endpoint?contextGraphId=cg1`, + undefined, + `Bearer ${readToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, readToken)); + expect(captured.status).not.toBe(403); + } + + // POST register — must 403 + { + const req = buildMockReq( + 'POST', + `/api/kafka/endpoint`, + { + contextGraphId: 'cg1', + broker: 'k.example:9092', + topic: 'orders', + messageFormat: 'application/json', + }, + `Bearer ${readToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, readToken)); + expect(captured.status).toBe(403); + expect((captured.body as any)?.error).toContain('kafka:endpoint:write'); + // 403 must NOT carry a WWW-Authenticate header — that header signals + // "no/invalid credentials"; this is "wrong scope". + expect( + Object.keys(captured.headers).map((k) => k.toLowerCase()), + ).not.toContain('www-authenticate'); + } + + // POST verify — must 403 + { + const req = buildMockReq( + 'POST', + `/api/kafka/endpoint/verify`, + { contextGraphId: 'cg1', uri: VALID_URI, securityProtocol: 'PLAINTEXT' }, + `Bearer ${readToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, readToken)); + expect(captured.status).toBe(403); + } + + // DELETE revoke — must 403 + { + const req = buildMockReq( + 'DELETE', + `/api/kafka/endpoint/${encodeURIComponent(VALID_URI)}?contextGraphId=cg1`, + undefined, + `Bearer ${readToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, readToken)); + expect(captured.status).toBe(403); + } + }); + + it('a write-scoped token is accepted on POST and rejected (403) on GET list', async () => { + const writeToken = 'write-only-token-bbbbbbbbbbbbbbbbbbbbbbbbbbb'; + await writeFile( + join(dkgHome, 'auth.token'), + `${writeToken}\tkafka:endpoint:write\n`, + ); + const store = await loadTokenStore(); + + expect(verifyTokenScope(writeToken, 'kafka:endpoint:write', store)).toBe(true); + expect(verifyTokenScope(writeToken, 'kafka:endpoint:read', store)).toBe(false); + + const agent = buildAgent(); + + // GET list — must 403 + { + const req = buildMockReq( + 'GET', + `/api/kafka/endpoint?contextGraphId=cg1`, + undefined, + `Bearer ${writeToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, writeToken)); + expect(captured.status).toBe(403); + expect((captured.body as any)?.error).toContain('kafka:endpoint:read'); + } + + // GET single — must 403 + { + const req = buildMockReq( + 'GET', + `/api/kafka/endpoint/${encodeURIComponent(VALID_URI)}?contextGraphId=cg1`, + undefined, + `Bearer ${writeToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, writeToken)); + expect(captured.status).toBe(403); + } + + // POST register — must NOT 403 + { + const req = buildMockReq( + 'POST', + `/api/kafka/endpoint`, + { + contextGraphId: 'cg1', + broker: 'k.example:9092', + topic: 'orders', + messageFormat: 'application/json', + }, + `Bearer ${writeToken}`, + ); + const { res, captured } = buildMockRes(); + await handleKafkaRoutes(buildCtx(req, res, agent, store, writeToken)); + expect(captured.status).not.toBe(403); + } + }); +}); diff --git a/packages/cli/test/kafka-route-verify.test.ts b/packages/cli/test/kafka-route-verify.test.ts index 
2e8a122f6..5025b25db 100644 --- a/packages/cli/test/kafka-route-verify.test.ts +++ b/packages/cli/test/kafka-route-verify.test.ts @@ -106,9 +106,25 @@ function buildMockAgent(opts: MockAgentOptions = {}): any { function buildCtx(req: any, res: any, agent: any): RequestContext { // Many RequestContext fields are unused by handleKafkaRoutes; cast to any // so we don't need a full DKGAgent / DkgConfig / OperationTracker. The - // route reads only `req`, `res`, `agent`, `path`, `url`, and - // `requestAgentAddress` — all populated below. + // route reads only `req`, `res`, `agent`, `path`, `url`, + // `requestAgentAddress`, plus slice 06's `requestToken` + `tokenStore` + // for the scope guard — all populated below. The store maps the test + // bearer token to scope `'*'` so verify-route assertions are unaffected + // by the scope check (covered separately in `daemon-auth-scopes.test.ts`). const url = new URL(`http://localhost${req.url}`); + const ROOT_TOKEN = 'ROOTROOT-verify-test-token'; + const tokenStore = new Map([ + [ROOT_TOKEN.slice(0, 8), { + prefix: ROOT_TOKEN.slice(0, 8), + fullToken: ROOT_TOKEN, + scopes: '*' as const, + }], + ]); + // Mutate req headers so extractBearerToken finds the root token. The + // route reads `ctx.requestToken` directly when set, but the daemon + // pipeline derives it from req.headers.authorization — populating both + // keeps the mock honest if either path is exercised. + if (req.headers) req.headers.authorization = `Bearer ${ROOT_TOKEN}`; return { req, res, @@ -116,6 +132,9 @@ function buildCtx(req: any, res: any, agent: any): RequestContext { path: url.pathname, url, requestAgentAddress: '0xabcdefabcdefabcdefabcdefabcdefabcdefabcd', + requestToken: ROOT_TOKEN, + tokenStore, + validTokens: new Set([ROOT_TOKEN]), } as unknown as RequestContext; } diff --git a/packages/cli/test/token-store.test.ts b/packages/cli/test/token-store.test.ts new file mode 100644 index 000000000..7e51316b5 --- /dev/null +++ b/packages/cli/test/token-store.test.ts @@ -0,0 +1,331 @@ +/** + * Slice 06 — pure unit tests for `token-store.ts`. + * + * No I/O. The token-store module is intentionally pure (parse + serialize + * + lookup); the real fs touches happen in `auth.ts`. 
Tests cover: + * - legacy-only files (every line lacks a TAB) + * - scoped-only files + * - mixed files (legacy + scoped + scoped-with-name + full record) + * - malformed lines (skipped, not crashing) + * - round-trip determinism (parse → serialize → parse) + * - lookup helpers (prefix collision fallback, public-record sanitization) + */ +import { describe, expect, it, vi } from 'vitest'; +import { + parseTokenFile, + serializeTokenStore, + lookupTokenRecord, + toPublicRecord, + tokenPrefix, + setTokenRecord, + deleteTokenRecord, + type TokenRecord, +} from '../src/token-store.js'; + +// ─────────────────────────────────────────────────────────────────────────── +// parser: legacy / scoped / mixed +// ─────────────────────────────────────────────────────────────────────────── + +describe('parseTokenFile — legacy-only', () => { + it('parses a single legacy token line as scopes="*"', () => { + const { store } = parseTokenFile('legacy-token-aaaaaaaaaaaaa\n'); + expect(store.size).toBe(1); + const r = [...store.values()][0]!; + expect(r.fullToken).toBe('legacy-token-aaaaaaaaaaaaa'); + expect(r.scopes).toBe('*'); + expect(r.name).toBeUndefined(); + expect(r.createdAt).toBeUndefined(); + expect(r.prefix).toBe('legacy-t'); + }); + + it('parses multiple legacy lines, preserves comments + blank lines', () => { + const raw = `# DKG node API token — treat this like a password +alphalegacy-token-1aaaaaaaa + +# secondary +betalegacy-token-2bbbbbbbb +`; + const { store, preserved } = parseTokenFile(raw); + expect(store.size).toBe(2); + expect([...store.values()].every((r) => r.scopes === '*')).toBe(true); + expect(preserved.length).toBe(3); // header comment, blank, secondary comment + }); +}); + +describe('parseTokenFile — scoped-only', () => { + it('parses scopes from a comma-separated list', () => { + const { store } = parseTokenFile('tk-aaaaaaaaaaaaaaaa\tkafka:endpoint:read,kafka:endpoint:write\n'); + const r = [...store.values()][0]!; + expect(r.scopes).toEqual(['kafka:endpoint:read', 'kafka:endpoint:write']); + }); + + it('parses an explicit "*" scope as full access', () => { + const { store } = parseTokenFile('tk-bbbbbbbbbbbbbbbb\t*\n'); + const r = [...store.values()][0]!; + expect(r.scopes).toBe('*'); + }); + + it('parses scopes + name', () => { + const { store } = parseTokenFile('tk-cccccccccccccccc\tkafka:endpoint:read\tcatchup-bot\n'); + const r = [...store.values()][0]!; + expect(r.scopes).toEqual(['kafka:endpoint:read']); + expect(r.name).toBe('catchup-bot'); + expect(r.createdAt).toBeUndefined(); + }); + + it('parses a full record (scopes + name + createdAt)', () => { + const raw = 'tk-dddddddddddddddd\tkafka:endpoint:read\tcatchup-bot\t2026-05-04T12:00:00.000Z\n'; + const { store } = parseTokenFile(raw); + const r = [...store.values()][0]!; + expect(r.scopes).toEqual(['kafka:endpoint:read']); + expect(r.name).toBe('catchup-bot'); + expect(r.createdAt).toBe('2026-05-04T12:00:00.000Z'); + }); +}); + +describe('parseTokenFile — mixed', () => { + it('parses mixed legacy + scoped lines in the same file', () => { + const raw = `# old +legacy-aaaaaaaaaaaaaaaaaaaaa +scoped-bbbbbbbbbbbbbbbbbbbbb\tkafka:endpoint:read +named-ccccccccccccccccccccc\tkafka:endpoint:write\tcatchup +full-ddddddddddddddddddddd\t*\troot\t2026-05-04T12:00:00.000Z +`; + const { store } = parseTokenFile(raw); + expect(store.size).toBe(4); + const records = [...store.values()]; + expect(records[0]!.scopes).toBe('*'); + expect(records[0]!.name).toBeUndefined(); + expect(records[1]!.scopes).toEqual(['kafka:endpoint:read']); + 
expect(records[2]!.name).toBe('catchup'); + expect(records[3]!.scopes).toBe('*'); + expect(records[3]!.createdAt).toBe('2026-05-04T12:00:00.000Z'); + }); +}); + +// ─────────────────────────────────────────────────────────────────────────── +// parser: malformed inputs +// ─────────────────────────────────────────────────────────────────────────── + +describe('parseTokenFile — malformed lines', () => { + it('skips lines with empty token field, warning sink fires', () => { + const warnings: string[] = []; + const { store } = parseTokenFile('\tkafka:endpoint:read\nvalid-token-aaaaaaaaaaaaaaa\n', { + onWarning: (m) => warnings.push(m), + }); + expect(store.size).toBe(1); + expect(warnings.length).toBe(1); + expect(warnings[0]).toContain('empty token'); + }); + + it('skips lines with empty scopes field (token + bare TAB)', () => { + const warnings: string[] = []; + const { store } = parseTokenFile('tk-eeeeeeeeeeeeeeee\t\nvalid-token-bbbbbbbbbbb\n', { + onWarning: (m) => warnings.push(m), + }); + expect(store.size).toBe(1); + expect(warnings[0]).toContain('bad scopes field'); + }); + + it('skips lines with too many tab fields (>4)', () => { + const warnings: string[] = []; + const raw = 'tk-fffffffffffffffff\tkafka:endpoint:read\tname\t2026-05-04T12:00:00.000Z\textra\n'; + const { store } = parseTokenFile(raw, { onWarning: (m) => warnings.push(m) }); + expect(store.size).toBe(0); + expect(warnings[0]).toContain('too many tab-separated fields'); + }); + + it('skips lines with disallowed scope characters (whitespace/special)', () => { + const warnings: string[] = []; + const { store } = parseTokenFile('tk-gggggggggggggggg\tbad scope with space\n', { + onWarning: (m) => warnings.push(m), + }); + expect(store.size).toBe(0); + expect(warnings[0]).toContain('bad scopes field'); + }); + + it('skips lines that mix `*` with explicit scopes (privilege-escalation guard)', () => { + const warnings: string[] = []; + const { store } = parseTokenFile('tk-hhhhhhhhhhhhhhhh\t*,kafka:endpoint:read\n', { + onWarning: (m) => warnings.push(m), + }); + expect(store.size).toBe(0); + expect(warnings[0]).toContain('bad scopes field'); + }); + + it('keeps the first record on prefix collision and warns', () => { + const warnings: string[] = []; + const raw = `firstcollideZZZZZ\nfirstcollideQQQQQ\n`; + const { store } = parseTokenFile(raw, { onWarning: (m) => warnings.push(m) }); + expect(store.size).toBe(1); + expect([...store.values()][0]!.fullToken).toBe('firstcollideZZZZZ'); + expect(warnings[0]).toContain('duplicate token prefix'); + }); + + it('does not crash on empty file', () => { + const { store, preserved } = parseTokenFile(''); + expect(store.size).toBe(0); + expect(preserved.length).toBe(0); + }); + + it('does not crash on file with only comments/blank lines', () => { + const { store, preserved } = parseTokenFile('# just a comment\n\n# another\n'); + expect(store.size).toBe(0); + expect(preserved.length).toBe(3); + }); + + it('handles CRLF-terminated lines', () => { + const { store } = parseTokenFile('legacy-crlf-token-aaa\r\nscoped-crlf-bbb\tkafka:endpoint:read\r\n'); + expect(store.size).toBe(2); + const records = [...store.values()]; + expect(records[0]!.fullToken).toBe('legacy-crlf-token-aaa'); + expect(records[1]!.scopes).toEqual(['kafka:endpoint:read']); + }); +}); + +// ─────────────────────────────────────────────────────────────────────────── +// serializer + round-trip +// ─────────────────────────────────────────────────────────────────────────── + +describe('serializeTokenStore', () => { + 
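+  // The invariant these cases pin, as a one-line property sketch (holds
+  // for well-formed input; malformed lines are dropped at parse time and
+  // so cannot round-trip):
+  //
+  //   serializeTokenStore(parseTokenFile(raw)) === raw
+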
it('round-trips a legacy-only file byte-identically', () => { + const raw = `# DKG node API token — treat this like a password +legacy-tok-aaaaaaaaaaaaaaaaaa +`; + const parsed = parseTokenFile(raw); + const out = serializeTokenStore(parsed); + expect(out).toBe(raw); + }); + + it('round-trips a fully-formed scoped file byte-identically', () => { + const raw = `# header +tok-aaaaaaaaaaaaaaaaaaaaa\tkafka:endpoint:read\tbot\t2026-05-04T12:00:00.000Z + +# trailing comment +tok-bbbbbbbbbbbbbbbbbbbbb\t*\troot\t2026-05-04T12:01:00.000Z +`; + const parsed = parseTokenFile(raw); + const out = serializeTokenStore(parsed); + expect(out).toBe(raw); + }); + + it('emits a single-field line for a `*`-scope record with no name/createdAt', () => { + const out = serializeTokenStore({ + store: new Map([ + ['legacy-x', { + prefix: 'legacy-x', + fullToken: 'legacy-xxxx', + scopes: '*' as const, + }], + ]), + preserved: [], + }); + expect(out).toBe('legacy-xxxx\n'); + }); + + it('emits scopes+empty-name when only createdAt is set on a scoped record', () => { + // Edge case: the parser exposes name=undefined, createdAt=string — the + // serializer must keep the createdAt by emitting an empty name field + // so the field-count survives. + const out = serializeTokenStore({ + store: new Map([ + ['scope-on', { + prefix: 'scope-on', + fullToken: 'scope-on-token', + scopes: ['kafka:endpoint:read'], + createdAt: '2026-05-04T12:00:00.000Z', + }], + ]), + preserved: [], + }); + expect(out).toBe('scope-on-token\tkafka:endpoint:read\t\t2026-05-04T12:00:00.000Z\n'); + }); + + it('round-trips an empty file', () => { + const out = serializeTokenStore({ store: new Map(), preserved: [] }); + expect(out).toBe(''); + }); +}); + +// ─────────────────────────────────────────────────────────────────────────── +// lookup helpers +// ─────────────────────────────────────────────────────────────────────────── + +describe('lookupTokenRecord', () => { + it('returns the record for a known token', () => { + const { store } = parseTokenFile('the-token-aaaaaaaaaaaaaa\tkafka:endpoint:read\n'); + const r = lookupTokenRecord('the-token-aaaaaaaaaaaaaa', store); + expect(r?.fullToken).toBe('the-token-aaaaaaaaaaaaaa'); + }); + + it('returns undefined for an unknown token', () => { + const { store } = parseTokenFile('the-token-aaaaaaaaaaaaaa\n'); + expect(lookupTokenRecord('not-stored', store)).toBeUndefined(); + expect(lookupTokenRecord(undefined, store)).toBeUndefined(); + }); + + it('falls back to linear scan on prefix collision', () => { + // Force a manual store with two records sharing a prefix. The parser + // would warn-and-skip the second; the lookup helper must still find a + // legitimate insertion (e.g. via setTokenRecord overriding). + const store = new Map(); + store.set('prefix01', { prefix: 'prefix01', fullToken: 'prefix01-XXXX', scopes: '*' }); + // Intentional second-record-with-same-prefix in a wrong key (wouldn't + // happen via parser but represents the collision condition). 
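+    // (Hypothetical store shape: the second record sits under the key
+    // 'prefix02' while its own `prefix` field says 'prefix01', so the
+    // keyed fast path misses and only a scan over `fullToken` values
+    // can resolve the lookup.)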
+ store.set('prefix02', { prefix: 'prefix01', fullToken: 'prefix01-YYYY', scopes: ['kafka:endpoint:read'] }); + const r = lookupTokenRecord('prefix01-YYYY', store); + expect(r?.scopes).toEqual(['kafka:endpoint:read']); + }); +}); + +describe('toPublicRecord', () => { + it('drops the fullToken', () => { + const r: TokenRecord = { + prefix: 'pr', + fullToken: 'long-secret-stuff', + scopes: ['kafka:endpoint:read'], + name: 'bot', + createdAt: '2026-05-04T12:00:00.000Z', + }; + const pub = toPublicRecord(r); + expect((pub as any).fullToken).toBeUndefined(); + expect(pub.prefix).toBe('pr'); + expect(pub.scopes).toEqual(['kafka:endpoint:read']); + expect(pub.name).toBe('bot'); + expect(pub.createdAt).toBe('2026-05-04T12:00:00.000Z'); + }); + + it('elides undefined name/createdAt instead of emitting them', () => { + const r: TokenRecord = { prefix: 'pr', fullToken: 'x', scopes: '*' }; + const pub = toPublicRecord(r); + expect(Object.keys(pub).sort()).toEqual(['prefix', 'scopes']); + }); +}); + +describe('store mutation helpers', () => { + it('setTokenRecord adds and replaces by prefix', () => { + const store = new Map(); + setTokenRecord(store, { prefix: 'aa', fullToken: 'aaa-1', scopes: '*' }); + expect(store.size).toBe(1); + setTokenRecord(store, { prefix: 'aa', fullToken: 'aaa-2', scopes: ['x:y'] }); + expect(store.size).toBe(1); + expect(store.get('aa')!.fullToken).toBe('aaa-2'); + }); + + it('deleteTokenRecord removes by prefix and reports presence', () => { + const store = new Map(); + setTokenRecord(store, { prefix: 'aa', fullToken: 'aaa', scopes: '*' }); + expect(deleteTokenRecord(store, 'aa')).toBe(true); + expect(deleteTokenRecord(store, 'aa')).toBe(false); + }); +}); + +describe('tokenPrefix', () => { + it('returns the first 8 characters', () => { + expect(tokenPrefix('abcdefghij')).toBe('abcdefgh'); + }); + + it('returns the whole token if shorter than 8 chars', () => { + expect(tokenPrefix('short')).toBe('short'); + }); +}); From 05f1fd96436fb67d813f6e7a5b4fb9bf6ad4fd4e Mon Sep 17 00:00:00 2001 From: Zvonimir Date: Tue, 5 May 2026 14:29:05 +0200 Subject: [PATCH 2/7] feat(cli): add /api/auth/tokens mint/list/revoke routes (slice 06 phase 2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Three new root-only verbs for token administration (ADR-0003): POST /api/auth/tokens mint a scoped token GET /api/auth/tokens list (prefix + scopes only — never the secret) DELETE /api/auth/tokens/ revoke (idempotent: 204 hit / 404 miss) Critical contracts pinned by tests: - Root-only via `record.scopes === '*'`. Any explicitly-scoped caller gets 403 — privilege-escalation guard. ADR-0003 explicitly forbids an `auth:tokens:*` scope; both that prefix and a wildcard `*` mint attempt are rejected with 400. - Mint response carries the full token EXACTLY ONCE (`{token, prefix, scopes, name?, createdAt}`). List/get responses go through `toPublicRecord` which strips `fullToken`. Tests assert the secret string never appears anywhere in the serialized list response. - Read-modify-write critical section serialized by an in-process mutex. The on-disk write goes through `writeFileAtomic` from `daemon/fs-utils.ts` (POSIX-rename atomic). Concurrency test (`token-store-concurrency.test.ts`) runs Promise.all of 10 parallel mints and asserts the resulting file re-parses with all 10 records + zero malformed-line warnings. - DELETE refuses to revoke the bearer token used to make the request (400) — guards against locking yourself out. 
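An illustrative operator flow against these routes (sketch only; the
base URL, port, and DKG_ROOT_TOKEN variable are placeholders, not part
of this change):

    const base = 'http://127.0.0.1:9000'; // placeholder
    const auth = { Authorization: `Bearer ${process.env.DKG_ROOT_TOKEN}` };

    // Mint: the only response that ever carries the secret.
    const minted = await fetch(`${base}/api/auth/tokens`, {
      method: 'POST',
      headers: { ...auth, 'Content-Type': 'application/json' },
      body: JSON.stringify({ scope: 'kafka:endpoint:read', name: 'catchup-bot' }),
    }).then((r) => r.json()); // { token, prefix, scopes, name, createdAt }

    // List: public records only; minted.token never appears in this body.
    await fetch(`${base}/api/auth/tokens`, { headers: auth });

    // Revoke by prefix: 204 on hit, 404 on miss.
    await fetch(`${base}/api/auth/tokens/${minted.prefix}`, {
      method: 'DELETE',
      headers: auth,
    });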
Wiring: - New `daemon/routes/auth.ts` (~270 LOC) with a single `handleAuthRoutes` exporting the three handlers. - `handle-request.ts` adds the dispatch after `handleKafkaRoutes`. - The new routes go through the standard `httpAuthGuard` for auth presence (401 if no token), THEN do the root-only check (403 if scope ≠ '*'). They are NOT in `PUBLIC_PATHS`. Tests: - `daemon-token-mint.test.ts`: 14 cases — mint shapes (string / array / comma-separated), wildcard + auth:tokens:* rejection, scoped-can't-mint 403, list-without-secrets, revoke 204/404, self-revoke 400, end-to-end mint→list→revoke. - `token-store-concurrency.test.ts`: parallel mints invariant. Co-Authored-By: Claude Opus 4.7 (1M context) --- packages/cli/src/daemon/handle-request.ts | 4 + packages/cli/src/daemon/routes/auth.ts | 357 ++++++++++++++ packages/cli/test/daemon-token-mint.test.ts | 445 ++++++++++++++++++ .../cli/test/token-store-concurrency.test.ts | 171 +++++++ 4 files changed, 977 insertions(+) create mode 100644 packages/cli/src/daemon/routes/auth.ts create mode 100644 packages/cli/test/daemon-token-mint.test.ts create mode 100644 packages/cli/test/token-store-concurrency.test.ts diff --git a/packages/cli/src/daemon/handle-request.ts b/packages/cli/src/daemon/handle-request.ts index 6be520bee..35461c2cb 100644 --- a/packages/cli/src/daemon/handle-request.ts +++ b/packages/cli/src/daemon/handle-request.ts @@ -331,6 +331,7 @@ import { handleQueryRoutes } from './routes/query.js'; import { handleLocalAgentsRoutes } from './routes/local-agents.js'; import { handleEpcisRoutes } from './routes/epcis.js'; import { handleKafkaRoutes } from './routes/kafka.js'; +import { handleAuthRoutes } from './routes/auth.js'; export async function handleRequest( @@ -442,5 +443,8 @@ export async function handleRequest( await handleKafkaRoutes(ctx); if (res.writableEnded) return; + await handleAuthRoutes(ctx); + if (res.writableEnded) return; + jsonResponse(res, 404, { error: 'Not found' }); } diff --git a/packages/cli/src/daemon/routes/auth.ts b/packages/cli/src/daemon/routes/auth.ts new file mode 100644 index 000000000..54b1ce954 --- /dev/null +++ b/packages/cli/src/daemon/routes/auth.ts @@ -0,0 +1,357 @@ +/** + * `/api/auth/tokens` route group — root-only token administration. + * + * Slice 06 (ADR-0003). Adds three verbs: + * + * POST /api/auth/tokens mint a scoped token + * GET /api/auth/tokens list (prefix + scopes only — never the secret) + * DELETE /api/auth/tokens/ revoke by prefix (idempotent: 204 on hit, 404 on miss) + * + * Root-only: a "root" caller is one whose bearer token's scope set is + * `'*'` in the loaded store. Every other caller (any explicitly-scoped + * token) gets 403. ADR-0003 explicitly forbids an `auth:tokens:write` + * scope — letting a scope mint scopes is a privilege-escalation path. + * + * The full token is returned ONCE on mint. Subsequent list/get responses + * carry only `{prefix, scopes, name?, createdAt?}`. Storing the token + * (so `verifyToken` matches on later requests) goes through + * `writeFileAtomic` from `daemon/fs-utils.ts` — same atomic-rename + * pattern the auto-update flow uses, so concurrent CLI + API mints + * cannot interleave bytes. 
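+ *
+ * On-disk shape of a minted record, as pinned by the token-store tests
+ * (TAB-separated fields, one record per line; name/createdAt optional):
+ *
+ *   <token>\t<scopes>[\t<name>[\t<createdAt>]]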
+ */
+
+import { randomBytes } from 'node:crypto';
+import { mkdir, chmod, readFile } from 'node:fs/promises';
+import { existsSync } from 'node:fs';
+import { dirname } from 'node:path';
+import {
+  jsonResponse,
+  readBody,
+  safeDecodeURIComponent,
+} from '../http-utils.js';
+import {
+  lookupTokenRecord,
+  parseTokenFile,
+  serializeTokenStore,
+  setTokenRecord,
+  deleteTokenRecord,
+  toPublicRecord,
+  tokenFilePath,
+  tokenPrefix,
+  type Scope,
+  type TokenRecord,
+} from '../../auth.js';
+import { writeFileAtomic } from '../fs-utils.js';
+import type { RequestContext } from './context.js';
+
+const BASE_PATH = '/api/auth/tokens';
+
+/**
+ * Serialization mutex. The on-disk write goes through `writeFileAtomic`
+ * (POSIX-rename atomic), but rename atomicity only guarantees that the
+ * resulting file is one of the inputs — under N concurrent writes you'd
+ * lose N-1 of them because each write reads-modifies-writes a snapshot
+ * of the in-memory state.
+ *
+ * The mutex serializes the read-modify-write critical section on a
+ * SINGLE-PROCESS basis. Multi-process mints (e.g. simultaneous CLI +
+ * daemon API call) need a separate file-lock — out of scope for slice 06,
+ * documented as a known limitation.
+ */
+let mintMutex: Promise<void> = Promise.resolve();
+async function withMintMutex<T>(work: () => Promise<T>): Promise<T> {
+  const prev = mintMutex;
+  let release!: () => void;
+  mintMutex = new Promise<void>((r) => { release = r; });
+  try {
+    await prev;
+    return await work();
+  } finally {
+    release();
+  }
+}
+
+/**
+ * Generate a fresh secret. Same shape as the daemon's first-run token
+ * (32 bytes of entropy → base64url) — keeps the on-disk representation
+ * uniform across legacy and scoped lines.
+ */
+function generateSecret(): string {
+  return randomBytes(32).toString('base64url');
+}
+
+/**
+ * Verify the caller is "root" (scope set is exactly `'*'`). Sends a 403
+ * + returns false if not. We accept ONLY the wildcard — an explicit list
+ * that happens to include every known scope is NOT root, because new
+ * scopes will be added over time and we don't want to grandfather old
+ * lists into administering them.
+ */
+function requireRoot(ctx: RequestContext): boolean {
+  const record = lookupTokenRecord(ctx.requestToken, ctx.tokenStore);
+  if (!record || record.scopes !== '*') {
+    jsonResponse(ctx.res, 403, {
+      error: 'Root token required for token administration',
+    });
+    return false;
+  }
+  return true;
+}
+
+export async function handleAuthRoutes(ctx: RequestContext): Promise<void> {
+  const { req, path } = ctx;
+
+  if (req.method === 'POST' && path === BASE_PATH) {
+    if (!requireRoot(ctx)) return;
+    return handleMint(ctx);
+  }
+
+  if (req.method === 'GET' && path === BASE_PATH) {
+    if (!requireRoot(ctx)) return;
+    return handleList(ctx);
+  }
+
+  if (req.method === 'DELETE' && path.startsWith(`${BASE_PATH}/`)) {
+    if (!requireRoot(ctx)) return;
+    return handleRevoke(ctx);
+  }
+}
+
+// ───────────────────────────────────────────────────────────────────────────
+// POST /api/auth/tokens — mint
+// ───────────────────────────────────────────────────────────────────────────
+
+interface MintRequestBody {
+  scope?: unknown;
+  scopes?: unknown;
+  name?: unknown;
+}
+
+/**
+ * Parse the request body into a normalized `{scopes, name?}` shape.
+ * Returns null on a parse error (a 400 has already been written).
+ *
+ * The HTTP API accepts both `scope` and `scopes`; both can be either a
+ * comma-separated string or an array of strings. The CLI sends `scope`
+ * as a string for ergonomic flag handling, but UI clients tend to send
+ * arrays — so we accept both.
+ */
+function parseMintBody(
+  ctx: RequestContext,
+  body: MintRequestBody,
+): { scopes: Scope[] | '*'; name?: string } | null {
+  const { res } = ctx;
+  const rawScopes = body.scopes ?? body.scope;
+
+  if (rawScopes === undefined || rawScopes === null) {
+    jsonResponse(res, 400, {
+      error: '"scope" (or "scopes") is required: comma-separated string or array',
+    });
+    return null;
+  }
+
+  let scopeList: string[];
+  if (Array.isArray(rawScopes)) {
+    if (!rawScopes.every((s): s is string => typeof s === 'string')) {
+      jsonResponse(res, 400, { error: '"scopes" array entries must all be strings' });
+      return null;
+    }
+    scopeList = rawScopes.map((s) => s.trim()).filter((s) => s.length > 0);
+  } else if (typeof rawScopes === 'string') {
+    scopeList = rawScopes.split(',').map((s) => s.trim()).filter((s) => s.length > 0);
+  } else {
+    jsonResponse(res, 400, { error: '"scope" must be a string or array of strings' });
+    return null;
+  }
+
+  if (scopeList.length === 0) {
+    jsonResponse(res, 400, { error: 'at least one scope is required' });
+    return null;
+  }
+
+  // ADR-0003 forbids both wildcard mints AND an auth-management scope:
+  //  - wildcard `*` would mint a new root (privilege escalation).
+  //  - `auth:tokens:*` would let a scoped token mint scoped tokens
+  //    (privilege escalation).
+  // Both checks fire BEFORE the character allowlist so the operator
+  // gets a precise diagnostic message instead of "disallowed characters".
+  if (scopeList.includes('*')) {
+    jsonResponse(res, 400, {
+      error: 'wildcard scope "*" cannot be minted via API (root tokens are operator-managed)',
+    });
+    return null;
+  }
+  for (const s of scopeList) {
+    if (s.startsWith('auth:tokens:')) {
+      jsonResponse(res, 400, {
+        error: `scope "${s}" is reserved (token administration is root-only)`,
+      });
+      return null;
+    }
+    // Character allowlist (no `*` — that's the wildcard case above).
+    if (!/^[A-Za-z0-9_.:\-]+$/.test(s)) {
+      jsonResponse(res, 400, { error: `scope "${s}" contains disallowed characters` });
+      return null;
+    }
+  }
+
+  let name: string | undefined;
+  if (body.name !== undefined && body.name !== null) {
+    if (typeof body.name !== 'string') {
+      jsonResponse(res, 400, { error: '"name" must be a string when provided' });
+      return null;
+    }
+    const trimmed = body.name.trim();
+    if (trimmed.length > 0) {
+      // The on-disk format uses TAB as a field separator — names with TABs
+      // would corrupt the row. Reject early with a clear message.
+      if (trimmed.includes('\t') || trimmed.includes('\n')) {
+        jsonResponse(res, 400, {
+          error: '"name" cannot contain tab or newline characters',
+        });
+        return null;
+      }
+      name = trimmed;
+    }
+  }
+
+  const result: { scopes: Scope[] | '*'; name?: string } = { scopes: scopeList };
+  if (name !== undefined) result.name = name;
+  return result;
+}
+
+async function handleMint(ctx: RequestContext): Promise<void> {
+  const { req, res } = ctx;
+
+  const rawBody = await readBody(req);
+  let parsed: MintRequestBody;
+  try {
+    parsed = (rawBody.length === 0 ? {} : JSON.parse(rawBody)) as MintRequestBody;
+  } catch {
+    return jsonResponse(res, 400, { error: 'Invalid JSON in request body' });
+  }
+  if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) {
+    return jsonResponse(res, 400, { error: 'Body must be a JSON object' });
+  }
+
+  const fields = parseMintBody(ctx, parsed);
+  if (fields === null) return;
+
+  const newToken = generateSecret();
+  const record: TokenRecord = {
+    prefix: tokenPrefix(newToken),
+    fullToken: newToken,
+    scopes: fields.scopes,
+    createdAt: new Date().toISOString(),
+  };
+  if (fields.name !== undefined) record.name = fields.name;
+
+  await withMintMutex(async () => {
+    // Read-modify-write: re-read the file inside the mutex so we don't
+    // clobber records added by a concurrent CLI mint that landed
+    // BETWEEN the daemon's startup load and now. Without this, two
+    // sequential API mints would each append their record but only
+    // the second would survive on disk.
+    const filePath = tokenFilePath();
+    const existing = existsSync(filePath)
+      ? parseTokenFile(await readFile(filePath, 'utf-8'), {
+          onWarning: (msg) => console.warn(`[auth] ${msg}`),
+        })
+      : { store: new Map(), preserved: [] as Array<{ text: string; index: number }> };
+
+    setTokenRecord(existing.store, record);
+
+    // Mirror into the in-memory store so subsequent requests within
+    // this daemon process accept the new token without a restart.
+    setTokenRecord(ctx.tokenStore, record);
+    ctx.validTokens.add(newToken);
+
+    await mkdir(dirname(filePath), { recursive: true });
+    await writeFileAtomic(filePath, serializeTokenStore(existing));
+    await chmod(filePath, 0o600);
+  });
+
+  // Mint response — the ONLY surface that returns the full token.
+  // Subsequent GET/DELETE responses MUST never include `token`.
+  return jsonResponse(res, 201, {
+    token: newToken,
+    prefix: record.prefix,
+    scopes: record.scopes,
+    ...(record.name !== undefined ? { name: record.name } : {}),
+    createdAt: record.createdAt,
+  });
+}
+
+// ───────────────────────────────────────────────────────────────────────────
+// GET /api/auth/tokens — list (no secrets)
+// ───────────────────────────────────────────────────────────────────────────
+
+async function handleList(ctx: RequestContext): Promise<void> {
+  const { res } = ctx;
+  // `toPublicRecord` strips `fullToken`. Keep the iteration order from
+  // the in-memory store (insertion order) so the operator sees the
+  // mint order they remember.
+  const tokens = [...ctx.tokenStore.values()].map(toPublicRecord);
+  return jsonResponse(res, 200, { tokens });
+}
+
+// ───────────────────────────────────────────────────────────────────────────
+// DELETE /api/auth/tokens/<prefix> — revoke
+// ───────────────────────────────────────────────────────────────────────────
+
+async function handleRevoke(ctx: RequestContext): Promise<void> {
+  const { res, path } = ctx;
+  const encoded = path.slice(`${BASE_PATH}/`.length);
+  if (!encoded) {
+    return jsonResponse(res, 400, { error: 'Missing token prefix in path' });
+  }
+  if (encoded.includes('/')) {
+    return jsonResponse(res, 404, { error: 'Not found' });
+  }
+  const prefix = safeDecodeURIComponent(encoded, res);
+  if (prefix === null) return; // 400 already sent
+
+  // Guard against revoking yourself out of the system. We'd survive in
+  // memory until the next process restart, but the on-disk file would
+  // no longer carry the token, so the next daemon start would auto-
+  // generate a fresh one — disruptive and easy to footgun. Block it
+  // and tell the caller why.
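+  // (The comparison below is by the 8-char prefix, the same key the
+  // store is indexed by, so "self" means "the record the caller's
+  // bearer token resolves to", not a string match on the full secret.)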
+  const callerRecord = lookupTokenRecord(ctx.requestToken, ctx.tokenStore);
+  if (callerRecord && callerRecord.prefix === prefix) {
+    return jsonResponse(res, 400, {
+      error: 'Refusing to revoke the bearer token used to make this request',
+    });
+  }
+
+  const result = await withMintMutex(async () => {
+    const filePath = tokenFilePath();
+    if (!existsSync(filePath)) return { found: false };
+
+    const existing = parseTokenFile(await readFile(filePath, 'utf-8'), {
+      onWarning: (msg) => console.warn(`[auth] ${msg}`),
+    });
+    const target = existing.store.get(prefix);
+    if (!target) return { found: false };
+
+    deleteTokenRecord(existing.store, prefix);
+    deleteTokenRecord(ctx.tokenStore, prefix);
+    ctx.validTokens.delete(target.fullToken);
+
+    await writeFileAtomic(filePath, serializeTokenStore(existing));
+    await chmod(filePath, 0o600);
+    return { found: true };
+  });
+
+  if (!result.found) {
+    // Documented choice: 404 on miss (NOT 204). The semantic is "the
+    // resource you tried to delete doesn't exist", which 404 expresses
+    // more honestly than 204's "I deleted it" (lying about the world).
+    // Idempotency is preserved in the sense that repeating the call
+    // continues to return 404 deterministically.
+    return jsonResponse(res, 404, { error: `No token with prefix "${prefix}"` });
+  }
+  // 204 has no body per RFC 7230. Bypass `jsonResponse` (which always
+  // writes one) so we don't ship `null` as a body.
+  res.writeHead(204);
+  res.end();
+}
diff --git a/packages/cli/test/daemon-token-mint.test.ts b/packages/cli/test/daemon-token-mint.test.ts
new file mode 100644
index 000000000..e8df92242
--- /dev/null
+++ b/packages/cli/test/daemon-token-mint.test.ts
@@ -0,0 +1,445 @@
+/**
+ * Slice 06 — integration tests for `/api/auth/tokens` (mint / list / revoke).
+ *
+ * Stands up `handleAuthRoutes` directly with a real on-disk auth file so
+ * the read-modify-write critical section is exercised end to end. No
+ * mocks of `token-store`. Critical contracts pinned:
+ *
+ * - root token (scopes='*') CAN mint, list, revoke
+ * - any explicitly-scoped token CANNOT (403 — privilege-escalation guard)
+ * - mint response carries the full token EXACTLY ONCE; subsequent
+ *   list/get responses MUST not leak it
+ * - file write goes through writeFileAtomic (POSIX-rename atomic);
+ *   a parallel-mints test covers the well-formed-file invariant
+ * - DELETE returns 204 on hit, 404 on miss; refuses to revoke the
+ *   bearer token the request was made with
+ */
+import { Readable } from 'node:stream';
+import { mkdtemp, readFile, rm, writeFile } from 'node:fs/promises';
+import { join } from 'node:path';
+import { tmpdir } from 'node:os';
+import { afterEach, beforeEach, describe, expect, it } from 'vitest';
+import { handleAuthRoutes } from '../src/daemon/routes/auth.js';
+import { loadTokenStore, type TokenStore } from '../src/auth.js';
+import type { RequestContext } from '../src/daemon/routes/context.js';
+
+function buildMockReq(
+  method: string,
+  path: string,
+  body?: unknown,
+  authHeader?: string,
+): Readable & { method: string; url: string; headers: Record<string, string | undefined> } {
+  const stream = new Readable();
+  if (body !== undefined) {
+    stream.push(typeof body === 'string' ? body : JSON.stringify(body));
+  }
+  stream.push(null);
+  Object.assign(stream, {
+    method,
+    url: path,
+    headers: { authorization: authHeader },
+  });
+  return stream as Readable & {
+    method: string;
+    url: string;
+    headers: Record<string, string | undefined>;
+  };
+}
+
+interface CapturedResponse {
+  status: number;
+  body: unknown;
+  headers: Record<string, string>;
+}
+
+function buildMockRes(): { res: any; captured: CapturedResponse } {
+  const captured: CapturedResponse = { status: 0, body: undefined, headers: {} };
+  const res: any = {
+    headersSent: false,
+    writableEnded: false,
+    writeHead(status: number, headers?: Record<string, string>) {
+      captured.status = status;
+      if (headers) Object.assign(captured.headers, headers);
+      return res;
+    },
+    end(payload?: string) {
+      res.writableEnded = true;
+      if (payload !== undefined) {
+        try {
+          captured.body = JSON.parse(payload);
+        } catch {
+          captured.body = payload;
+        }
+      }
+    },
+  };
+  return { res, captured };
+}
+
+function buildCtx(
+  req: any,
+  res: any,
+  store: TokenStore,
+  validTokens: Set<string>,
+  token?: string,
+): RequestContext {
+  const url = new URL(`http://localhost${req.url}`);
+  return {
+    req,
+    res,
+    path: url.pathname,
+    url,
+    requestToken: token,
+    tokenStore: store,
+    validTokens,
+  } as unknown as RequestContext;
+}
+
+// ───────────────────────────────────────────────────────────────────────────
+// Setup helpers
+// ───────────────────────────────────────────────────────────────────────────
+
+async function loadStoreAndSet(): Promise<{ store: TokenStore; validTokens: Set<string> }> {
+  const store = await loadTokenStore();
+  const validTokens = new Set([...store.values()].map((r) => r.fullToken));
+  return { store, validTokens };
+}
+
+describe('POST /api/auth/tokens — mint', () => {
+  let dkgHome: string;
+  const ROOT_TOKEN = 'root-token-aaaaaaaaaaaaaaaaaaaaa';
+
+  beforeEach(async () => {
+    dkgHome = await mkdtemp(join(tmpdir(), 'dkg-mint-'));
+    process.env.DKG_HOME = dkgHome;
+    // Pre-populate with a single root (legacy) token so we have a known
+    // caller to exercise the root-only check.
+    await writeFile(join(dkgHome, 'auth.token'), `${ROOT_TOKEN}\n`);
+  });
+
+  afterEach(async () => {
+    delete process.env.DKG_HOME;
+    await rm(dkgHome, { recursive: true, force: true });
+  });
+
+  it('mints a scoped token; response carries the full secret ONCE', async () => {
+    const { store, validTokens } = await loadStoreAndSet();
+    const req = buildMockReq('POST', '/api/auth/tokens', {
+      scope: 'kafka:endpoint:write',
+      name: 'kafka-publisher-bot',
+    }, `Bearer ${ROOT_TOKEN}`);
+    const { res, captured } = buildMockRes();
+    await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN));
+    expect(captured.status).toBe(201);
+    const body = captured.body as any;
+    expect(typeof body.token).toBe('string');
+    expect(body.token.length).toBeGreaterThan(20);
+    expect(body.prefix.length).toBe(8);
+    expect(body.prefix).toBe(body.token.slice(0, 8));
+    expect(body.scopes).toEqual(['kafka:endpoint:write']);
+    expect(body.name).toBe('kafka-publisher-bot');
+    expect(typeof body.createdAt).toBe('string');
+
+    // The minted token now lives in the store and on disk.
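+    // (In memory via `validTokens`, which the route mirrors into; on
+    // disk via the file the mint wrote through writeFileAtomic.)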
+ expect(validTokens.has(body.token)).toBe(true); + const onDisk = await readFile(join(dkgHome, 'auth.token'), 'utf-8'); + expect(onDisk).toContain(body.token); + expect(onDisk).toContain('kafka:endpoint:write'); + expect(onDisk).toContain('kafka-publisher-bot'); + }); + + it('mints with a comma-separated scope string', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq('POST', '/api/auth/tokens', { + scope: 'kafka:endpoint:read, kafka:endpoint:write', + }, `Bearer ${ROOT_TOKEN}`); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(201); + expect((captured.body as any).scopes).toEqual([ + 'kafka:endpoint:read', + 'kafka:endpoint:write', + ]); + }); + + it('mints with a scopes array', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq('POST', '/api/auth/tokens', { + scopes: ['kafka:endpoint:read'], + }, `Bearer ${ROOT_TOKEN}`); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(201); + expect((captured.body as any).scopes).toEqual(['kafka:endpoint:read']); + }); + + it('rejects when no scope is supplied (400)', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq('POST', '/api/auth/tokens', {}, `Bearer ${ROOT_TOKEN}`); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(400); + expect((captured.body as any).error).toContain('scope'); + }); + + it('rejects an attempt to mint a wildcard "*" scope (privilege-escalation guard)', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq('POST', '/api/auth/tokens', { + scope: '*', + }, `Bearer ${ROOT_TOKEN}`); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(400); + expect((captured.body as any).error).toContain('wildcard'); + }); + + it('rejects an attempt to mint an auth:tokens:* scope (privilege-escalation guard)', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq('POST', '/api/auth/tokens', { + scope: 'auth:tokens:write', + }, `Bearer ${ROOT_TOKEN}`); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(400); + expect((captured.body as any).error).toContain('reserved'); + }); + + it('rejects when a scoped (non-root) token attempts to mint (403)', async () => { + // Pre-create a scoped (non-root) token on disk so it lands in the + // loaded store. 
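+    // (The second line below uses the TAB-separated scoped format:
+    // token\tscopes.)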
+    const SCOPED = 'scoped-only-bbbbbbbbbbbbbbbbbbbbb';
+    await writeFile(
+      join(dkgHome, 'auth.token'),
+      `${ROOT_TOKEN}\n${SCOPED}\tkafka:endpoint:read\n`,
+    );
+    const { store, validTokens } = await loadStoreAndSet();
+    const req = buildMockReq('POST', '/api/auth/tokens', {
+      scope: 'kafka:endpoint:write',
+    }, `Bearer ${SCOPED}`);
+    const { res, captured } = buildMockRes();
+    await handleAuthRoutes(buildCtx(req, res, store, validTokens, SCOPED));
+    expect(captured.status).toBe(403);
+    expect((captured.body as any).error).toContain('Root');
+  });
+});
+
+// ───────────────────────────────────────────────────────────────────────────
+// GET /api/auth/tokens — list (no secrets)
+// ───────────────────────────────────────────────────────────────────────────
+
+describe('GET /api/auth/tokens — list', () => {
+  let dkgHome: string;
+  const ROOT_TOKEN = 'root-token-cccccccccccccccccccccc';
+
+  beforeEach(async () => {
+    dkgHome = await mkdtemp(join(tmpdir(), 'dkg-list-'));
+    process.env.DKG_HOME = dkgHome;
+    // Pre-populate with one root + one scoped token.
+    await writeFile(
+      join(dkgHome, 'auth.token'),
+      `${ROOT_TOKEN}\n` +
+        `scoped-Aaaaaaaaaaaaaaaaaaaa\tkafka:endpoint:read\tcatchup-bot\t2026-05-04T12:00:00.000Z\n`,
+    );
+  });
+
+  afterEach(async () => {
+    delete process.env.DKG_HOME;
+    await rm(dkgHome, { recursive: true, force: true });
+  });
+
+  it('lists tokens with prefix + scopes only (no fullToken)', async () => {
+    const { store, validTokens } = await loadStoreAndSet();
+    const req = buildMockReq('GET', '/api/auth/tokens', undefined, `Bearer ${ROOT_TOKEN}`);
+    const { res, captured } = buildMockRes();
+    await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN));
+    expect(captured.status).toBe(200);
+    const body = captured.body as { tokens: Array<Record<string, unknown>> };
+    expect(body.tokens.length).toBe(2);
+    for (const t of body.tokens) {
+      expect((t as any).fullToken).toBeUndefined();
+      expect((t as any).token).toBeUndefined();
+      expect(typeof t.prefix).toBe('string');
+      expect((t.prefix as string).length).toBeLessThanOrEqual(8);
+    }
+    // The full token must NOT appear ANYWHERE in the serialized response.
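+    // (Serializing the whole body casts the widest net: a leak through
+    // any nested field fails this, not just a top-level `token` key.)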
+ expect(JSON.stringify(captured.body)).not.toContain(ROOT_TOKEN); + expect(JSON.stringify(captured.body)).not.toContain('scoped-Aaaaaaaaaaaaaaaaaaaa'); + }); + + it('rejects a scoped (non-root) caller with 403', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq( + 'GET', + '/api/auth/tokens', + undefined, + `Bearer scoped-Aaaaaaaaaaaaaaaaaaaa`, + ); + const { res, captured } = buildMockRes(); + await handleAuthRoutes( + buildCtx(req, res, store, validTokens, 'scoped-Aaaaaaaaaaaaaaaaaaaa'), + ); + expect(captured.status).toBe(403); + }); +}); + +// ─────────────────────────────────────────────────────────────────────────── +// DELETE /api/auth/tokens/ — revoke +// ─────────────────────────────────────────────────────────────────────────── + +describe('DELETE /api/auth/tokens/', () => { + let dkgHome: string; + const ROOT_TOKEN = 'root-token-ddddddddddddddddddddd'; + + beforeEach(async () => { + dkgHome = await mkdtemp(join(tmpdir(), 'dkg-revoke-')); + process.env.DKG_HOME = dkgHome; + await writeFile( + join(dkgHome, 'auth.token'), + `${ROOT_TOKEN}\n` + + `target01XXXXXXXXXXXXX\tkafka:endpoint:read\n`, + ); + }); + + afterEach(async () => { + delete process.env.DKG_HOME; + await rm(dkgHome, { recursive: true, force: true }); + }); + + it('revokes by prefix: 204 on hit, file no longer contains the token', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq( + 'DELETE', + '/api/auth/tokens/target01', + undefined, + `Bearer ${ROOT_TOKEN}`, + ); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(204); + const onDisk = await readFile(join(dkgHome, 'auth.token'), 'utf-8'); + expect(onDisk).not.toContain('target01XXXXXXXXXXXXX'); + expect(validTokens.has('target01XXXXXXXXXXXXX')).toBe(false); + }); + + it('returns 404 when revoking a non-existent prefix (idempotent)', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq( + 'DELETE', + '/api/auth/tokens/notreall', + undefined, + `Bearer ${ROOT_TOKEN}`, + ); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(404); + // Repeating the call MUST stay deterministic (still 404). 
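+    // (Identical second request: same prefix, same root caller.)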
+ const req2 = buildMockReq( + 'DELETE', + '/api/auth/tokens/notreall', + undefined, + `Bearer ${ROOT_TOKEN}`, + ); + const { res: res2, captured: captured2 } = buildMockRes(); + await handleAuthRoutes(buildCtx(req2, res2, store, validTokens, ROOT_TOKEN)); + expect(captured2.status).toBe(404); + }); + + it('refuses to revoke the bearer token used to make the request (400)', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const rootPrefix = ROOT_TOKEN.slice(0, 8); + const req = buildMockReq( + 'DELETE', + `/api/auth/tokens/${rootPrefix}`, + undefined, + `Bearer ${ROOT_TOKEN}`, + ); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + expect(captured.status).toBe(400); + expect((captured.body as any).error).toContain('Refusing to revoke'); + }); + + it('rejects a scoped (non-root) caller with 403', async () => { + const { store, validTokens } = await loadStoreAndSet(); + const req = buildMockReq( + 'DELETE', + '/api/auth/tokens/target01', + undefined, + `Bearer target01XXXXXXXXXXXXX`, + ); + const { res, captured } = buildMockRes(); + await handleAuthRoutes( + buildCtx(req, res, store, validTokens, 'target01XXXXXXXXXXXXX'), + ); + expect(captured.status).toBe(403); + }); +}); + +// ─────────────────────────────────────────────────────────────────────────── +// END-TO-END: mint → list → use → revoke +// ─────────────────────────────────────────────────────────────────────────── + +describe('end-to-end mint flow', () => { + let dkgHome: string; + const ROOT_TOKEN = 'root-token-eeeeeeeeeeeeeeeeeeeee'; + + beforeEach(async () => { + dkgHome = await mkdtemp(join(tmpdir(), 'dkg-e2e-mint-')); + process.env.DKG_HOME = dkgHome; + await writeFile(join(dkgHome, 'auth.token'), `${ROOT_TOKEN}\n`); + }); + + afterEach(async () => { + delete process.env.DKG_HOME; + await rm(dkgHome, { recursive: true, force: true }); + }); + + it('mint → list reveals it (no secret) → revoke clears it from list', async () => { + const { store, validTokens } = await loadStoreAndSet(); + + // mint + const mintReq = buildMockReq( + 'POST', + '/api/auth/tokens', + { scope: 'kafka:endpoint:write', name: 'producer' }, + `Bearer ${ROOT_TOKEN}`, + ); + const { res: mintRes, captured: mintCaptured } = buildMockRes(); + await handleAuthRoutes(buildCtx(mintReq, mintRes, store, validTokens, ROOT_TOKEN)); + expect(mintCaptured.status).toBe(201); + const mintBody = mintCaptured.body as { token: string; prefix: string }; + + // list — appears, no secret + const listReq = buildMockReq('GET', '/api/auth/tokens', undefined, `Bearer ${ROOT_TOKEN}`); + const { res: listRes, captured: listCaptured } = buildMockRes(); + await handleAuthRoutes(buildCtx(listReq, listRes, store, validTokens, ROOT_TOKEN)); + expect(listCaptured.status).toBe(200); + const listBody = listCaptured.body as { tokens: Array<{ prefix: string }> }; + const found = listBody.tokens.find((t) => t.prefix === mintBody.prefix); + expect(found).toBeDefined(); + expect(JSON.stringify(listCaptured.body)).not.toContain(mintBody.token); + + // revoke + const revokeReq = buildMockReq( + 'DELETE', + `/api/auth/tokens/${mintBody.prefix}`, + undefined, + `Bearer ${ROOT_TOKEN}`, + ); + const { res: revokeRes, captured: revokeCaptured } = buildMockRes(); + await handleAuthRoutes( + buildCtx(revokeReq, revokeRes, store, validTokens, ROOT_TOKEN), + ); + expect(revokeCaptured.status).toBe(204); + + // list again — no longer present + const listReq2 = buildMockReq('GET', '/api/auth/tokens', 
undefined, `Bearer ${ROOT_TOKEN}`); + const { res: listRes2, captured: listCaptured2 } = buildMockRes(); + await handleAuthRoutes(buildCtx(listReq2, listRes2, store, validTokens, ROOT_TOKEN)); + expect( + (listCaptured2.body as { tokens: Array<{ prefix: string }> }).tokens.find( + (t) => t.prefix === mintBody.prefix, + ), + ).toBeUndefined(); + }); +}); diff --git a/packages/cli/test/token-store-concurrency.test.ts b/packages/cli/test/token-store-concurrency.test.ts new file mode 100644 index 000000000..2ebb0b421 --- /dev/null +++ b/packages/cli/test/token-store-concurrency.test.ts @@ -0,0 +1,171 @@ +/** + * Slice 06 — concurrency test for parallel token mints. + * + * Reuses the real mint route (not a stub) so the read-modify-write + * critical section + `writeFileAtomic` + the in-process mutex are + * exercised together. The invariant we're guarding: + * + * `Promise.all` of N parallel mints produces a well-formed token file + * that, when re-parsed from disk, contains all N records (no truncation, + * no interleaved bytes, no lost records). + * + * Without the mutex, two parallel mints would each read the same + * pre-write snapshot and each WRITE only its own delta back, so N-1 + * mints would silently disappear from the file. The mutex serializes + * the critical section per-process; the test would fail with N=10 → 1-2 + * records on disk. + * + * Multi-process mints (e.g. CLI + daemon-API simultaneously) are out of + * scope for slice 06 and are documented as a known limitation in the + * route module. + */ +import { Readable } from 'node:stream'; +import { mkdtemp, readFile, rm, writeFile } from 'node:fs/promises'; +import { join } from 'node:path'; +import { tmpdir } from 'node:os'; +import { afterEach, beforeEach, describe, expect, it } from 'vitest'; +import { handleAuthRoutes } from '../src/daemon/routes/auth.js'; +import { + loadTokenStore, + parseTokenFile, + type TokenStore, +} from '../src/auth.js'; +import type { RequestContext } from '../src/daemon/routes/context.js'; + +function buildMockReq( + method: string, + path: string, + body: unknown, + authHeader: string, +): Readable & { method: string; url: string; headers: Record } { + const stream = new Readable(); + stream.push(JSON.stringify(body)); + stream.push(null); + Object.assign(stream, { + method, + url: path, + headers: { authorization: authHeader }, + }); + return stream as Readable & { + method: string; + url: string; + headers: Record; + }; +} + +interface CapturedResponse { + status: number; + body: unknown; +} +function buildMockRes(): { res: any; captured: CapturedResponse } { + const captured: CapturedResponse = { status: 0, body: undefined }; + const res: any = { + headersSent: false, + writableEnded: false, + writeHead(status: number) { + captured.status = status; + return res; + }, + end(payload?: string) { + res.writableEnded = true; + if (payload !== undefined) { + try { + captured.body = JSON.parse(payload); + } catch { + captured.body = payload; + } + } + }, + }; + return { res, captured }; +} + +function buildCtx( + req: any, + res: any, + store: TokenStore, + validTokens: Set, + token: string, +): RequestContext { + const url = new URL(`http://localhost${req.url}`); + return { + req, + res, + path: url.pathname, + url, + requestToken: token, + tokenStore: store, + validTokens, + } as unknown as RequestContext; +} + +describe('parallel mint concurrency', () => { + let dkgHome: string; + const ROOT_TOKEN = 'root-token-fffffffffffffffffffffff'; + + beforeEach(async () => { + dkgHome = await 
mkdtemp(join(tmpdir(), 'dkg-mint-concurrency-')); + process.env.DKG_HOME = dkgHome; + await writeFile(join(dkgHome, 'auth.token'), `${ROOT_TOKEN}\n`); + }); + + afterEach(async () => { + delete process.env.DKG_HOME; + await rm(dkgHome, { recursive: true, force: true }); + }); + + it('Promise.all of 10 parallel mints yields a well-formed file with all 10 records', async () => { + const store = await loadTokenStore(); + const validTokens = new Set([...store.values()].map((r) => r.fullToken)); + + const N = 10; + const mintOne = async (i: number): Promise<{ status: number; token: string; prefix: string }> => { + const req = buildMockReq( + 'POST', + '/api/auth/tokens', + { scope: 'kafka:endpoint:read', name: `parallel-${i}` }, + `Bearer ${ROOT_TOKEN}`, + ); + const { res, captured } = buildMockRes(); + await handleAuthRoutes(buildCtx(req, res, store, validTokens, ROOT_TOKEN)); + const body = captured.body as { token: string; prefix: string }; + return { status: captured.status, token: body.token, prefix: body.prefix }; + }; + + const results = await Promise.all( + Array.from({ length: N }, (_unused, i) => mintOne(i)), + ); + + // Every mint succeeded. + for (const r of results) { + expect(r.status).toBe(201); + expect(typeof r.token).toBe('string'); + expect(r.token.length).toBeGreaterThan(20); + } + + // The on-disk file is well-formed: re-parsing it yields the root + + // all N minted records, with no malformed-line warnings. + const onDisk = await readFile(join(dkgHome, 'auth.token'), 'utf-8'); + const warnings: string[] = []; + const parsed = parseTokenFile(onDisk, { onWarning: (m) => warnings.push(m) }); + + expect(warnings).toEqual([]); + expect(parsed.store.size).toBe(N + 1); // root + 10 minted + + // Every minted prefix is present on disk. + for (const r of results) { + const record = parsed.store.get(r.prefix); + expect(record).toBeDefined(); + expect(record!.fullToken).toBe(r.token); + expect(record!.scopes).toEqual(['kafka:endpoint:read']); + expect(record!.name).toMatch(/^parallel-\d$/); + expect(record!.createdAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + } + + // The root token survived unchanged. + const rootPrefix = ROOT_TOKEN.slice(0, 8); + const rootRecord = parsed.store.get(rootPrefix); + expect(rootRecord?.fullToken).toBe(ROOT_TOKEN); + expect(rootRecord?.scopes).toBe('*'); + }); +}); From 5281a2fc968ac09a7dd07ced364aa83bb33f04ac Mon Sep 17 00:00:00 2001 From: Zvonimir Date: Tue, 5 May 2026 14:31:59 +0200 Subject: [PATCH 3/7] feat(cli): add dkg auth mint-token / list-tokens / revoke-token (slice 06 phase 3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CLI surface for the slice 06 token-admin API. Subcommands sit under the existing `dkg auth` group (alongside `show`, `rotate`, `status`) so we don't proliferate top-level groups. dkg auth mint-token --scope [--name